code stringlengths 82 53.2k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_lowerCAmelCase : int =16
_lowerCAmelCase : str =32
def _A ( SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE = 1_6 ):
UpperCAmelCase__: Union[str, Any] = AutoTokenizer.from_pretrained("bert-base-cased" )
UpperCAmelCase__: Tuple = load_dataset("glue" ,"mrpc" )
def tokenize_function(SCREAMING_SNAKE_CASE ):
# max_length=None => use the model max length (it's actually the default)
UpperCAmelCase__: Dict = tokenizer(examples["sentence1"] ,examples["sentence2"] ,truncation=lowerCAmelCase_ ,max_length=lowerCAmelCase_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
UpperCAmelCase__: Optional[Any] = datasets.map(
lowerCAmelCase_ ,batched=lowerCAmelCase_ ,remove_columns=["idx", "sentence1", "sentence2"] ,)
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCAmelCase__: Dict = tokenized_datasets.rename_column("label" ,"labels" )
def collate_fn(SCREAMING_SNAKE_CASE ):
# On TPU it's best to pad everything to the same length or training will be very slow.
UpperCAmelCase__: int = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
UpperCAmelCase__: List[str] = 1_6
elif accelerator.mixed_precision != "no":
UpperCAmelCase__: Optional[Any] = 8
else:
UpperCAmelCase__: Any = None
return tokenizer.pad(
lowerCAmelCase_ ,padding="longest" ,max_length=lowerCAmelCase_ ,pad_to_multiple_of=lowerCAmelCase_ ,return_tensors="pt" ,)
# Instantiate dataloaders.
UpperCAmelCase__: str = DataLoader(
tokenized_datasets["train"] ,shuffle=lowerCAmelCase_ ,collate_fn=lowerCAmelCase_ ,batch_size=lowerCAmelCase_ )
UpperCAmelCase__: Dict = DataLoader(
tokenized_datasets["validation"] ,shuffle=lowerCAmelCase_ ,collate_fn=lowerCAmelCase_ ,batch_size=lowerCAmelCase_ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_lowerCAmelCase : List[Any] =mocked_dataloaders # noqa: F811
def _A ( SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ):
if os.environ.get("TESTING_MOCKED_DATALOADERS" ,lowerCAmelCase_ ) == "1":
UpperCAmelCase__: Tuple = 2
# New Code #
UpperCAmelCase__: str = int(args.gradient_accumulation_steps )
# Initialize accelerator
UpperCAmelCase__: Optional[Any] = Accelerator(
cpu=args.cpu ,mixed_precision=args.mixed_precision ,gradient_accumulation_steps=lowerCAmelCase_ )
if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
raise NotImplementedError(
"Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase__: Dict = config["lr"]
UpperCAmelCase__: Any = int(config["num_epochs"] )
UpperCAmelCase__: List[str] = int(config["seed"] )
UpperCAmelCase__: int = int(config["batch_size"] )
UpperCAmelCase__: List[Any] = evaluate.load("glue" ,"mrpc" )
set_seed(lowerCAmelCase_ )
UpperCAmelCase__ , UpperCAmelCase__: List[str] = get_dataloaders(lowerCAmelCase_ ,lowerCAmelCase_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCAmelCase__: List[str] = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" ,return_dict=lowerCAmelCase_ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCAmelCase__: str = model.to(accelerator.device )
# Instantiate optimizer
UpperCAmelCase__: List[Any] = AdamW(params=model.parameters() ,lr=lowerCAmelCase_ )
# Instantiate scheduler
UpperCAmelCase__: Tuple = get_linear_schedule_with_warmup(
optimizer=lowerCAmelCase_ ,num_warmup_steps=1_0_0 ,num_training_steps=(len(lowerCAmelCase_ ) * num_epochs) ,)
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__: Any = accelerator.prepare(
lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ )
# Now we train the model
for epoch in range(lowerCAmelCase_ ):
model.train()
for step, batch in enumerate(lowerCAmelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(lowerCAmelCase_ ):
UpperCAmelCase__: str = model(**lowerCAmelCase_ )
UpperCAmelCase__: Optional[Any] = output.loss
accelerator.backward(lowerCAmelCase_ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowerCAmelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCAmelCase__: List[str] = model(**lowerCAmelCase_ )
UpperCAmelCase__: Union[str, Any] = outputs.logits.argmax(dim=-1 )
UpperCAmelCase__ , UpperCAmelCase__: Union[str, Any] = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=lowerCAmelCase_ ,references=lowerCAmelCase_ ,)
UpperCAmelCase__: List[str] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"epoch {epoch}:" ,lowerCAmelCase_ )
def _A ( ):
UpperCAmelCase__: List[Any] = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" ,type=lowerCAmelCase_ ,default=lowerCAmelCase_ ,choices=["no", "fp16", "bf16", "fp8"] ,help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." ,)
# New Code #
parser.add_argument(
"--gradient_accumulation_steps" ,type=lowerCAmelCase_ ,default=1 ,help="The number of minibatches to be ran before gradients are accumulated." ,)
parser.add_argument("--cpu" ,action="store_true" ,help="If passed, will train on the CPU." )
UpperCAmelCase__: int = parser.parse_args()
UpperCAmelCase__: Any = {"lr": 2e-5, "num_epochs": 3, "seed": 4_2, "batch_size": 1_6}
training_function(lowerCAmelCase_ ,lowerCAmelCase_ )
if __name__ == "__main__":
main() | 113 |
'''simple docstring'''
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
__lowerCamelCase : List[str] = {
"susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json",
"susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json",
}
class UpperCAmelCase ( _lowercase ):
UpperCAmelCase : Any = '''ernie_m'''
UpperCAmelCase : Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__(self : List[str] , A__ : int = 2_5_0_0_0_2 , A__ : int = 7_6_8 , A__ : int = 1_2 , A__ : int = 1_2 , A__ : int = 3_0_7_2 , A__ : str = "gelu" , A__ : float = 0.1 , A__ : float = 0.1 , A__ : int = 5_1_4 , A__ : float = 0.0_2 , A__ : int = 1 , A__ : float = 1e-05 , A__ : int=None , A__ : Tuple=False , A__ : List[Any]=0.0 , **A__ : List[Any] , ) -> List[Any]:
super().__init__(pad_token_id=A__ , **A__ )
lowercase = vocab_size
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = intermediate_size
lowercase = hidden_act
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = max_position_embeddings
lowercase = initializer_range
lowercase = layer_norm_eps
lowercase = classifier_dropout
lowercase = is_decoder
lowercase = act_dropout
| 310 | 0 |
"""simple docstring"""
def __lowerCAmelCase (_UpperCamelCase ):
__lowerCAmelCase : List[Any] = [0 for i in range(len(_UpperCamelCase ) )]
# initialize interval's left pointer and right pointer
__lowerCAmelCase , __lowerCAmelCase : List[str] = 0, 0
for i in range(1 , len(_UpperCamelCase ) ):
# case when current index is inside the interval
if i <= right_pointer:
__lowerCAmelCase : Any = min(right_pointer - i + 1 , z_result[i - left_pointer] )
__lowerCAmelCase : str = min_edge
while go_next(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
z_result[i] += 1
# if new index's result gives us more right interval,
# we've to update left_pointer and right_pointer
if i + z_result[i] - 1 > right_pointer:
__lowerCAmelCase , __lowerCAmelCase : int = i, i + z_result[i] - 1
return z_result
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
return i + z_result[i] < len(_UpperCamelCase ) and s[z_result[i]] == s[i + z_result[i]]
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : Optional[Any] = 0
# concatenate 'pattern' and 'input_str' and call z_function
# with concatenated string
__lowerCAmelCase : str = z_function(pattern + input_str )
for val in z_result:
# if value is greater then length of the pattern string
# that means this index is starting position of substring
# which is equal to pattern string
if val >= len(_UpperCamelCase ):
answer += 1
return answer
if __name__ == "__main__":
import doctest
doctest.testmod() | 549 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""microsoft/beit-base-patch16-224-pt22k""": (
"""https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"""
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class A__ ( _lowerCamelCase):
A_ : List[str] = 'beit'
def __init__( self , _SCREAMING_SNAKE_CASE=81_92 , _SCREAMING_SNAKE_CASE=7_68 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=30_72 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1E-12 , _SCREAMING_SNAKE_CASE=2_24 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=[3, 5, 7, 11] , _SCREAMING_SNAKE_CASE=[1, 2, 3, 6] , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.4 , _SCREAMING_SNAKE_CASE=2_56 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=2_55 , **_SCREAMING_SNAKE_CASE , ):
super().__init__(**_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = vocab_size
__lowerCAmelCase : Union[str, Any] = hidden_size
__lowerCAmelCase : Dict = num_hidden_layers
__lowerCAmelCase : Any = num_attention_heads
__lowerCAmelCase : Union[str, Any] = intermediate_size
__lowerCAmelCase : Union[str, Any] = hidden_act
__lowerCAmelCase : List[Any] = hidden_dropout_prob
__lowerCAmelCase : Dict = attention_probs_dropout_prob
__lowerCAmelCase : Optional[Any] = initializer_range
__lowerCAmelCase : Optional[Any] = layer_norm_eps
__lowerCAmelCase : Any = image_size
__lowerCAmelCase : Union[str, Any] = patch_size
__lowerCAmelCase : List[Any] = num_channels
__lowerCAmelCase : List[Any] = use_mask_token
__lowerCAmelCase : Any = use_absolute_position_embeddings
__lowerCAmelCase : int = use_relative_position_bias
__lowerCAmelCase : List[str] = use_shared_relative_position_bias
__lowerCAmelCase : Tuple = layer_scale_init_value
__lowerCAmelCase : int = drop_path_rate
__lowerCAmelCase : Dict = use_mean_pooling
# decode head attributes (semantic segmentation)
__lowerCAmelCase : Optional[Any] = out_indices
__lowerCAmelCase : Optional[Any] = pool_scales
# auxiliary head attributes (semantic segmentation)
__lowerCAmelCase : Union[str, Any] = use_auxiliary_head
__lowerCAmelCase : Optional[int] = auxiliary_loss_weight
__lowerCAmelCase : Optional[Any] = auxiliary_channels
__lowerCAmelCase : List[str] = auxiliary_num_convs
__lowerCAmelCase : List[str] = auxiliary_concat_input
__lowerCAmelCase : Optional[int] = semantic_loss_ignore_index
class A__ ( _lowerCamelCase):
A_ : List[str] = version.parse('1.11')
@property
def __lowerCamelCase ( self ):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def __lowerCamelCase ( self ):
return 1E-4 | 549 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__UpperCAmelCase = {
"configuration_transfo_xl": ["TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", "TransfoXLConfig"],
"tokenization_transfo_xl": ["TransfoXLCorpus", "TransfoXLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
"TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
"AdaptiveEmbedding",
"TransfoXLForSequenceClassification",
"TransfoXLLMHeadModel",
"TransfoXLModel",
"TransfoXLPreTrainedModel",
"load_tf_weights_in_transfo_xl",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
"TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFAdaptiveEmbedding",
"TFTransfoXLForSequenceClassification",
"TFTransfoXLLMHeadModel",
"TFTransfoXLMainLayer",
"TFTransfoXLModel",
"TFTransfoXLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 308 |
"""simple docstring"""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
__UpperCAmelCase = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def lowercase__ ( lowerCamelCase : List[str]=None ) -> List[Any]:
if subparsers is not None:
lowerCAmelCase__ : int = subparsers.add_parser("tpu-config" , description=_description )
else:
lowerCAmelCase__ : int = argparse.ArgumentParser("Accelerate tpu-config command" , description=_description )
# Core arguments
lowerCAmelCase__ : Optional[Any] = parser.add_argument_group(
"Config Arguments" , "Arguments that can be configured through `accelerate config`." )
config_args.add_argument(
"--config_file" , type=lowerCamelCase , default=lowerCamelCase , help="Path to the config file to use for accelerate." , )
config_args.add_argument(
"--tpu_name" , default=lowerCamelCase , help="The name of the TPU to use. If not specified, will use the TPU specified in the config file." , )
config_args.add_argument(
"--tpu_zone" , default=lowerCamelCase , help="The zone of the TPU to use. If not specified, will use the zone specified in the config file." , )
lowerCAmelCase__ : List[Any] = parser.add_argument_group("TPU Arguments" , "Arguments for options ran inside the TPU." )
pod_args.add_argument(
"--use_alpha" , action="store_true" , help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`." , )
pod_args.add_argument(
"--command_file" , default=lowerCamelCase , help="The path to the file containing the commands to run on the pod on startup." , )
pod_args.add_argument(
"--command" , action="append" , nargs="+" , help="A command to run on the pod. Can be passed multiple times." , )
pod_args.add_argument(
"--install_accelerate" , action="store_true" , help="Whether to install accelerate on the pod. Defaults to False." , )
pod_args.add_argument(
"--accelerate_version" , default="latest" , help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub." , )
pod_args.add_argument(
"--debug" , action="store_true" , help="If set, will print the command that would be run instead of running it." )
if subparsers is not None:
parser.set_defaults(func=lowerCamelCase )
return parser
def lowercase__ ( lowerCamelCase : List[str] ) -> List[str]:
lowerCAmelCase__ : Optional[int] = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(lowerCamelCase ):
lowerCAmelCase__ : Optional[int] = load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
lowerCAmelCase__ : Optional[int] = defaults.command_file
if not args.command and defaults.commands is not None:
lowerCAmelCase__ : str = defaults.commands
if not args.tpu_name:
lowerCAmelCase__ : Optional[Any] = defaults.tpu_name
if not args.tpu_zone:
lowerCAmelCase__ : List[Any] = defaults.tpu_zone
if args.accelerate_version == "dev":
lowerCAmelCase__ : List[Any] = "git+https://github.com/huggingface/accelerate.git"
elif args.accelerate_version == "latest":
lowerCAmelCase__ : Tuple = "accelerate -U"
elif isinstance(parse(args.accelerate_version ) , lowerCamelCase ):
lowerCAmelCase__ : str = F"accelerate=={args.accelerate_version}"
if not args.command_file and not args.command:
raise ValueError("You must specify either a command file or a command to run on the pod." )
if args.command_file:
with open(args.command_file , "r" ) as f:
lowerCAmelCase__ : Union[str, Any] = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , lowerCamelCase ):
lowerCAmelCase__ : str = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
lowerCAmelCase__ : Dict = ["cd /usr/share"]
if args.install_accelerate:
new_cmd += [F"pip install {args.accelerate_version}"]
new_cmd += args.command
lowerCAmelCase__ : List[str] = "; ".join(lowerCamelCase )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
lowerCAmelCase__ : str = ["gcloud"]
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(F"Running {' '.join(lowerCamelCase )}" )
return
subprocess.run(lowerCamelCase )
print("Successfully setup pod." )
def lowercase__ ( ) -> Any:
lowerCAmelCase__ : Optional[Any] = tpu_command_parser()
lowerCAmelCase__ : Dict = parser.parse_args()
tpu_command_launcher(lowerCamelCase )
| 308 | 1 |
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
lowercase_ = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
lowercase_ = json.load(f)
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def snake_case__ ( self : Dict , _lowerCAmelCase : Optional[int] ):
return FSMTTokenizer.from_pretrained(__a )
def snake_case__ ( self : Optional[int] , _lowerCAmelCase : int ):
__snake_case : str = FSMTForConditionalGeneration.from_pretrained(__a ).to(__a )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
["""en-ru""", 26.0],
["""ru-en""", 22.0],
["""en-de""", 22.0],
["""de-en""", 29.0],
] )
@slow
def snake_case__ ( self : Dict , _lowerCAmelCase : Dict , _lowerCAmelCase : Tuple ):
__snake_case : str = f'''facebook/wmt19-{pair}'''
__snake_case : int = self.get_tokenizer(__a )
__snake_case : Tuple = self.get_model(__a )
__snake_case : Tuple = bleu_data[pair]['src']
__snake_case : Union[str, Any] = bleu_data[pair]['tgt']
__snake_case : List[str] = tokenizer(__a , return_tensors="""pt""" , truncation=__a , padding="""longest""" ).to(__a )
__snake_case : Dict = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
__snake_case : Any = tokenizer.batch_decode(
__a , skip_special_tokens=__a , clean_up_tokenization_spaces=__a )
__snake_case : Tuple = calculate_bleu(__a , __a )
print(__a )
self.assertGreaterEqual(scores["""bleu"""] , __a )
| 717 | # Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
lowercase_ = TypeVar("T")
class SCREAMING_SNAKE_CASE__ ( Generic[T] ):
def __init__( self : List[Any] , _lowerCAmelCase : bool = True ):
__snake_case : dict[T, list[T]] = {} # dictionary of lists
__snake_case : Union[str, Any] = directed
def snake_case__ ( self : Any , _lowerCAmelCase : T , _lowerCAmelCase : T ):
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are both present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(_lowerCAmelCase )
self.adj_list[destination_vertex].append(_lowerCAmelCase )
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(_lowerCAmelCase )
__snake_case : Union[str, Any] = [source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
# with source vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif destination_vertex in self.adj_list:
self.adj_list[destination_vertex].append(_lowerCAmelCase )
__snake_case : List[str] = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
# containing the destination vertex as it's first adjacent vertex also
# create a new vertex with destination vertex as key and assign a list
# containing the source vertex as it's first adjacent vertex.
else:
__snake_case : Any = [destination_vertex]
__snake_case : Tuple = [source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(_lowerCAmelCase )
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(_lowerCAmelCase )
__snake_case : Tuple = []
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
elif destination_vertex in self.adj_list:
__snake_case : str = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
# destination vertex as it's first adjacent vertex. Then create a new vertex
# with destination vertex as key, which has no adjacent vertex
else:
__snake_case : Union[str, Any] = [destination_vertex]
__snake_case : List[Any] = []
return self
def __repr__( self : List[str] ):
return pformat(self.adj_list )
| 390 | 0 |
def _snake_case( SCREAMING_SNAKE_CASE__ = 50 ) -> int:
lowercase : Union[str, Any] = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 336 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class __snake_case ( lowerCAmelCase ):
_a : Optional[int]= "openai/whisper-base"
_a : int= (
"This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
"transcribed text."
)
_a : int= "transcriber"
_a : List[str]= WhisperProcessor
_a : Optional[int]= WhisperForConditionalGeneration
_a : List[str]= ["audio"]
_a : Any= ["text"]
def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
'''simple docstring'''
return self.pre_processor(snake_case ,return_tensors="""pt""" ).input_features
def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
'''simple docstring'''
return self.model.generate(inputs=snake_case )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
'''simple docstring'''
return self.pre_processor.batch_decode(snake_case ,skip_special_tokens=snake_case )[0]
| 336 | 1 |
from statistics import mean, stdev
def _snake_case ( __snake_case , __snake_case = 3 ):
_UpperCamelCase = min(__snake_case )
_UpperCamelCase = max(__snake_case )
# normalize data
return [round((x - x_min) / (x_max - x_min) , __snake_case ) for x in data]
def _snake_case ( __snake_case , __snake_case = 3 ):
_UpperCamelCase = mean(__snake_case )
_UpperCamelCase = stdev(__snake_case )
# standardize data
return [round((x - mu) / (sigma) , __snake_case ) for x in data]
| 71 | from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
_lowerCAmelCase = logging.get_logger(__name__)
# General docstring
_lowerCAmelCase = "RegNetConfig"
# Base docstring
_lowerCAmelCase = "facebook/regnet-y-040"
_lowerCAmelCase = [1, 1_088, 7, 7]
# Image classification docstring
_lowerCAmelCase = "facebook/regnet-y-040"
_lowerCAmelCase = "tabby, tabby cat"
_lowerCAmelCase = [
"facebook/regnet-y-040",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
def __init__( self : str , _A : int , _A : int = 3 , _A : int = 1 , _A : int = 1 , _A : Optional[str] = "relu" , **_A : Any , ):
super().__init__(**_A )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
_UpperCamelCase = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
_UpperCamelCase = tf.keras.layers.ConvaD(
filters=_A , kernel_size=_A , strides=_A , padding='''VALID''' , groups=_A , use_bias=_A , name='''convolution''' , )
_UpperCamelCase = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' )
_UpperCamelCase = ACTaFN[activation] if activation is not None else tf.identity
def UpperCamelCase_ ( self : Any , _A : Any ):
_UpperCamelCase = self.convolution(self.padding(_A ) )
_UpperCamelCase = self.normalization(_A )
_UpperCamelCase = self.activation(_A )
return hidden_state
class TFRegNetEmbeddings(tf.keras.layers.Layer):
    """RegNet stem: a single strided conv that embeds the raw pixel values."""

    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size,
            kernel_size=3,
            stride=2,
            activation=config.hidden_act,
            name="embedder",
        )

    def call(self, pixel_values):
        """Validate the channel count, convert NCHW -> NHWC, then embed."""
        num_channels = shape_list(pixel_values)[1]
        # shape_list only yields concrete values eagerly, so guard the check.
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class TFRegNetShortCut(tf.keras.layers.Layer):
    """1x1 strided conv + batch norm that projects the residual so it can be
    added to a downsampled / widened main branch."""

    def __init__(self, out_channels: int, stride: int = 2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution"
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, inputs, training: bool = False):
        """Project `inputs`; `training` controls batch-norm statistics."""
        return self.normalization(self.convolution(inputs), training=training)
class TFRegNetSELayer(tf.keras.layers.Layer):
    """Squeeze-and-Excitation: rescale channels by a learned, pooled gate."""

    def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
        super().__init__(**kwargs)
        # keepdims=True so the pooled tensor broadcasts back over (h, w).
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        # Names "attention.0"/"attention.2" mirror the PyTorch nn.Sequential
        # indices so checkpoint weights map across frameworks.
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state
class TFRegNetXLayer(tf.keras.layers.Layer):
    """RegNet's X residual layer: 1x1 -> grouped 3x3 -> 1x1 convolutions,
    i.e. a ResNet bottleneck with group convolutions."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        # Project the residual only when the shapes of the two branches differ.
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            # No activation on the last conv: it is applied after the residual add.
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACTaFN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
    """RegNet's Y residual layer: an X layer with a Squeeze-and-Excitation
    block inserted before the final 1x1 convolution."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            # SE gate sized to a quarter of the *input* width.
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACTaFN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
    """A RegNet stage: `depth` stacked X or Y layers, the first one strided."""

    def __init__(
        self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs
    ):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
    """Stack of RegNet stages; optionally collects per-stage hidden states."""

    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
                name="stages.0",
            )
        )
        # Pair consecutive hidden sizes: (sizes[i], sizes[i+1]) feeds stage i+1.
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(
                TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}")
            )

    def call(self, hidden_state, output_hidden_states: bool = False, return_dict: bool = True):
        """Run all stages; return a tuple or a model-output dataclass."""
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                # Record the state *entering* each stage ...
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            # ... plus the final output, so len(hidden_states) == num_stages + 1.
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    """Embedder + encoder + global pooler; the shared trunk of both heads.

    `config_class` is required by the `keras_serializable` decorator.
    """

    config_class = RegNetConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")

    @unpack_inputs
    def call(self, pixel_values, output_hidden_states=None, return_dict=None, training=False):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values, training=training)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        # Change to NCHW output format have uniformity in the modules
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))
        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )
class TFRegNetPreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature(self):
        # NCHW; 224x224 matches the checkpoints' pretraining resolution.
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}
# Docstring fragments consumed by the @add_start_docstrings* decorators below.
REGNET_START_DOCSTRING = r"\n    Parameters:\n    This model is a Tensorflow\n    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n    regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n    behavior.\n        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n"
REGNET_INPUTS_DOCSTRING = r"\n    Args:\n        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`ConveNextImageProcessor.__call__`] for details.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
    """Headless RegNet: returns pooled and per-position features."""

    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def call(self, pixel_values, output_hidden_states=None, return_dict=None, training=False):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values=pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        if not return_dict:
            return (outputs[0],) + outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )
@add_start_docstrings(
    "\n    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ",
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    """RegNet trunk plus a (Flatten, Dense) classification head."""

    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head; "classifier.1" mirrors the PyTorch Sequential index.
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def call(self, pixel_values=None, labels=None, output_hidden_states=None, return_dict=None, training=False):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)
        # hf_compute_loss comes from TFSequenceClassificationLoss.
        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
| 71 | 1 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class __UpperCamelCase ( unittest.TestCase ):
    """Tests for AutoImageProcessor loading, registration and remote-code handling.

    NOTE(review): identifiers in this class appear machine-mangled — every test
    method is named ``a__`` (later definitions shadow earlier ones) and many
    call arguments were rewritten to ``_UpperCAmelCase``, a name not defined in
    the methods' scope. The comments below describe the apparent intent;
    confirm against the upstream ``transformers`` test suite.
    """

    # setUp: presumably resets a counter used by later tests — TODO confirm.
    def a__ ( self :List[Any] ):
        snake_case_ : Dict = 0

    # Load an image processor directly from a Hub repo id.
    def a__ ( self :List[str] ):
        snake_case_ : Dict = AutoImageProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
        self.assertIsInstance(_UpperCAmelCase ,_UpperCAmelCase )

    # Load from a local directory containing preprocessor_config.json + config.json.
    def a__ ( self :Optional[int] ):
        with tempfile.TemporaryDirectory() as tmpdirname:
            snake_case_ : Union[str, Any] = Path(_UpperCAmelCase ) / 'preprocessor_config.json'
            snake_case_ : Dict = Path(_UpperCAmelCase ) / 'config.json'
            json.dump(
                {"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} ,open(_UpperCAmelCase ,"""w""" ) ,)
            json.dump({"""model_type""": """clip"""} ,open(_UpperCAmelCase ,"""w""" ) )
            snake_case_ : List[Any] = AutoImageProcessor.from_pretrained(_UpperCAmelCase )
            self.assertIsInstance(_UpperCAmelCase ,_UpperCAmelCase )

    # Legacy path: a feature_extractor_type key must still resolve to an image processor.
    def a__ ( self :List[Any] ):
        with tempfile.TemporaryDirectory() as tmpdirname:
            snake_case_ : Tuple = Path(_UpperCAmelCase ) / 'preprocessor_config.json'
            snake_case_ : Tuple = Path(_UpperCAmelCase ) / 'config.json'
            json.dump(
                {"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} ,open(_UpperCAmelCase ,"""w""" ) ,)
            json.dump({"""model_type""": """clip"""} ,open(_UpperCAmelCase ,"""w""" ) )
            snake_case_ : int = AutoImageProcessor.from_pretrained(_UpperCAmelCase )
            self.assertIsInstance(_UpperCAmelCase ,_UpperCAmelCase )

    # config.json alone (no image_processor_type) must be enough to infer the processor.
    def a__ ( self :Tuple ):
        with tempfile.TemporaryDirectory() as tmpdirname:
            snake_case_ : Optional[int] = CLIPConfig()
            # Create a dummy config file with image_proceesor_type
            snake_case_ : Union[str, Any] = Path(_UpperCAmelCase ) / 'preprocessor_config.json'
            snake_case_ : List[Any] = Path(_UpperCAmelCase ) / 'config.json'
            json.dump(
                {"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} ,open(_UpperCAmelCase ,"""w""" ) ,)
            json.dump({"""model_type""": """clip"""} ,open(_UpperCAmelCase ,"""w""" ) )
            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            snake_case_ : int = AutoImageProcessor.from_pretrained(_UpperCAmelCase ).to_dict()
            config_dict.pop("""image_processor_type""" )
            snake_case_ : List[Any] = CLIPImageProcessor(**_UpperCAmelCase )
            # save in new folder
            model_config.save_pretrained(_UpperCAmelCase )
            config.save_pretrained(_UpperCAmelCase )
            snake_case_ : str = AutoImageProcessor.from_pretrained(_UpperCAmelCase )
            # make sure private variable is not incorrectly saved
            snake_case_ : List[str] = json.loads(config.to_json_string() )
            self.assertTrue("""_processor_class""" not in dict_as_saved )
            self.assertIsInstance(_UpperCAmelCase ,_UpperCAmelCase )

    # Loading from a directory with only preprocessor_config.json present.
    def a__ ( self :Union[str, Any] ):
        with tempfile.TemporaryDirectory() as tmpdirname:
            snake_case_ : Dict = Path(_UpperCAmelCase ) / 'preprocessor_config.json'
            json.dump(
                {"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} ,open(_UpperCAmelCase ,"""w""" ) ,)
            snake_case_ : int = AutoImageProcessor.from_pretrained(_UpperCAmelCase )
            self.assertIsInstance(_UpperCAmelCase ,_UpperCAmelCase )

    # An invalid repo id must raise with a helpful message.
    def a__ ( self :int ):
        with self.assertRaisesRegex(
            _UpperCAmelCase ,"""clip-base is not a local folder and is not a valid model identifier""" ):
            snake_case_ : Optional[Any] = AutoImageProcessor.from_pretrained("""clip-base""" )

    # An invalid revision must raise.
    def a__ ( self :Tuple ):
        with self.assertRaisesRegex(
            _UpperCAmelCase ,R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
            snake_case_ : List[Any] = AutoImageProcessor.from_pretrained(_UpperCAmelCase ,revision="""aaaaaa""" )

    # A repo without preprocessor_config.json must raise.
    def a__ ( self :Optional[int] ):
        with self.assertRaisesRegex(
            _UpperCAmelCase ,"""hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" ,):
            snake_case_ : Dict = AutoImageProcessor.from_pretrained("""hf-internal-testing/config-no-model""" )

    # Remote (dynamic) image processors: blocked unless trust_remote_code=True.
    def a__ ( self :Optional[int] ):
        with self.assertRaises(_UpperCAmelCase ):
            snake_case_ : int = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(_UpperCAmelCase ):
            snake_case_ : Dict = AutoImageProcessor.from_pretrained(
                """hf-internal-testing/test_dynamic_image_processor""" ,trust_remote_code=_UpperCAmelCase )
        snake_case_ : Union[str, Any] = AutoImageProcessor.from_pretrained(
            """hf-internal-testing/test_dynamic_image_processor""" ,trust_remote_code=_UpperCAmelCase )
        self.assertEqual(image_processor.__class__.__name__ ,"""NewImageProcessor""" )
        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(_UpperCAmelCase )
            snake_case_ : List[Any] = AutoImageProcessor.from_pretrained(_UpperCAmelCase ,trust_remote_code=_UpperCAmelCase )
        self.assertEqual(reloaded_image_processor.__class__.__name__ ,"""NewImageProcessor""" )

    # Registering a custom config/processor pair with the auto-API, then loading it back.
    def a__ ( self :Optional[int] ):
        try:
            AutoConfig.register("""custom""" ,_UpperCAmelCase )
            AutoImageProcessor.register(_UpperCAmelCase ,_UpperCAmelCase )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(_UpperCAmelCase ):
                AutoImageProcessor.register(_UpperCAmelCase ,_UpperCAmelCase )
            with tempfile.TemporaryDirectory() as tmpdirname:
                snake_case_ : Optional[int] = Path(_UpperCAmelCase ) / 'preprocessor_config.json'
                snake_case_ : Optional[Any] = Path(_UpperCAmelCase ) / 'config.json'
                json.dump(
                    {"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} ,open(_UpperCAmelCase ,"""w""" ) ,)
                json.dump({"""model_type""": """clip"""} ,open(_UpperCAmelCase ,"""w""" ) )
                snake_case_ : Union[str, Any] = CustomImageProcessor.from_pretrained(_UpperCAmelCase )
                # Now that the config is registered, it can be used as any other config with the auto-API
                with tempfile.TemporaryDirectory() as tmp_dir:
                    image_processor.save_pretrained(_UpperCAmelCase )
                    snake_case_ : List[str] = AutoImageProcessor.from_pretrained(_UpperCAmelCase )
                    self.assertIsInstance(_UpperCAmelCase ,_UpperCAmelCase )
        finally:
            # Always undo the registration so other tests see a clean registry.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]

    # Local class must win over remote code unless trust_remote_code=True.
    def a__ ( self :Any ):
        class __UpperCamelCase ( a__ ):
            # Marker attribute so the test can tell the local class from the Hub one.
            lowercase : str = True
        try:
            AutoConfig.register("""custom""" ,_UpperCAmelCase )
            AutoImageProcessor.register(_UpperCAmelCase ,_UpperCAmelCase )
            # If remote code is not set, the default is to use local
            snake_case_ : Dict = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
            self.assertEqual(image_processor.__class__.__name__ ,"""NewImageProcessor""" )
            self.assertTrue(image_processor.is_local )
            # If remote code is disabled, we load the local one.
            snake_case_ : Dict = AutoImageProcessor.from_pretrained(
                """hf-internal-testing/test_dynamic_image_processor""" ,trust_remote_code=_UpperCAmelCase )
            self.assertEqual(image_processor.__class__.__name__ ,"""NewImageProcessor""" )
            self.assertTrue(image_processor.is_local )
            # If remote is enabled, we load from the Hub
            snake_case_ : Tuple = AutoImageProcessor.from_pretrained(
                """hf-internal-testing/test_dynamic_image_processor""" ,trust_remote_code=_UpperCAmelCase )
            self.assertEqual(image_processor.__class__.__name__ ,"""NewImageProcessor""" )
            self.assertTrue(not hasattr(_UpperCAmelCase ,"""is_local""" ) )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
'''simple docstring'''
def _lowerCAmelCase ( __snake_case : Optional[Any] ) -> Optional[Any]: # noqa: E741
__A : Tuple = len(__snake_case )
__A : Optional[int] = 0
__A : str = [0] * n
__A : int = [False] * n
__A : Tuple = [False] * n
def dfs(__snake_case : List[str] , __snake_case : List[Any] , __snake_case : Optional[Any] , __snake_case : int ):
if parent == root:
out_edge_count += 1
__A : str = True
__A : Tuple = at
for to in l[at]:
if to == parent:
pass
elif not visited[to]:
__A : Optional[int] = dfs(__snake_case , __snake_case , __snake_case , __snake_case )
__A : int = min(low[at] , low[to] )
# AP found via bridge
if at < low[to]:
__A : Tuple = True
# AP found via cycle
if at == low[to]:
__A : Optional[Any] = True
else:
__A : Any = min(low[at] , __snake_case )
return out_edge_count
for i in range(__snake_case ):
if not visited[i]:
__A : Tuple = 0
__A : List[Any] = dfs(__snake_case , __snake_case , -1 , __snake_case )
__A : Union[str, Any] = out_edge_count > 1
for x in range(len(__snake_case ) ):
if is_art[x] is True:
print(__snake_case )
# Adjacency list of graph
# (Bound to the name `data` and passed to the articulation-point routine
# defined above; the previous version called an undefined `compute_ap` with
# an undefined `data`.)
data = {
    0: [1, 2],
    1: [0, 2],
    2: [0, 1, 3, 5],
    3: [2, 4],
    4: [3],
    5: [2, 6, 8],
    6: [5, 7],
    7: [6, 8],
    8: [5, 7],
}
_lowerCAmelCase(data)
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
    """Scheduler test suite for diffusers' PNDMScheduler.

    NOTE(review): identifiers in this class appear machine-mangled — every
    assignment target was rewritten to ``lowercase__`` while later statements
    still use the original names (``config``, ``scheduler``, ``output`` ...),
    and both class attributes below share one name so the second shadows the
    first. Comments describe the apparent intent; confirm against the
    upstream diffusers test suite before relying on them.
    """

    # Apparent originals: scheduler_classes and forward_default_kwargs.
    _UpperCamelCase : List[str] = (PNDMScheduler,)
    _UpperCamelCase : Optional[int] = (('num_inference_steps', 50),)

    def SCREAMING_SNAKE_CASE_ ( self : Any , **a : Any )-> Optional[Any]:
        """Build a default scheduler config dict, overridable via kwargs."""
        lowercase__ = {
            'num_train_timesteps': 1_000,
            'beta_start': 0.0001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
        }
        config.update(**a )
        return config

    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , a : List[Any]=0 , **a : int )-> Union[str, Any]:
        """Check that save_config/from_pretrained round-trips scheduler outputs."""
        lowercase__ = dict(self.forward_default_kwargs )
        lowercase__ = kwargs.pop('num_inference_steps' , a )
        lowercase__ = self.dummy_sample
        lowercase__ = 0.1 * sample
        # Fake residual history so step_prk/step_plms have a warm state.
        lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            lowercase__ = self.get_scheduler_config(**a )
            lowercase__ = scheduler_class(**a )
            scheduler.set_timesteps(a )
            # copy over dummy past residuals
            lowercase__ = dummy_past_residuals[:]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(a )
                lowercase__ = scheduler_class.from_pretrained(a )
                new_scheduler.set_timesteps(a )
                # copy over dummy past residuals
                lowercase__ = dummy_past_residuals[:]
            lowercase__ = scheduler.step_prk(a , a , a , **a ).prev_sample
            lowercase__ = new_scheduler.step_prk(a , a , a , **a ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
            lowercase__ = scheduler.step_plms(a , a , a , **a ).prev_sample
            lowercase__ = new_scheduler.step_plms(a , a , a , **a ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"

    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Optional[Any]:
        """Intentionally disabled base-class check (not applicable to PNDM)."""
        pass

    def SCREAMING_SNAKE_CASE_ ( self : str , a : List[Any]=0 , **a : List[Any] )-> str:
        """Same round-trip check but with residuals set after set_timesteps."""
        lowercase__ = dict(self.forward_default_kwargs )
        lowercase__ = kwargs.pop('num_inference_steps' , a )
        lowercase__ = self.dummy_sample
        lowercase__ = 0.1 * sample
        lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            lowercase__ = self.get_scheduler_config()
            lowercase__ = scheduler_class(**a )
            scheduler.set_timesteps(a )
            # copy over dummy past residuals (must be after setting timesteps)
            lowercase__ = dummy_past_residuals[:]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(a )
                lowercase__ = scheduler_class.from_pretrained(a )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(a )
                # copy over dummy past residual (must be after setting timesteps)
                lowercase__ = dummy_past_residuals[:]
            lowercase__ = scheduler.step_prk(a , a , a , **a ).prev_sample
            lowercase__ = new_scheduler.step_prk(a , a , a , **a ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
            lowercase__ = scheduler.step_plms(a , a , a , **a ).prev_sample
            lowercase__ = new_scheduler.step_plms(a , a , a , **a ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"

    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , **a : Dict )-> Tuple:
        """Run a full PRK + PLMS denoising loop and return the final sample."""
        lowercase__ = self.scheduler_classes[0]
        lowercase__ = self.get_scheduler_config(**a )
        lowercase__ = scheduler_class(**a )
        lowercase__ = 10
        lowercase__ = self.dummy_model()
        lowercase__ = self.dummy_sample_deter
        scheduler.set_timesteps(a )
        # PNDM warms up with Runge-Kutta (PRK) steps, then switches to PLMS.
        for i, t in enumerate(scheduler.prk_timesteps ):
            lowercase__ = model(a , a )
            lowercase__ = scheduler.step_prk(a , a , a ).prev_sample
        for i, t in enumerate(scheduler.plms_timesteps ):
            lowercase__ = model(a , a )
            lowercase__ = scheduler.step_plms(a , a , a ).prev_sample
        return sample

    def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> Tuple:
        """Outputs of consecutive steps must keep the input sample's shape."""
        lowercase__ = dict(self.forward_default_kwargs )
        lowercase__ = kwargs.pop('num_inference_steps' , a )
        for scheduler_class in self.scheduler_classes:
            lowercase__ = self.get_scheduler_config()
            lowercase__ = scheduler_class(**a )
            lowercase__ = self.dummy_sample
            lowercase__ = 0.1 * sample
            if num_inference_steps is not None and hasattr(a , 'set_timesteps' ):
                scheduler.set_timesteps(a )
            elif num_inference_steps is not None and not hasattr(a , 'set_timesteps' ):
                lowercase__ = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            lowercase__ = dummy_past_residuals[:]
            lowercase__ = scheduler.step_prk(a , 0 , a , **a ).prev_sample
            lowercase__ = scheduler.step_prk(a , 1 , a , **a ).prev_sample
            self.assertEqual(output_a.shape , sample.shape )
            self.assertEqual(output_a.shape , output_a.shape )
            lowercase__ = scheduler.step_plms(a , 0 , a , **a ).prev_sample
            lowercase__ = scheduler.step_plms(a , 1 , a , **a ).prev_sample
            self.assertEqual(output_a.shape , sample.shape )
            self.assertEqual(output_a.shape , output_a.shape )

    def SCREAMING_SNAKE_CASE_ ( self : str )-> Optional[Any]:
        """Config sweep over num_train_timesteps."""
        for timesteps in [100, 1_000]:
            self.check_over_configs(num_train_timesteps=a )

    def SCREAMING_SNAKE_CASE_ ( self : Dict )-> int:
        """Config sweep over steps_offset, plus an exact timestep check."""
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=a )
        lowercase__ = self.scheduler_classes[0]
        lowercase__ = self.get_scheduler_config(steps_offset=1 )
        lowercase__ = scheduler_class(**a )
        scheduler.set_timesteps(10 )
        assert torch.equal(
            scheduler.timesteps , torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , )

    def SCREAMING_SNAKE_CASE_ ( self : int )-> List[Any]:
        """Config sweep over (beta_start, beta_end) pairs."""
        for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ):
            self.check_over_configs(beta_start=a , beta_end=a )

    def SCREAMING_SNAKE_CASE_ ( self : int )-> Optional[Any]:
        """Config sweep over beta schedules."""
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=a )

    def SCREAMING_SNAKE_CASE_ ( self : int )-> Tuple:
        """Config sweep over prediction types."""
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=a )

    def SCREAMING_SNAKE_CASE_ ( self : Any )-> Any:
        """Forward sweep over individual time steps."""
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=a )

    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Any:
        """Forward sweep over (time step, num_inference_steps) pairs."""
        for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
            self.check_over_forward(num_inference_steps=a )

    def SCREAMING_SNAKE_CASE_ ( self : int )-> Tuple:
        """Regression test for the power-of-3 timestep bug in step_prk."""
        lowercase__ = 27
        for scheduler_class in self.scheduler_classes:
            lowercase__ = self.dummy_sample
            lowercase__ = 0.1 * sample
            lowercase__ = self.get_scheduler_config()
            lowercase__ = scheduler_class(**a )
            scheduler.set_timesteps(a )
            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2] ):
                lowercase__ = scheduler.step_prk(a , a , a ).prev_sample

    def SCREAMING_SNAKE_CASE_ ( self : int )-> Optional[int]:
        """step_plms without warmed-up residuals must raise."""
        with self.assertRaises(a ):
            lowercase__ = self.scheduler_classes[0]
            lowercase__ = self.get_scheduler_config()
            lowercase__ = scheduler_class(**a )
            scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample

    def SCREAMING_SNAKE_CASE_ ( self : Any )-> Any:
        """Golden-value check on the default full loop."""
        lowercase__ = self.full_loop()
        lowercase__ = torch.sum(torch.abs(a ) )
        lowercase__ = torch.mean(torch.abs(a ) )
        assert abs(result_sum.item() - 198.1318 ) < 1E-2
        assert abs(result_mean.item() - 0.2580 ) < 1E-3

    def SCREAMING_SNAKE_CASE_ ( self : str )-> Optional[int]:
        """Golden-value check with v-prediction."""
        lowercase__ = self.full_loop(prediction_type='v_prediction' )
        lowercase__ = torch.sum(torch.abs(a ) )
        lowercase__ = torch.mean(torch.abs(a ) )
        assert abs(result_sum.item() - 67.3986 ) < 1E-2
        assert abs(result_mean.item() - 0.0878 ) < 1E-3

    def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> Optional[Any]:
        """Golden-value check with set_alpha_to_one and beta_start=0.01.

        NOTE(review): the mangled flag argument hides whether this case used
        True (vs False in the next one) — confirm upstream.
        """
        lowercase__ = self.full_loop(set_alpha_to_one=a , beta_start=0.01 )
        lowercase__ = torch.sum(torch.abs(a ) )
        lowercase__ = torch.mean(torch.abs(a ) )
        assert abs(result_sum.item() - 230.0399 ) < 1E-2
        assert abs(result_mean.item() - 0.2995 ) < 1E-3

    def SCREAMING_SNAKE_CASE_ ( self : Any )-> Union[str, Any]:
        """Companion golden-value check for the other set_alpha_to_one value."""
        lowercase__ = self.full_loop(set_alpha_to_one=a , beta_start=0.01 )
        lowercase__ = torch.sum(torch.abs(a ) )
        lowercase__ = torch.mean(torch.abs(a ) )
        assert abs(result_sum.item() - 186.9482 ) < 1E-2
        assert abs(result_mean.item() - 0.2434 ) < 1E-3
| 45 |
from string import ascii_uppercase

# Map two-digit values 10..35 to their letter digits: {"10": "A", ..., "35": "Z"}.
# (55 == ord("A") - 10.)  Renamed to the name the converter below actually
# references; it was previously bound to an unrelated mangled identifier.
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise TypeError('int() can\'t convert non-string with explicit base' )
if num < 0:
raise ValueError('parameter must be positive int' )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise TypeError('\'str\' object cannot be interpreted as an integer' )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise TypeError('\'float\' object cannot be interpreted as an integer' )
if base in (0, 1):
raise ValueError('base must be >= 2' )
if base > 36:
raise ValueError('base must be <= 36' )
lowercase__ = ''
lowercase__ = 0
lowercase__ = 0
while div != 1:
lowercase__ , lowercase__ = divmod(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if base >= 11 and 9 < mod < 36:
lowercase__ = ALPHABET_VALUES[str(_SCREAMING_SNAKE_CASE )]
else:
lowercase__ = str(_SCREAMING_SNAKE_CASE )
new_value += actual_value
lowercase__ = num // base
lowercase__ = div
if div == 0:
return str(new_value[::-1] )
elif div == 1:
new_value += str(_SCREAMING_SNAKE_CASE )
return str(new_value[::-1] )
return new_value[::-1]
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Round-trip self-test: converting every num to every base and parsing it
    # back with int(s, base) must be lossless. (Previously this called an
    # undefined name `decimal_to_any`.)
    for base in range(2, 37):
        for num in range(1_000):
            assert int(__UpperCamelCase(num, base), base) == num, (
                num,
                base,
                __UpperCamelCase(num, base),
                int(__UpperCamelCase(num, base), base),
            )
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow:
    """Differentiable CLIP pre-processing.

    Keeps images as tensors end-to-end (resize -> center-crop -> normalize)
    so gradients can flow through the image path; renamed to match the
    ``ProcessorGradientFlow(device=...)`` call site elsewhere in this file.
    """

    def __init__(self, device: str = "cpu", clip_model: str = "openai/clip-vit-large-patch14") -> None:
        # NOTE(review): only `device` is passed by keyword at the visible call
        # site; the second parameter's original name is not recoverable here.
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model)
        # CLIP's published image normalization statistics.
        self.image_mean = [0.48145466, 0.4578275, 0.40821073]
        self.image_std = [0.26862954, 0.26130258, 0.27577711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std)
        self.resize = torchvision.transforms.Resize(224)
        self.center_crop = torchvision.transforms.CenterCrop(224)

    def preprocess_img(self, images):
        """Resize, center-crop and normalize a batch of image tensors."""
        images = self.resize(images)
        images = self.center_crop(images)
        images = self.normalize(images)
        return images

    def __call__(self, text=None, images=None, **kwargs):
        """Tokenize `text`, preprocess `images`, and move everything to `self.device`."""
        encoding = self.tokenizer(text=text, **kwargs)
        # Fix: the preprocessed pixels were previously assigned to an unused
        # local and dropped; the CLIP model expects them as "pixel_values".
        encoding["pixel_values"] = self.preprocess_img(images)
        encoding = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding
class VQGAN_CLIP(nn.Module):
    """Edit or generate images by optimizing a VQGAN latent against CLIP
    similarity to positive (and optionally negative) text prompts."""

    def __init__(
        self,
        iterations=10,
        lr=0.01,
        vqgan=None,
        vqgan_config=None,
        vqgan_checkpoint=None,
        clip=None,
        clip_preprocessor=None,
        device=None,
        log=False,
        save_vector=True,
        return_val="image",
        quantize=True,
        save_intermediate=False,
        show_intermediate=False,
        make_grid=False,
    ) -> None:
        """Instantiate the wrapper; pass `vqgan`/`clip` to reuse loaded models.

        NOTE(review): the obfuscated source had every parameter named
        identically (a SyntaxError); names are reconstructed from how each
        default is consumed in the body. `clip_preprocessor`, `save_vector`,
        `save_intermediate` and `show_intermediate` are accepted but unused
        here, matching the original defaults list.
        """
        super().__init__()
        self.latent = None
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device, conf_path=vqgan_config, ckpt_path=vqgan_checkpoint)
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        self.clip.to(self.device)
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device)
        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        self.latent_dim = self.vqgan.decoder.z_shape

    def make_animation(self, input_path=None, output_path=None, total_duration=5, extend_frames=True):
        """Assemble the intermediate .png frames in `input_path` into a GIF."""
        images = []
        if output_path is None:
            output_path = "./animation.gif"
        if input_path is None:
            input_path = self.save_path
        paths = sorted(glob(input_path + "/*"))
        if not len(paths):
            raise ValueError(
                "No images found in save path, aborting (did you pass save_intermediate=True to the generate"
                " function?)")
        if len(paths) == 1:
            print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)")
        frame_duration = total_duration / len(paths)
        durations = [frame_duration] * len(paths)
        if extend_frames:
            # Hold the first and last frames longer so the loop reads well.
            durations[0] = 1.5
            durations[-1] = 3
        for file_name in paths:
            if file_name.endswith(".png"):
                images.append(imageio.imread(file_name))
        imageio.mimsave(output_path, images, duration=durations)
        print(f"""gif saved to {output_path}""")

    def _get_latent(self, path=None, img=None):
        """Encode the image at `path` into a VQGAN latent (tensor input is
        not implemented)."""
        if not (path or img):
            raise ValueError("Input either path or tensor")
        if img is not None:
            raise NotImplementedError
        img = preprocess(Image.open(path), target_image_size=256).to(self.device)
        img = preprocess_vqgan(img)
        z, *_ = self.vqgan.encode(img)
        return z

    def _add_vector(self, transform_vector):
        """Decode `self.latent + transform_vector` back to image space,
        optionally quantizing the shifted latent first."""
        base_latent = self.latent.detach().requires_grad_()
        trans_latent = base_latent + transform_vector
        if self.quantize:
            z_q, *_ = self.vqgan.quantize(trans_latent)
        else:
            z_q = trans_latent
        return self.vqgan.decode(z_q)

    def _get_clip_similarity(self, prompts, image, weights=None):
        """Summed CLIP image/text similarity, optionally weighted per prompt."""
        clip_inputs = self.clip_preprocessor(text=prompts, images=image, return_tensors="pt", padding=True)
        clip_outputs = self.clip(**clip_inputs)
        similarity_logits = clip_outputs.logits_per_image
        if weights is not None:
            similarity_logits = similarity_logits * weights
        return similarity_logits.sum()

    def _get_CLIP_loss(self, pos_prompts, neg_prompts, image):
        """Loss rewarding similarity to positive prompts, penalizing negatives."""
        # NOTE(review): the reciprocal positive weights are preserved from
        # the original source — confirm this inversion is intentional.
        pos_logits = self._get_clip_similarity(pos_prompts["prompts"], image, weights=(1 / pos_prompts["weights"]))
        if neg_prompts:
            neg_logits = self._get_clip_similarity(neg_prompts["prompts"], image, weights=neg_prompts["weights"])
        else:
            neg_logits = torch.tensor([1], device=self.device)
        loss = -torch.log(pos_logits) + torch.log(neg_logits)
        return loss

    def _optimize_CLIP(self, original_img, pos_prompts, neg_prompts):
        """Yield progressively edited images (or the raw latent offset,
        depending on `self.return_val`) while optimizing the offset."""
        vector = torch.randn_like(self.latent, requires_grad=True, device=self.device)
        optim = torch.optim.Adam([vector], lr=self.lr)
        for _ in range(self.iterations):
            optim.zero_grad()
            transformed_img = self._add_vector(vector)
            processed_img = loop_post_process(transformed_img)
            clip_loss = self._get_CLIP_loss(pos_prompts, neg_prompts, processed_img)
            print("CLIP loss", clip_loss)
            if self.log:
                wandb.log({"CLIP Loss": clip_loss})
            clip_loss.backward(retain_graph=True)
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0])
            else:
                yield vector

    def _init_logging(self, positive_prompts, negative_prompts, image_path):
        """Start a wandb run and record the configuration / source image."""
        wandb.init(reinit=True, project="face-editor")
        wandb.config.update({"Positive Prompts": positive_prompts})
        wandb.config.update({"Negative Prompts": negative_prompts})
        wandb.config.update({"lr": self.lr, "iterations": self.iterations})
        if image_path:
            image = Image.open(image_path)
            image = image.resize((256, 256))
            # Fix: wandb.log takes a dict payload; the previous
            # positional-string form raises at runtime.
            wandb.log({"Original Image": wandb.Image(image)})

    def process_prompts(self, prompts):
        """Normalize prompts ("a|b", [(text, w), ...], "text:w", plain str)
        into a dict of prompt strings plus a per-prompt weight tensor."""
        if not prompts:
            return []
        processed_prompts = []
        weights = []
        if isinstance(prompts, str):
            prompts = [prompt.strip() for prompt in prompts.split("|")]
        for prompt in prompts:
            if isinstance(prompt, (tuple, list)):
                processed_prompt = prompt[0]
                weight = float(prompt[1])
            elif ":" in prompt:
                processed_prompt, weight = prompt.split(":")
                weight = float(weight)
            else:
                processed_prompt = prompt
                weight = 1.0
            processed_prompts.append(processed_prompt)
            weights.append(weight)
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(weights, device=self.device),
        }

    def generate(
        self,
        pos_prompts,
        neg_prompts=None,
        image_path=None,
        show_intermediate=True,
        save_intermediate=False,
        show_final=True,
        save_final=True,
        save_path=None,
    ):
        """Run the CLIP-guided latent optimization, optionally showing and/or
        saving the intermediate and final frames."""
        if image_path:
            self.latent = self._get_latent(image_path)
        else:
            self.latent = torch.randn(self.latent_dim, device=self.device)
        if self.log:
            self._init_logging(pos_prompts, neg_prompts, image_path)
        assert pos_prompts, "You must provide at least one positive prompt."
        pos_prompts = self.process_prompts(pos_prompts)
        neg_prompts = self.process_prompts(neg_prompts)
        if save_final and save_path is None:
            save_path = os.path.join("./outputs/", "_".join(pos_prompts["prompts"]))
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            else:
                save_path = save_path + "_" + get_timestamp()
                os.makedirs(save_path)
            self.save_path = save_path
        elif save_path is not None:
            # Fix: a caller-supplied save_path previously never reached
            # self.save_path, crashing save_intermediate below.
            self.save_path = save_path
        original_img = self.vqgan.decode(self.latent)[0]
        if show_intermediate:
            print("Original Image")
            show_pil(custom_to_pil(original_img))
        original_img = loop_post_process(original_img)
        for step, transformed_img in enumerate(self._optimize_CLIP(original_img, pos_prompts, neg_prompts)):
            if show_intermediate:
                show_pil(transformed_img)
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path, f"""iter_{step:03d}.png"""))
            if self.log:
                wandb.log({"Image": wandb.Image(transformed_img)})
        if show_final:
            show_pil(transformed_img)
        if save_final:
            transformed_img.save(os.path.join(self.save_path, f"""iter_{step:03d}_final.png"""))


# Backward-compatible alias for the previous obfuscated class name.
_SCREAMING_SNAKE_CASE = VQGAN_CLIP
| 59 |
from manim import *


# NOTE(review): this block has been through an identifier obfuscator.
# `UpperCAmelCase` (the base class) and every `__A` argument are placeholders
# whose original values (manim direction constants, colors, or the locals
# bound below) cannot be reconstructed safely, so the code is kept
# byte-identical and only annotated. The repeated `_lowercase` assignments
# each originally bound a distinct name; later uses (`mem`, `fill`, `cpu`,
# `gpu`, `model`, `disk`, `key`, `input`, `a_c`, ...) reveal most of them.
class A_ ( UpperCAmelCase ):
    """Manim scene: animates an input passing through a model whose layer
    weights are shuttled between disk, CPU and GPU (big-model inference)."""

    def __UpperCAmelCase ( self : str ) -> Union[str, Any]:
        # Building blocks: a memory cell, its filled variant (`fill`, see the
        # loop below), and a smaller "meta" cell (`meta_mem`) used for disk.
        _lowercase = Rectangle(height=0.5 ,width=0.5 )
        _lowercase = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 )
        _lowercase = Rectangle(height=0.25 ,width=0.25 )
        # CPU: two columns of six cells plus a label, grouped as `cpu`.
        _lowercase = [mem.copy() for i in range(6 )]
        _lowercase = [mem.copy() for i in range(6 )]
        _lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
        _lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
        _lowercase = VGroup(__A ,__A ).arrange(__A ,buff=0 )
        _lowercase = Text('CPU' ,font_size=24 )
        _lowercase = Group(__A ,__A ).arrange(__A ,buff=0.5 ,aligned_edge=__A )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(__A )
        # GPU: four cells plus a label, grouped as `gpu`.
        _lowercase = [mem.copy() for i in range(4 )]
        _lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
        _lowercase = Text('GPU' ,font_size=24 )
        _lowercase = Group(__A ,__A ).arrange(__A ,buff=0.5 ,aligned_edge=__A )
        gpu.move_to([-1, -1, 0] )
        self.add(__A )
        # Model: six cells plus a label, grouped as `model`.
        _lowercase = [mem.copy() for i in range(6 )]
        _lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
        _lowercase = Text('Model' ,font_size=24 )
        _lowercase = Group(__A ,__A ).arrange(__A ,buff=0.5 ,aligned_edge=__A )
        model.move_to([3, -1.0, 0] )
        self.add(__A )
        # Filled overlays for each model cell (`model_arr`) and their CPU
        # shadows (`model_cpu_arr`), populated per cell below.
        _lowercase = []
        _lowercase = []
        for i, rect in enumerate(__A ):
            _lowercase = fill.copy().set_fill(__A ,opacity=0.8 )
            target.move_to(__A )
            model_arr.append(__A )
            _lowercase = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0.0 ).set_fill(__A ,opacity=0.8 )
            cpu_target.move_to(cpu_left_col_base[i] )
            model_cpu_arr.append(__A )
        self.add(*__A ,*__A )
        # Disk: two columns of six smaller "meta" cells plus a label (`disk`).
        _lowercase = [meta_mem.copy() for i in range(6 )]
        _lowercase = [meta_mem.copy() for i in range(6 )]
        _lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
        _lowercase = VGroup(*__A ).arrange(__A ,buff=0 )
        _lowercase = VGroup(__A ,__A ).arrange(__A ,buff=0 )
        _lowercase = Text('Disk' ,font_size=24 )
        _lowercase = Group(__A ,__A ).arrange(__A ,buff=0.5 ,aligned_edge=__A )
        disk.move_to([-4, -1.25, 0] )
        self.add(__A ,__A )
        # Legend: a key square and its caption text.
        _lowercase = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        _lowercase = MarkupText(
            F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" ,font_size=18 ,)
        key_text.move_to([-5, 2.4, 0] )
        self.add(__A ,__A )
        _lowercase = MarkupText(
            F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" ,font_size=18 ,)
        blue_text.next_to(__A ,DOWN * 2.4 ,aligned_edge=key_text.get_left() )
        self.add(__A )
        # Narration: first caption, then the input square entering the model.
        _lowercase = MarkupText(
            F"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" ,font_size=24 ,)
        step_a.move_to([2, 2, 0] )
        self.play(Write(__A ) )
        _lowercase = Square(0.3 )
        input.set_fill(__A ,opacity=1.0 )
        input.set_stroke(width=0.0 )
        input.next_to(model_base[0] ,__A ,buff=0.5 )
        self.play(Write(__A ) )
        input.generate_target()
        input.target.next_to(model_arr[0] ,direction=__A ,buff=0.02 )
        self.play(MoveToTarget(__A ) )
        self.play(FadeOut(__A ) )
        # Arrow `a` marks the active layer; first weight moves CPU -> GPU.
        _lowercase = Arrow(start=__A ,end=__A ,color=__A ,buff=0.5 )
        a.next_to(model_arr[0].get_left() ,__A ,buff=0.2 )
        model_cpu_arr[0].generate_target()
        model_cpu_arr[0].target.move_to(gpu_rect[0] )
        _lowercase = MarkupText(
            F"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" ,font_size=24 ,)
        step_a.move_to([2, 2, 0] )
        self.play(Write(__A ,run_time=3 ) )
        # Shared Circumscribe kwargs for the per-layer highlight animations.
        _lowercase = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
        self.play(
            Write(__A ) ,Circumscribe(model_arr[0] ,color=__A ,**__A ) ,Circumscribe(model_cpu_arr[0] ,color=__A ,**__A ) ,Circumscribe(gpu_rect[0] ,color=__A ,**__A ) ,)
        self.play(MoveToTarget(model_cpu_arr[0] ) )
        _lowercase = a.copy()
        # Walk the input across all six layers, swapping each layer's weights
        # onto the GPU ahead of it and back to the CPU behind it.
        for i in range(6 ):
            a_c.next_to(model_arr[i].get_right() + 0.02 ,__A ,buff=0.2 )
            input.generate_target()
            input.target.move_to(model_arr[i].get_right() + 0.02 )
            _lowercase = AnimationGroup(
                FadeOut(__A ,run_time=0.5 ) ,MoveToTarget(__A ,run_time=0.5 ) ,FadeIn(__A ,run_time=0.5 ) ,lag_ratio=0.2 )
            self.play(__A )
            model_cpu_arr[i].generate_target()
            model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
            if i < 5:
                # Pre-stage the next layer's weights on the GPU.
                model_cpu_arr[i + 1].generate_target()
                model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
                if i >= 1:
                    _lowercase = 0.7
                self.play(
                    Circumscribe(model_arr[i] ,**__A ) ,Circumscribe(cpu_left_col_base[i] ,**__A ) ,Circumscribe(cpu_left_col_base[i + 1] ,color=__A ,**__A ) ,Circumscribe(gpu_rect[0] ,color=__A ,**__A ) ,Circumscribe(model_arr[i + 1] ,color=__A ,**__A ) ,)
                if i < 1:
                    self.play(
                        MoveToTarget(model_cpu_arr[i] ) ,MoveToTarget(model_cpu_arr[i + 1] ) ,)
                else:
                    self.play(
                        MoveToTarget(model_cpu_arr[i] ,run_time=0.7 ) ,MoveToTarget(model_cpu_arr[i + 1] ,run_time=0.7 ) ,)
            else:
                # Last layer: return its weights to the CPU and move the
                # input past the end of the model.
                model_cpu_arr[i].generate_target()
                model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
                input.generate_target()
                input.target.next_to(model_arr[-1].get_right() ,RIGHT + 0.02 ,buff=0.2 )
                self.play(
                    Circumscribe(model_arr[-1] ,color=__A ,**__A ) ,Circumscribe(cpu_left_col_base[-1] ,color=__A ,**__A ) ,Circumscribe(gpu_rect[0] ,color=__A ,**__A ) ,)
                self.play(MoveToTarget(model_cpu_arr[i] ) )
        _lowercase = a_c
        _lowercase = a_c.copy()
        input.generate_target()
        input.target.next_to(model_base[-1] ,RIGHT + 0.02 ,buff=0.5 )
        self.play(
            FadeOut(__A ) ,FadeOut(__A ,run_time=0.5 ) ,)
        # Closing caption.
        _lowercase = MarkupText(F"""Inference on a model too large for GPU memory\nis successfully completed.""" ,font_size=24 )
        step_a.move_to([2, 2, 0] )
        self.play(Write(__A ,run_time=3 ) ,MoveToTarget(__A ) )
        # NOTE(review): the trailing "| 67 | 0 |" below is dataset-join
        # residue fused onto this line; the statement was `self.wait()`.
        self.wait() | 67 | 0 |
'''simple docstring'''
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    """Interleave two strings character by character.

    Characters are taken alternately from ``first_str`` and ``second_str``;
    once the shorter string is exhausted, the remainder of the longer one is
    appended in order.

    >>> alternative_string_arrange("ABCD", "XY")
    'AXBYCD'
    >>> alternative_string_arrange("AB", "XYZ")
    'AXBYZ'
    """
    # zip_longest pads the shorter string with "" so the tail of the longer
    # string is preserved; this replaces the manual index/bounds-check loop.
    from itertools import zip_longest

    return "".join(a + b for a, b in zip_longest(first_str, second_str, fillvalue=""))


# Backward-compatible alias for the previous obfuscated function name.
_lowerCAmelCase = alternative_string_arrange


if __name__ == "__main__":
    print(alternative_string_arrange('AB', 'XYZ'), end=' ')
| 56 |
'''simple docstring'''
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
# Fix: all three strings were bound to the same name, while the metric class
# below references _CITATION / _DESCRIPTION / _KWARGS_DESCRIPTION — which
# were otherwise undefined. String contents are unchanged.
_CITATION = '''\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
'''
_DESCRIPTION = '''\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
'''
_KWARGS_DESCRIPTION = '''
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
predictions: list of predictions to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"pearson": Pearson Correlation
"spearmanr": Spearman Correlation
"matthews_correlation": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric(\'glue\', \'sst2\')  # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\')  # \'mrpc\' or \'qqp\'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
{\'pearson\': 1.0, \'spearmanr\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'cola\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''

# Preserve the previous module-level binding (last assignment won).
_SCREAMING_SNAKE_CASE = _KWARGS_DESCRIPTION
def simple_accuracy(preds, labels):
    """Fraction of positions where `preds` equals `labels`, as a float.

    Both arguments are expected to be numpy arrays (elementwise `==` and
    `.mean()` are used).
    """
    return float((preds == labels).mean())


# Backward-compatible alias for the previous obfuscated function name.
_lowerCAmelCase = simple_accuracy
def acc_and_fa(preds, labels):
    """Accuracy together with F1 for the same predictions.

    Named to match the `acc_and_fa(...)` call in the metric class below
    (the obfuscated `_lowerCAmelCase` binding was shadowed and uncallable).
    """
    acc = simple_accuracy(preds, labels)
    fa = float(fa_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": fa,
    }


# Backward-compatible alias for the previous obfuscated function name.
_lowerCAmelCase = acc_and_fa
def pearson_and_spearman(preds, labels):
    """Pearson and Spearman correlation coefficients between `preds` and `labels`.

    Named to match the `pearson_and_spearman(...)` call in the metric class
    below (the obfuscated `_lowerCAmelCase` binding was uncallable).
    """
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }


# Backward-compatible alias for the previous obfuscated function name.
_lowerCAmelCase = pearson_and_spearman
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class __lowercase(datasets.Metric):
    """GLUE metric: dispatches on `self.config_name` to the right score(s)."""

    # `_info` / `_compute` are the hook names `datasets.Metric` dispatches
    # to; the obfuscated `_UpperCAmelCase` methods were never called (and
    # had duplicate parameter names, a SyntaxError).
    def _info(self):
        """Declare the metric's inputs; predictions/references are int64
        except for the regression task stsb (float32)."""
        if self.config_name not in [
            "sst2",
            "mnli",
            "mnli_mismatched",
            "mnli_matched",
            "cola",
            "stsb",
            "mrpc",
            "qqp",
            "qnli",
            "rte",
            "wnli",
            "hans",
        ]:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]')
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "stsb" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "stsb" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        """Compute the config-specific score dict for the given predictions."""
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_fa(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]')
| 56 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    """Holds the configuration the image-processing tests below run with.

    Renamed to match the `LayoutLMvaImageProcessingTester(self)` call in the
    test class (the obfuscated name was shadowed and unusable).
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        # NOTE(review): parameter names reconstructed from the attribute
        # reads in the test class; only positional use is visible here.
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        """The kwargs dict used to instantiate the processor under test."""
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for LayoutLMvaImageProcessor (resize + optional Tesseract OCR).

    Fixes: the mixin base was the undefined placeholder `__lowerCamelCase`
    (this module imports ImageProcessingSavingTestMixin for it), and the
    obfuscated `lowerCAmelCase_` method names were never collected by
    unittest, so all test methods are renamed with the `test_` prefix.
    """

    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        # unittest hook; previously the tester instance was lost to a dummy local.
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        # Deliberate no-op placeholder in the original file.
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_layoutlmva_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMvaImageProcessor()
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")
        image = Image.open(ds[0]["file"]).convert("RGB")
        encoding = image_processing(image, return_tensors="pt")
        self.assertEqual(encoding.pixel_values.shape, (1, 3, 2_24, 2_24))
        self.assertEqual(len(encoding.words), len(encoding.boxes))
        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        expected_words = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]]  # noqa: E231
        expected_boxes = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], 
        [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 
        6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]]  # noqa: E231
        # fmt: on
        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)
        # with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False)
        encoding = image_processing(image, return_tensors="pt")
        self.assertEqual(encoding.pixel_values.shape, (1, 3, 2_24, 2_24))


# Backward-compatible alias for the previous obfuscated class name.
lowerCamelCase__ = LayoutLMvaImageProcessingTest
| 331 |
'''simple docstring'''
from collections import defaultdict
class lowerCamelCase__ :
    """Count the ways to assign ``total`` distinct tasks to M persons.

    Each person may perform only the tasks listed for them, every person must
    receive exactly one task, and no task may be given to two persons.
    Solved with bitmask dynamic programming over the set of assigned persons.

    NOTE(review): the original (machine-mangled) version gave both methods the
    same name, referenced undefined helpers, and used ``total_ways_util``
    before binding it; the intended identifiers are restored here.
    """

    def __init__(self, task_performed, total):
        self.total_tasks = total  # total no of tasks (N)
        # DP table of dimension (2^M) * (N + 1); -1 marks "not yet computed".
        self.dp = [
            [-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))
        ]
        self.task = defaultdict(list)  # task number -> persons able to do it
        # final_mask has one bit set per person; reaching it means everyone
        # has been assigned a task.
        self.final_mask = (1 << len(task_performed)) - 1

    def count_ways_until(self, mask, task_no):
        """Return the number of valid completions given ``mask`` of already
        assigned persons, considering tasks ``task_no`` .. N."""
        # All persons are distributed: one complete assignment found.
        if mask == self.final_mask:
            return 1
        # No tasks left but some persons are still unassigned.
        if task_no > self.total_tasks:
            return 0
        # Memoized sub-result.
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]
        # Ways when this task is left unassigned.
        total_ways_util = self.count_ways_until(mask, task_no + 1)
        # Try giving this task to each eligible, still-free person.
        if task_no in self.task:
            for p in self.task[task_no]:
                if mask & (1 << p):  # person p already has a task
                    continue
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)
        self.dp[mask][task_no] = total_ways_util
        return self.dp[mask][task_no]

    def count_no_of_ways(self, task_performed):
        """Index persons by task, then count complete assignments."""
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)
        # Final answer: no person assigned yet (mask 0), starting at task 1.
        return self.count_ways_until(0, 1)


# Backward-compatible alias: the demo below refers to the class by this name.
AssignmentUsingBitmask = lowerCamelCase__
if __name__ == "__main__":
    # Demo: 5 tasks, 3 persons with the given task capabilities.
    # (The original mangled script bound these values to throwaway names and
    # then referenced undefined ``task_performed``/``total_tasks``.)
    total_tasks = 5  # total no of tasks (the value of N)
    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
    print(
        AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
            task_performed
        )
    )
| 331 | 1 |
"""simple docstring"""
def snake_case(A_: int = 2_000_000) -> int:
    """Return the sum of all primes strictly below ``A_``.

    Sieve of Eratosthenes: ``composite[i] == 0`` means ``i`` is still
    considered prime.  The original mangled body assigned the sieve marks to
    throwaway locals (so 0, 1 and all composites stayed "prime"), producing a
    wrong sum; the parameter was also annotated ``Dict`` instead of ``int``.

    >>> snake_case(10)
    17
    """
    n = A_
    composite = [0 for _ in range(n + 1)]
    # 0 and 1 are not prime.
    composite[0] = 1
    composite[1] = 1
    # Cross out multiples of each prime up to sqrt(n).
    for i in range(2, int(n**0.5) + 1):
        if composite[i] == 0:
            for j in range(i * i, n + 1, i):
                composite[j] = 1
    # Sum primes strictly below n.
    sum_of_primes = 0
    for i in range(n):
        if composite[i] == 0:
            sum_of_primes += i
    return sum_of_primes
if __name__ == "__main__":
    # The sieve above is defined as ``snake_case``; the original print called
    # an undefined ``solution`` name and crashed with NameError.
    print(f'''{snake_case() = }''')
| 714 |
"""simple docstring"""
from __future__ import annotations
def snake_case(voltage: float, current: float, resistance: float) -> dict[str, float]:
    """Apply Ohm's law to compute the one quantity passed as 0.

    Exactly one of ``voltage`` (volts), ``current`` (amperes) or
    ``resistance`` (ohms) must be 0; it is derived from the other two and
    returned as a single-entry dict keyed by the quantity name.

    The original signature declared all three parameters as ``A_`` (a
    SyntaxError) while the body used these names; the intended names are
    restored.

    Raises:
        ValueError: if zero or more than one argument is 0, or if
            ``resistance`` is negative.
    """
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError('One and only one argument must be 0')
    if resistance < 0:
        raise ValueError('Resistance cannot be negative')
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        # Unreachable: the count(0) check above guarantees one branch matched.
        raise ValueError('Exactly one argument must be 0')
if __name__ == "__main__":
    # Run any doctest examples in this module as a smoke test.
    import doctest
    doctest.testmod()
| 118 | 0 |
"""simple docstring"""
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
UpperCAmelCase = logging.getLogger(__name__)
class lowercase ( lowercase__ ):
    """PyTorch Lightning module that fine-tunes a seq2seq model for summarization.

    Wires up dataset construction, loss computation (optionally with label
    smoothing), generation-based validation/test metrics (ROUGE) and the
    command-line arguments specific to this task.

    NOTE(review): identifiers in this block are machine-mangled: the four
    class attributes below all share one name (only the last binding
    survives), several signatures repeat a parameter name, and most
    assignments bind to a throwaway local (``lowerCAmelCase``) instead of
    ``self.<attr>`` or the variable the later code reads.  The block is
    documented as-is; restoring it to runnable form requires the original
    identifier mapping (cf. transformers' seq2seq ``finetune.py``).
    """
    lowercase = '''summarization'''  # presumably the task ``mode`` — confirm
    lowercase = ['''loss''']         # presumably ``loss_names`` — confirm
    lowercase = ROUGE_KEYS           # presumably ``metric_names`` — confirm
    lowercase = '''rouge2'''         # presumably ``default_val_metric`` — confirm
    def __init__(self : int ,SCREAMING_SNAKE_CASE_ : str ,**SCREAMING_SNAKE_CASE_ : Dict ) -> Dict:
        """Validate sampler options, initialise the base transformer, and set up
        dataset kwargs, split sizes, target lengths and generation settings."""
        # Sortish sampling and dynamic batching are mutually exclusive and
        # neither works with multi-GPU here.
        if hparams.sortish_sampler and hparams.gpus > 1:
            lowerCAmelCase = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError('''Dynamic Batch size does not work for multi-gpu training''' )
            if hparams.sortish_sampler:
                raise ValueError('''--sortish_sampler and --max_tokens_per_batch may not be used simultaneously''' )
        super().__init__(SCREAMING_SNAKE_CASE_ ,num_labels=SCREAMING_SNAKE_CASE_ ,mode=self.mode ,**SCREAMING_SNAKE_CASE_ )
        use_task_specific_params(self.model ,'''summarization''' )
        # Record the git revision alongside outputs for reproducibility.
        save_git_info(self.hparams.output_dir )
        lowerCAmelCase = Path(self.output_dir ) / '''metrics.json'''
        lowerCAmelCase = Path(self.output_dir ) / '''hparams.pkl'''
        pickle_save(self.hparams ,self.hparams_save_path )
        lowerCAmelCase = 0
        lowerCAmelCase = defaultdict(SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase = self.config.model_type
        # FSMT keeps separate source/target vocabularies.
        lowerCAmelCase = self.config.tgt_vocab_size if self.model_type == '''fsmt''' else self.config.vocab_size
        lowerCAmelCase = {
            "data_dir": self.hparams.data_dir,
            "max_source_length": self.hparams.max_source_length,
            "prefix": self.model.config.prefix or "",
        }
        # Number of observations to use per split (-1 means "all").
        lowerCAmelCase = {
            '''train''': self.hparams.n_train,
            '''val''': self.hparams.n_val,
            '''test''': self.hparams.n_test,
        }
        lowerCAmelCase = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
        lowerCAmelCase = {
            '''train''': self.hparams.max_target_length,
            '''val''': self.hparams.val_max_target_length,
            '''test''': self.hparams.test_max_target_length,
        }
        # Eval targets must not be shorter than training targets.
        assert self.target_lens["train"] <= self.target_lens["val"], F"""target_lens: {self.target_lens}"""
        assert self.target_lens["train"] <= self.target_lens["test"], F"""target_lens: {self.target_lens}"""
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model )
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder() )
            assert_all_frozen(self.model.get_encoder() )
        lowerCAmelCase = get_git_info()['''repo_sha''']
        lowerCAmelCase = hparams.num_workers
        lowerCAmelCase = None  # default to config
        # MBart needs the target-language code as the decoder start token.
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer ,SCREAMING_SNAKE_CASE_ ):
            lowerCAmelCase = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            lowerCAmelCase = self.decoder_start_token_id
        lowerCAmelCase = (
            SeqaSeqDataset if hasattr(self.tokenizer ,'''prepare_seq2seq_batch''' ) else LegacySeqaSeqDataset
        )
        lowerCAmelCase = False
        lowerCAmelCase = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            lowerCAmelCase = self.hparams.eval_max_gen_length
        else:
            lowerCAmelCase = self.model.config.max_length
        lowerCAmelCase = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
    def UpperCAmelCase (self : Any ,SCREAMING_SNAKE_CASE_ : Dict[str, torch.Tensor] ) -> Dict[str, List[str]]:
        """Decode one tensor batch to text and dump both the text view and the
        raw token ids to JSON in the output dir, for debugging."""
        lowerCAmelCase = {
            k: self.tokenizer.batch_decode(v.tolist() ) if '''mask''' not in k else v.shape for k, v in batch.items()
        }
        save_json(SCREAMING_SNAKE_CASE_ ,Path(self.output_dir ) / '''text_batch.json''' )
        save_json({k: v.tolist() for k, v in batch.items()} ,Path(self.output_dir ) / '''tok_batch.json''' )
        lowerCAmelCase = True
        return readable_batch
    def UpperCAmelCase (self : Tuple ,SCREAMING_SNAKE_CASE_ : Union[str, Any] ,**SCREAMING_SNAKE_CASE_ : Tuple ) -> str:
        """Delegate the forward pass to the wrapped model."""
        return self.model(SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
    def UpperCAmelCase (self : List[Any] ,SCREAMING_SNAKE_CASE_ : List[int] ) -> List[Any]:
        """Decode generated ids to whitespace-stripped strings."""
        lowerCAmelCase = self.tokenizer.batch_decode(
            SCREAMING_SNAKE_CASE_ ,skip_special_tokens=SCREAMING_SNAKE_CASE_ ,clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ )
        return lmap(str.strip ,SCREAMING_SNAKE_CASE_ )
    def UpperCAmelCase (self : Any ,SCREAMING_SNAKE_CASE_ : dict ) -> Tuple:
        """Run one teacher-forced forward pass and return the loss tuple,
        using plain cross-entropy or label-smoothed NLL."""
        lowerCAmelCase = self.tokenizer.pad_token_id
        lowerCAmelCase , lowerCAmelCase = batch['''input_ids'''], batch['''attention_mask''']
        lowerCAmelCase = batch['''labels''']
        # T5 shifts labels internally; other models use the shared helper.
        if isinstance(self.model ,SCREAMING_SNAKE_CASE_ ):
            lowerCAmelCase = self.model._shift_right(SCREAMING_SNAKE_CASE_ )
        else:
            lowerCAmelCase = shift_tokens_right(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
        if not self.already_saved_batch:  # This would be slightly better if it only happened on rank zero
            lowerCAmelCase = decoder_input_ids
            self.save_readable_batch(SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase = self(SCREAMING_SNAKE_CASE_ ,attention_mask=SCREAMING_SNAKE_CASE_ ,decoder_input_ids=SCREAMING_SNAKE_CASE_ ,use_cache=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase = outputs['''logits''']
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            lowerCAmelCase = nn.CrossEntropyLoss(ignore_index=SCREAMING_SNAKE_CASE_ )
            assert lm_logits.shape[-1] == self.vocab_size
            lowerCAmelCase = ce_loss_fct(lm_logits.view(-1 ,lm_logits.shape[-1] ) ,tgt_ids.view(-1 ) )
        else:
            lowerCAmelCase = nn.functional.log_softmax(SCREAMING_SNAKE_CASE_ ,dim=-1 )
            lowerCAmelCase , lowerCAmelCase = label_smoothed_nll_loss(
                SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,self.hparams.label_smoothing ,ignore_index=SCREAMING_SNAKE_CASE_ )
        return (loss,)
    @property
    def UpperCAmelCase (self : Union[str, Any] ) -> int:
        """The tokenizer's padding token id."""
        return self.tokenizer.pad_token_id
    def UpperCAmelCase (self : Optional[int] ,SCREAMING_SNAKE_CASE_ : Tuple ,SCREAMING_SNAKE_CASE_ : List[str] ) -> Dict:
        """Lightning training step: compute the loss and log batch statistics."""
        lowerCAmelCase = self._step(SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase = dict(zip(self.loss_names ,SCREAMING_SNAKE_CASE_ ) )
        # tokens per batch
        lowerCAmelCase = batch['''input_ids'''].ne(self.pad ).sum() + batch['''labels'''].ne(self.pad ).sum()
        lowerCAmelCase = batch['''input_ids'''].shape[0]
        lowerCAmelCase = batch['''input_ids'''].eq(self.pad ).sum()
        lowerCAmelCase = batch['''input_ids'''].eq(self.pad ).float().mean()
        # TODO(SS): make a wandb summary metric for this
        return {"loss": loss_tensors[0], "log": logs}
    def UpperCAmelCase (self : str ,SCREAMING_SNAKE_CASE_ : List[Any] ,SCREAMING_SNAKE_CASE_ : Dict ) -> Dict:
        """Lightning validation step: generate and score one batch."""
        return self._generative_step(SCREAMING_SNAKE_CASE_ )
    def UpperCAmelCase (self : Optional[int] ,SCREAMING_SNAKE_CASE_ : Optional[Any] ,SCREAMING_SNAKE_CASE_ : List[str]="val" ) -> Dict:
        """Aggregate per-step losses and generative metrics for one epoch,
        record them under ``self.metrics[prefix]`` and return a log dict."""
        self.step_count += 1
        lowerCAmelCase = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
        lowerCAmelCase = losses['''loss''']
        lowerCAmelCase = {
            k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ['''gen_time''', '''gen_len''']
        }
        lowerCAmelCase = (
            generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
        )
        lowerCAmelCase = torch.tensor(SCREAMING_SNAKE_CASE_ ).type_as(SCREAMING_SNAKE_CASE_ )
        generative_metrics.update({k: v.item() for k, v in losses.items()} )
        losses.update(SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase = {F"""{prefix}_avg_{k}""": x for k, x in losses.items()}
        lowerCAmelCase = self.step_count
        self.metrics[prefix].append(SCREAMING_SNAKE_CASE_ )  # callback writes this to self.metrics_save_path
        lowerCAmelCase = flatten_list([x['''preds'''] for x in outputs] )
        return {
            "log": all_metrics,
            "preds": preds,
            F"""{prefix}_loss""": loss,
            F"""{prefix}_{self.val_metric}""": metric_tensor,
        }
    def UpperCAmelCase (self : Union[str, Any] ,SCREAMING_SNAKE_CASE_ : Union[str, Any] ,SCREAMING_SNAKE_CASE_ : List[str] ) -> Dict:
        """Score generated summaries against references with ROUGE."""
        return calculate_rouge(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
    def UpperCAmelCase (self : List[str] ,SCREAMING_SNAKE_CASE_ : dict ) -> dict:
        """Generate predictions for one batch, time the generation, and return
        losses plus generative metrics (ROUGE, gen_time, gen_len, texts)."""
        lowerCAmelCase = time.time()
        # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        lowerCAmelCase = self.model.generate(
            batch['''input_ids'''] ,attention_mask=batch['''attention_mask'''] ,use_cache=SCREAMING_SNAKE_CASE_ ,decoder_start_token_id=self.decoder_start_token_id ,num_beams=self.eval_beams ,max_length=self.eval_max_length ,)
        lowerCAmelCase = (time.time() - ta) / batch['''input_ids'''].shape[0]
        lowerCAmelCase = self.ids_to_clean_text(SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase = self.ids_to_clean_text(batch['''labels'''] )
        lowerCAmelCase = self._step(SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase = dict(zip(self.loss_names ,SCREAMING_SNAKE_CASE_ ) )
        lowerCAmelCase = self.calc_generative_metrics(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase = np.mean(lmap(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) )
        base_metrics.update(gen_time=SCREAMING_SNAKE_CASE_ ,gen_len=SCREAMING_SNAKE_CASE_ ,preds=SCREAMING_SNAKE_CASE_ ,target=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
        return base_metrics
    def UpperCAmelCase (self : Dict ,SCREAMING_SNAKE_CASE_ : int ,SCREAMING_SNAKE_CASE_ : Any ) -> Tuple:
        """Lightning test step: same generative evaluation as validation."""
        return self._generative_step(SCREAMING_SNAKE_CASE_ )
    def UpperCAmelCase (self : Optional[Any] ,SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Tuple:
        """Aggregate test outputs under the ``test`` prefix."""
        return self.validation_epoch_end(SCREAMING_SNAKE_CASE_ ,prefix='''test''' )
    def UpperCAmelCase (self : int ,SCREAMING_SNAKE_CASE_ : List[Any] ) -> SeqaSeqDataset:
        """Build the dataset for one split using the configured class and limits."""
        lowerCAmelCase = self.n_obs[type_path]
        lowerCAmelCase = self.target_lens[type_path]
        lowerCAmelCase = self.dataset_class(
            self.tokenizer ,type_path=SCREAMING_SNAKE_CASE_ ,n_obs=SCREAMING_SNAKE_CASE_ ,max_target_length=SCREAMING_SNAKE_CASE_ ,**self.dataset_kwargs ,)
        return dataset
    def UpperCAmelCase (self : Dict ,SCREAMING_SNAKE_CASE_ : str ,SCREAMING_SNAKE_CASE_ : int ,SCREAMING_SNAKE_CASE_ : bool = False ) -> DataLoader:
        """Build a DataLoader; for the train split, optionally use a sortish
        sampler or a dynamic (max-tokens) batch sampler."""
        lowerCAmelCase = self.get_dataset(SCREAMING_SNAKE_CASE_ )
        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            lowerCAmelCase = dataset.make_sortish_sampler(SCREAMING_SNAKE_CASE_ ,distributed=self.hparams.gpus > 1 )
            return DataLoader(
                SCREAMING_SNAKE_CASE_ ,batch_size=SCREAMING_SNAKE_CASE_ ,collate_fn=dataset.collate_fn ,shuffle=SCREAMING_SNAKE_CASE_ ,num_workers=self.num_workers ,sampler=SCREAMING_SNAKE_CASE_ ,)
        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            lowerCAmelCase = dataset.make_dynamic_sampler(
                self.hparams.max_tokens_per_batch ,distributed=self.hparams.gpus > 1 )
            return DataLoader(
                SCREAMING_SNAKE_CASE_ ,batch_sampler=SCREAMING_SNAKE_CASE_ ,collate_fn=dataset.collate_fn ,num_workers=self.num_workers ,)
        else:
            return DataLoader(
                SCREAMING_SNAKE_CASE_ ,batch_size=SCREAMING_SNAKE_CASE_ ,collate_fn=dataset.collate_fn ,shuffle=SCREAMING_SNAKE_CASE_ ,num_workers=self.num_workers ,sampler=SCREAMING_SNAKE_CASE_ ,)
    def UpperCAmelCase (self : str ) -> DataLoader:
        """Train DataLoader (shuffled)."""
        lowerCAmelCase = self.get_dataloader('''train''' ,batch_size=self.hparams.train_batch_size ,shuffle=SCREAMING_SNAKE_CASE_ )
        return dataloader
    def UpperCAmelCase (self : Dict ) -> DataLoader:
        """Validation DataLoader."""
        return self.get_dataloader('''val''' ,batch_size=self.hparams.eval_batch_size )
    def UpperCAmelCase (self : Union[str, Any] ) -> DataLoader:
        """Test DataLoader."""
        return self.get_dataloader('''test''' ,batch_size=self.hparams.eval_batch_size )
    @staticmethod
    def UpperCAmelCase (SCREAMING_SNAKE_CASE_ : str ,SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Dict:
        """Register task-specific CLI arguments on top of the generic ones."""
        BaseTransformer.add_model_specific_args(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
        add_generic_args(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
        parser.add_argument(
            '''--max_source_length''' ,default=1_024 ,type=SCREAMING_SNAKE_CASE_ ,help=(
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            ) ,)
        parser.add_argument(
            '''--max_target_length''' ,default=56 ,type=SCREAMING_SNAKE_CASE_ ,help=(
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            ) ,)
        parser.add_argument(
            '''--val_max_target_length''' ,default=142 ,type=SCREAMING_SNAKE_CASE_ ,help=(
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            ) ,)
        parser.add_argument(
            '''--test_max_target_length''' ,default=142 ,type=SCREAMING_SNAKE_CASE_ ,help=(
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            ) ,)
        parser.add_argument('''--freeze_encoder''' ,action='''store_true''' )
        parser.add_argument('''--freeze_embeds''' ,action='''store_true''' )
        parser.add_argument('''--sortish_sampler''' ,action='''store_true''' ,default=SCREAMING_SNAKE_CASE_ )
        parser.add_argument('''--overwrite_output_dir''' ,action='''store_true''' ,default=SCREAMING_SNAKE_CASE_ )
        parser.add_argument('''--max_tokens_per_batch''' ,type=SCREAMING_SNAKE_CASE_ ,default=SCREAMING_SNAKE_CASE_ )
        parser.add_argument('''--logger_name''' ,type=SCREAMING_SNAKE_CASE_ ,choices=['''default''', '''wandb''', '''wandb_shared'''] ,default='''default''' )
        parser.add_argument('''--n_train''' ,type=SCREAMING_SNAKE_CASE_ ,default=-1 ,required=SCREAMING_SNAKE_CASE_ ,help='''# examples. -1 means use all.''' )
        parser.add_argument('''--n_val''' ,type=SCREAMING_SNAKE_CASE_ ,default=500 ,required=SCREAMING_SNAKE_CASE_ ,help='''# examples. -1 means use all.''' )
        parser.add_argument('''--n_test''' ,type=SCREAMING_SNAKE_CASE_ ,default=-1 ,required=SCREAMING_SNAKE_CASE_ ,help='''# examples. -1 means use all.''' )
        parser.add_argument(
            '''--task''' ,type=SCREAMING_SNAKE_CASE_ ,default='''summarization''' ,required=SCREAMING_SNAKE_CASE_ ,help='''# examples. -1 means use all.''' )
        parser.add_argument('''--label_smoothing''' ,type=SCREAMING_SNAKE_CASE_ ,default=0.0 ,required=SCREAMING_SNAKE_CASE_ )
        parser.add_argument('''--src_lang''' ,type=SCREAMING_SNAKE_CASE_ ,default='''''' ,required=SCREAMING_SNAKE_CASE_ )
        parser.add_argument('''--tgt_lang''' ,type=SCREAMING_SNAKE_CASE_ ,default='''''' ,required=SCREAMING_SNAKE_CASE_ )
        parser.add_argument('''--eval_beams''' ,type=SCREAMING_SNAKE_CASE_ ,default=SCREAMING_SNAKE_CASE_ ,required=SCREAMING_SNAKE_CASE_ )
        parser.add_argument(
            '''--val_metric''' ,type=SCREAMING_SNAKE_CASE_ ,default=SCREAMING_SNAKE_CASE_ ,required=SCREAMING_SNAKE_CASE_ ,choices=['''bleu''', '''rouge2''', '''loss''', None] )
        parser.add_argument('''--eval_max_gen_length''' ,type=SCREAMING_SNAKE_CASE_ ,default=SCREAMING_SNAKE_CASE_ ,help='''never generate more than n tokens''' )
        parser.add_argument('''--save_top_k''' ,type=SCREAMING_SNAKE_CASE_ ,default=1 ,required=SCREAMING_SNAKE_CASE_ ,help='''How many checkpoints to save''' )
        parser.add_argument(
            '''--early_stopping_patience''' ,type=SCREAMING_SNAKE_CASE_ ,default=-1 ,required=SCREAMING_SNAKE_CASE_ ,help=(
                '''-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So'''
                ''' val_check_interval will effect it.'''
            ) ,)
        return parser
class lowercase ( lowercase__ ):
    """Seq2seq fine-tuning module specialised for translation: BLEU replaces
    ROUGE as the validation metric.

    NOTE(review): attribute names are mangled — all four class attributes
    share one name; they presumably correspond to mode / loss_names /
    metric_names / default_val_metric of the base module — confirm.
    """
    lowercase = '''translation'''
    lowercase = ['''loss''']
    lowercase = ['''bleu''']
    lowercase = '''bleu'''
    def __init__(self : Tuple ,SCREAMING_SNAKE_CASE_ : Dict ,**SCREAMING_SNAKE_CASE_ : List[str] ) -> int:
        """Initialise the base seq2seq module and record language settings."""
        super().__init__(SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
        # NOTE(review): these bind hparams values to throwaway locals;
        # presumably they were meant to populate dataset kwargs with
        # src_lang/tgt_lang — confirm against the upstream script.
        lowerCAmelCase = hparams.src_lang
        lowerCAmelCase = hparams.tgt_lang
    def UpperCAmelCase (self : str ,SCREAMING_SNAKE_CASE_ : Tuple ,SCREAMING_SNAKE_CASE_ : List[str] ) -> dict:
        """Score generated translations against references with BLEU."""
        return calculate_bleu(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
def __magic_name__ ( _lowerCamelCase: Union[str, Any], _lowerCamelCase: int=None ) -> SummarizationModule:
    '''Entry point: build the task module, pick a logger, train, and
    optionally run prediction from the latest checkpoint.

    NOTE(review): the signature declares the same mangled name twice (a
    SyntaxError) while the body reads ``args`` and ``model`` — presumably the
    intended parameter names; confirm against the upstream script.
    '''
    Path(args.output_dir ).mkdir(exist_ok=_lowerCamelCase )
    # Refuse to clobber an output dir that already holds results.
    check_output_dir(_lowerCamelCase, expected_items=3 )
    if model is None:
        if "summarization" in args.task:
            lowerCAmelCase = SummarizationModule(_lowerCamelCase )
        else:
            lowerCAmelCase = TranslationModule(_lowerCamelCase )
    lowerCAmelCase = Path(args.data_dir ).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir ).startswith('''/tmp''' )
        or str(args.output_dir ).startswith('''/var''' )
    ):
        lowerCAmelCase = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        lowerCAmelCase = os.environ.get('''WANDB_PROJECT''', _lowerCamelCase )
        lowerCAmelCase = WandbLogger(name=model.output_dir.name, project=_lowerCamelCase )
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        lowerCAmelCase = WandbLogger(name=model.output_dir.name, project=F"""hf_{dataset}""" )
    if args.early_stopping_patience >= 0:
        lowerCAmelCase = get_early_stopping_callback(model.val_metric, args.early_stopping_patience )
    else:
        lowerCAmelCase = False
    # "loss" is minimised; generative metrics are maximised.
    lowerCAmelCase = args.val_metric == '''loss'''
    lowerCAmelCase = generic_train(
        _lowerCamelCase, _lowerCamelCase, logging_callback=SeqaSeqLoggingCallback(), checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, _lowerCamelCase ), early_stopping_callback=_lowerCamelCase, logger=_lowerCamelCase, )
    pickle_save(model.hparams, model.output_dir / '''hparams.pkl''' )
    if not args.do_predict:
        return model
    lowerCAmelCase = ''''''
    # Pick the most recent checkpoint for testing.
    lowerCAmelCase = sorted(glob.glob(os.path.join(args.output_dir, '''*.ckpt''' ), recursive=_lowerCamelCase ) )
    if checkpoints:
        lowerCAmelCase = checkpoints[-1]
        lowerCAmelCase = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams )
    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
if __name__ == "__main__":
    # Compose CLI arguments from Lightning's Trainer flags and the
    # task-specific flags, then launch training.
    UpperCAmelCase = argparse.ArgumentParser()
    UpperCAmelCase = pl.Trainer.add_argparse_args(parser)
    UpperCAmelCase = SummarizationModule.add_model_specific_args(parser, os.getcwd())
    UpperCAmelCase = parser.parse_args()
    main(args)
| 535 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)  # module logger
# NOTE(review): this rebinds the same mangled name and shadows the logger
# above; presumably it was the pretrained-config archive map — confirm.
UpperCAmelCase = {"""openai-gpt""": """https://huggingface.co/openai-gpt/resolve/main/config.json"""}
class lowercase ( lowercase__ ):
    """Configuration class for the original OpenAI GPT model.

    Defaults match the ``openai-gpt`` checkpoint.  The original (mangled)
    body repeated one parameter name sixteen times (a SyntaxError) and bound
    every value to a throwaway local, so nothing was ever stored on the
    instance; the canonical parameter/attribute names are restored so the
    ``PretrainedConfig`` base class can read them back.
    """

    model_type = "openai-gpt"
    # Map generic config names onto this model's historical attribute names.
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40_478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        """Store model hyper-parameters; extra kwargs go to the base config."""
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn  # activation function name
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        # Sequence-summary head settings (used by classification heads).
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
| 535 | 1 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class _lowercase :
'''simple docstring'''
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE__ : int ) -> None:
__lowerCAmelCase = num_of_nodes
__lowerCAmelCase = []
__lowerCAmelCase = {}
def a ( self : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ) -> None:
self.m_edges.append([u_node, v_node, weight] )
def a ( self : Tuple , SCREAMING_SNAKE_CASE__ : int ) -> int:
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def a ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int ) -> None:
if self.m_component[u_node] != u_node:
for k in self.m_component:
__lowerCAmelCase = self.find_component(SCREAMING_SNAKE_CASE__ )
def a ( self : Tuple , SCREAMING_SNAKE_CASE__ : list[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ) -> None:
if component_size[u_node] <= component_size[v_node]:
__lowerCAmelCase = v_node
component_size[v_node] += component_size[u_node]
self.set_component(SCREAMING_SNAKE_CASE__ )
elif component_size[u_node] >= component_size[v_node]:
__lowerCAmelCase = self.find_component(SCREAMING_SNAKE_CASE__ )
component_size[u_node] += component_size[v_node]
self.set_component(SCREAMING_SNAKE_CASE__ )
def a ( self : Union[str, Any] ) -> None:
__lowerCAmelCase = []
__lowerCAmelCase = 0
__lowerCAmelCase = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
__lowerCAmelCase = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = edge
__lowerCAmelCase = self.m_component[u]
__lowerCAmelCase = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
__lowerCAmelCase = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = edge
__lowerCAmelCase = self.m_component[u]
__lowerCAmelCase = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
print(f"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" )
num_of_components -= 1
__lowerCAmelCase = [-1] * self.m_num_of_nodes
print(f"""The total weight of the minimal spanning tree is: {mst_weight}""" )
def UpperCamelCase_ ( ) -> None:
    '''Placeholder demo hook: the body was lost in mangling and only this
    docstring remains, so calling it is a no-op.'''


if __name__ == "__main__":
    # Run any doctest examples in this module as a smoke test.
    import doctest

    doctest.testmod()
| 330 | '''simple docstring'''
from __future__ import annotations
from collections import Counter
from random import random
class _lowercase :
'''simple docstring'''
def __init__( self : Tuple ) -> Any:
__lowerCAmelCase = {}
def a ( self : Dict , SCREAMING_SNAKE_CASE__ : str ) -> None:
__lowerCAmelCase = {}
def a ( self : List[str] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : float ) -> None:
if nodea not in self.connections:
self.add_node(SCREAMING_SNAKE_CASE__ )
if nodea not in self.connections:
self.add_node(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = probability
def a ( self : Union[str, Any] ) -> list[str]:
return list(self.connections )
def a ( self : str , SCREAMING_SNAKE_CASE__ : str ) -> str:
__lowerCAmelCase = 0
__lowerCAmelCase = random()
for dest in self.connections[node]:
current_probability += self.connections[node][dest]
if current_probability > random_value:
return dest
return ""
def UpperCamelCase_(
    start: str, transitions: list[tuple[str, str, float]], steps: int
) -> dict[str, int]:
    '''Simulate a Markov chain for ``steps`` transitions from ``start`` and
    return how often each node was visited.

    The original signature declared all three parameters as ``snake_case_``
    (a SyntaxError) while the body used ``start``/``transitions``/``steps``;
    the intended names are restored.  Every node starts with a count of 1
    (from the ``Counter`` over all nodes).
    '''
    graph = MarkovChainGraphUndirectedUnweighted()
    for nodea, nodea_dest, probability in transitions:
        graph.add_transition_probability(nodea, nodea_dest, probability)
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited
if __name__ == "__main__":
    # Run any doctest examples in this module as a smoke test.
    import doctest
    doctest.testmod()
| 330 | 1 |
'''simple docstring'''
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
import sqlitea
import sqlalchemy
class __snake_case ( __SCREAMING_SNAKE_CASE ):
    '''Dataset reader that loads a `datasets.Dataset` from a SQL query/table.

    NOTE(review): identifiers are machine-mangled — the `__init__` signature
    repeats one parameter name (a SyntaxError) and the `Sql` builder is bound
    to a throwaway local although `read()` uses ``self.builder``; presumably
    the intended parameters mirror `datasets` (sql, con, features, cache_dir,
    keep_in_memory) — confirm against the upstream module.
    '''
    def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False , **__SCREAMING_SNAKE_CASE , ):
        super().__init__(features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , keep_in_memory=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
        # Builds (but does not yet run) the SQL packaged-module builder.
        snake_case__ : Optional[int] = Sql(
            cache_dir=__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE , sql=__SCREAMING_SNAKE_CASE , con=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
    def __UpperCamelCase ( self ):
        '''Materialise the query results and return them as a train split.'''
        snake_case__ : Any = None
        snake_case__ : int = None
        snake_case__ : Dict = None
        snake_case__ : List[Any] = None
        self.builder.download_and_prepare(
            download_config=__SCREAMING_SNAKE_CASE , download_mode=__SCREAMING_SNAKE_CASE , verification_mode=__SCREAMING_SNAKE_CASE , base_path=__SCREAMING_SNAKE_CASE , )
        # Build dataset for splits
        snake_case__ : List[str] = self.builder.as_dataset(
            split="""train""" , verification_mode=__SCREAMING_SNAKE_CASE , in_memory=self.keep_in_memory )
        return dataset
class __snake_case :
    '''Writes a `datasets.Dataset` to a SQL table via pandas ``DataFrame.to_sql``,
    batch by batch, optionally fanning batches out over a process pool.

    NOTE(review): identifiers are machine-mangled — the `__init__` signature
    repeats one parameter name (a SyntaxError) and attribute values are bound
    to throwaway locals although the methods read ``self.dataset``,
    ``self.name``, ``self.con``, ``self.batch_size``, ``self.num_proc`` and
    ``self.to_sql_kwargs`` — confirm against the upstream `datasets` module.
    '''
    def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0." )
        snake_case__ : Dict = dataset
        snake_case__ : Any = name
        snake_case__ : Tuple = con
        snake_case__ : Any = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        snake_case__ : Optional[int] = num_proc
        snake_case__ : str = to_sql_kwargs
    def __UpperCamelCase ( self ):
        '''Strip reader-only kwargs (sql/con/index) and run the batched write;
        returns the number of rows written.'''
        snake_case__ : Dict = self.to_sql_kwargs.pop("""sql""" , __SCREAMING_SNAKE_CASE )
        snake_case__ : int = self.to_sql_kwargs.pop("""con""" , __SCREAMING_SNAKE_CASE )
        snake_case__ : List[str] = self.to_sql_kwargs.pop("""index""" , __SCREAMING_SNAKE_CASE )
        snake_case__ : Any = self._write(index=__SCREAMING_SNAKE_CASE , **self.to_sql_kwargs )
        return written
    def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
        '''Write one slice of the dataset: only the first batch may create or
        replace the table; later offsets always append.'''
        snake_case__ , snake_case__ , snake_case__ : List[Any] = args
        snake_case__ : List[str] = {**to_sql_kwargs, """if_exists""": """append"""} if offset > 0 else to_sql_kwargs
        snake_case__ : List[str] = query_table(
            table=self.dataset.data , key=slice(__SCREAMING_SNAKE_CASE , offset + self.batch_size ) , indices=self.dataset._indices , )
        snake_case__ : int = batch.to_pandas()
        snake_case__ : Optional[Any] = df.to_sql(self.name , self.con , index=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
        return num_rows or len(__SCREAMING_SNAKE_CASE )
    def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
        '''Iterate over batch offsets (serially or via a multiprocessing pool)
        and accumulate the number of rows written, with a progress bar.'''
        snake_case__ : int = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0 , len(self.dataset ) , self.batch_size ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ):
                written += self._batch_sql((offset, index, to_sql_kwargs) )
        else:
            snake_case__ , snake_case__ : int = len(self.dataset ), self.batch_size
            with multiprocessing.Pool(self.num_proc ) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ):
                    written += num_rows
        return written
| 38 |
"""simple docstring"""
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
UpperCAmelCase = logging.get_logger(__name__)
class __magic_name__:
    """Utility class holding a conversation and its history for `ConversationalPipeline`.

    Tracks the not-yet-processed user input (`new_user_input`), the processed user
    inputs (`past_user_inputs`) and the model replies (`generated_responses`), plus a
    UUID identifying the conversation.
    """

    def __init__(
        self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None
    ):
        # A fresh random id is generated when the caller does not provide one.
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []
        self.uuid: uuid.UUID = conversation_id
        self.past_user_inputs: List[str] = past_user_inputs
        self.generated_responses: List[str] = generated_responses
        self.new_user_input: Optional[str] = text

    def __eq__(self, other):
        """Two conversations are equal when they share a UUID or the exact same state."""
        if not isinstance(other, __magic_name__):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text: str, overwrite: bool = False):
        """Queue `text` as the next user input; refuses (or overwrites) if one is pending."""
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f"""User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten """
                    f"""with: \"{text}\"."""
                )
                self.new_user_input = text
            else:
                logger.warning(
                    f"""User input added while unprocessed input was existing: \"{self.new_user_input}\" new input """
                    f"""ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input"""
                )
        else:
            self.new_user_input = text

    def mark_processed(self):
        """Move the pending user input into the processed history."""
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        """Record a model-generated response."""
        self.generated_responses.append(response)

    def iter_texts(self):
        """Yield `(is_user, text)` pairs in chronological order, pending input last."""
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f"""Conversation id: {self.uuid} \n"""
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"""{name} >> {text} \n"""
        return output


# The rest of this file refers to this class by its original name `Conversation`
# (see the pipeline's annotations and `preprocess`); keep that name resolvable.
Conversation = __magic_name__
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    R"\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n ",
)
class __magic_name__(Pipeline):
    """Multi-turn conversational pipeline built on a generation model.

    The pipeline hooks below (`_sanitize_parameters`, `preprocess`, `_forward`,
    `postprocess`) are the names the `Pipeline` base class dispatches to.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Generation needs a pad token; fall back to EOS when the tokenizer has none.
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(
        self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs
    ):
        """Split `__call__` kwargs into preprocess / forward / postprocess parameter dicts."""
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}
        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens
        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces
        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations: Union[Conversation, List[Conversation]], num_workers=0, **kwargs):
        """Run the pipeline; unwrap a single-element result list for convenience."""
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation: Conversation, min_length_for_response=32) -> Dict[str, Any]:
        """Tokenize the conversation history into model inputs."""
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs")
        if conversation.new_user_input is None:
            raise ValueError(
                f"""Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. """
                "Add user inputs with the conversation's `add_user_input` method"
            )
        # Prefer the tokenizer's own conversation serialization when available.
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)
        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        """Generate a reply, trimming the prompt so at least `minimum_tokens` remain for it."""
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})")
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        # Encoder-decoder models emit a leading decoder start token; decoder-only
        # models echo the whole prompt, so skip its `n` tokens.
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        """Decode the generated ids and fold the answer back into the conversation."""
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0],
            skip_special_tokens=True,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
        )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation: Conversation) -> List[int]:
        """Fallback serialization: each turn encoded and joined with the EOS token."""
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))
        # Keep only the most recent context when the history exceeds the model window.
        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
| 677 | 0 |
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
# Lazily-populated cache used by `list_image_compression_formats` below; the functions
# in this module refer to these module-level names, so they must be bound exactly so.
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
    np.dtype("|b1"),
    np.dtype("|u1"),
    np.dtype("<u2"),
    np.dtype(">u2"),
    np.dtype("<i2"),
    np.dtype(">i2"),
    np.dtype("<u4"),
    np.dtype(">u4"),
    np.dtype("<i4"),
    np.dtype(">i4"),
    np.dtype("<f4"),
    np.dtype(">f4"),
    np.dtype("<f8"),
    np.dtype(">f8"),
]
@dataclass
class a__:
    """Image feature for the `datasets` library.

    Stores images as an Arrow struct ``{"bytes": binary, "path": string}``. The field
    and method names below (``decode``, ``pa_type``, ``encode_example`` …) are the
    Feature API the surrounding library dispatches on.
    """

    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)

    def __call__(self):
        return self.pa_type

    def encode_example(self, value) -> dict:
        """Encode a path / bytes / numpy array / PIL image / dict into the storage dict."""
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")
        if isinstance(value, list):
            value = np.array(value)
        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"""An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}."""
            )

    def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
        """Decode the storage dict back into a loaded `PIL.Image.Image`.

        ``token_per_repo_id`` maps Hub repo ids to auth tokens for private remote files.
        """
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'.")
        if token_per_repo_id is None:
            token_per_repo_id = {}
        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"""An image should have one of 'path' or 'bytes' but both are None in {value}.""")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split("::")[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten to raw `{bytes, path}` columns when decoding is disabled."""
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value("binary"),
                "path": Value("string"),
            }
        )

    def cast_storage(self, storage) -> pa.StructArray:
        """Cast string / binary / struct / list Arrow storage into the Image struct type."""
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            # Lists are treated as raw image arrays and re-encoded to image bytes.
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
                type=pa.binary(),
            )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
            )
        return array_cast(storage, self.pa_type)

    def embed_storage(self, storage) -> pa.StructArray:
        """Read every local/remote "path" into "bytes" so the table is self-contained."""

        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        # Only the basename is kept once the bytes are embedded.
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
def list_image_compression_formats() -> List[str]:
    """Return (and cache) the Pillow formats that support both open and save.

    The cache lives in the module-level `_IMAGE_COMPRESSION_FORMATS`; this name is how
    `image_to_bytes` calls the function, so it must be bound at module level.
    """
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")
    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()  # populate PIL's OPEN/SAVE registries
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS
def image_to_bytes(image) -> bytes:
    """Serialize a PIL image to bytes, keeping its format when it is round-trippable.

    Falls back to PNG for standard modes and TIFF otherwise (e.g. high bit-depth).
    """
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer, format=format)
    return buffer.getvalue()
def encode_pil_image(image) -> dict:
    """Encode a PIL image into the `{path, bytes}` storage dict.

    A named source file is referenced by path (no byte duplication); otherwise the
    image is serialized with `image_to_bytes`.
    """
    if hasattr(image, "filename") and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}
def encode_np_array(array) -> dict:
    """Encode a numpy array as image bytes, downcasting to a Pillow-compatible dtype.

    Raises:
        TypeError: when the dtype cannot be mapped to any valid image dtype.
    """
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")
    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize
    dest_dtype = None

    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"""Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays."""
            )
        if dtype is not dest_dtype:
            warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            dest_dtype = np.dtype(dest_dtype_str)
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""")
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"""Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}"""
            )

    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}
def objects_to_list_of_image_dicts(objs):
    """Encode a list of image-like objects into `{path, bytes}` dicts.

    Dispatch is based on the first non-null element: strings are treated as paths,
    numpy arrays / PIL images are serialized; anything else is returned unchanged.
    Nulls are preserved via `no_op_if_value_is_null`.
    """
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")
    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs


def lowerCAmelCase(hex_num: str) -> int:
    """Convert a (possibly signed) hexadecimal string to its binary digits as an int.

    >>> lowerCAmelCase("AC")
    10101100
    >>> lowerCAmelCase("-fc")
    -11111100
    >>> lowerCAmelCase("0")
    0

    Raises:
        ValueError: for an empty/whitespace-only input or a non-hexadecimal value.
    """
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("No value was passed to the function")
    is_negative = hex_num[0] == "-"
    if is_negative:
        hex_num = hex_num[1:]
    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError("Invalid value was passed to the function") from None
    # bin() handles the zero case; the original digit-peeling loop produced an empty
    # string for input "0" and crashed on int("").
    bin_str = bin(int_num)[2:]
    return int(("-" + bin_str) if is_negative else bin_str)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 246 | 1 |
'''simple docstring'''
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class lowerCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
    """Tests for `BertJapaneseTokenizer` with word-level Japanese tokenizers
    (MeCab / Sudachi / Juman++) and a wordpiece sub-tokenizer.

    Attribute and method names follow the `TokenizerTesterMixin` contract
    (`tokenizer_class`, `setUp`, `get_input_output_texts`, `get_clean_sequence`)
    and unittest discovery (`test_*`).
    """

    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True

    def setUp(self):
        """Write a small Japanese wordpiece vocab file into the test temp dir."""
        super().setUp()
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "こんにちは",
            "こん",
            "にちは",
            "ばんは",
            "##こん",
            "##にちは",
            "##ばんは",
            "世界",
            "##世界",
            "、",
            "##、",
            "。",
            "##。",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こんにちは 、 世界 。 こんばんは 、 世界 。"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。")
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

    def test_pickle_mecab_tokenizer(self):
        """A MeCab-backed tokenizer must survive a pickle round trip."""
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="mecab")
        self.assertIsNotNone(tokenizer)
        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])
        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)
        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)
        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens, tokens_loaded)

    def test_mecab_tokenizer_ipadic(self):
        tokenizer = MecabTokenizer(mecab_dic="ipadic")
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"], )

    def test_mecab_tokenizer_unidic_lite(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic_lite")
        except ModuleNotFoundError:
            return
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"], )

    def test_mecab_tokenizer_unidic(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic")
        except ModuleNotFoundError:
            return
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"], )

    def test_mecab_tokenizer_lower(self):
        tokenizer = MecabTokenizer(do_lower_case=True, mecab_dic="ipadic")
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["アップルストア", "で", "iphone", "8", "が", "発売", "さ", "れ", "た", "。"], )

    def test_mecab_tokenizer_with_option(self):
        try:
            # NOTE(review): these flag values were lost in a mechanical rename; chosen to
            # match the expected output below ("iPhone" keeps its case, "\u3000" is kept).
            tokenizer = MecabTokenizer(
                do_lower_case=False, normalize_text=False, mecab_option="-d /usr/local/lib/mecab/dic/jumandic"
            )
        except RuntimeError:
            # if dict doesn't exist in the system, previous code raises this error.
            return
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "\u3000", "。"], )

    def test_mecab_tokenizer_no_normalize(self):
        tokenizer = MecabTokenizer(normalize_text=False, mecab_dic="ipadic")
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", " ", "。"], )

    @require_sudachi
    def test_pickle_sudachi_tokenizer(self):
        """A Sudachi-backed tokenizer must survive a pickle round trip."""
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="sudachi")
        self.assertIsNotNone(tokenizer)
        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])
        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)
        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)
        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens, tokens_loaded)

    @require_sudachi
    def test_sudachi_tokenizer_core(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core")
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "], )

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_A(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="A")
        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国", "人", "参政", "権"])

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_B(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="B")
        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国人", "参政権"])

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_C(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="C")
        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国人参政権"])

    @require_sudachi
    def test_sudachi_tokenizer_lower(self):
        tokenizer = SudachiTokenizer(do_lower_case=True, sudachi_dict_type="core")
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), [" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "], )

    @require_sudachi
    def test_sudachi_tokenizer_no_normalize(self):
        tokenizer = SudachiTokenizer(normalize_text=False, sudachi_dict_type="core")
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "], )

    @require_sudachi
    def test_sudachi_tokenizer_trim_whitespace(self):
        tokenizer = SudachiTokenizer(trim_whitespace=True, sudachi_dict_type="core")
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"], )

    @require_jumanpp
    def test_pickle_jumanpp_tokenizer(self):
        """A Juman++-backed tokenizer must survive a pickle round trip."""
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="jumanpp")
        self.assertIsNotNone(tokenizer)
        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])
        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)
        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)
        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens, tokens_loaded)

    @require_jumanpp
    def test_jumanpp_tokenizer(self):
        tokenizer = JumanppTokenizer()
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"], )

    @require_jumanpp
    def test_jumanpp_tokenizer_lower(self):
        tokenizer = JumanppTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"], )

    @require_jumanpp
    def test_jumanpp_tokenizer_no_normalize(self):
        tokenizer = JumanppTokenizer(normalize_text=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"], )

    @require_jumanpp
    def test_jumanpp_tokenizer_trim_whitespace(self):
        tokenizer = JumanppTokenizer(trim_whitespace=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"], )

    @require_jumanpp
    def test_jumanpp_tokenizer_ext(self):
        tokenizer = JumanppTokenizer()
        self.assertListEqual(
            tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。"), ["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"], )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こんにちは"])
        self.assertListEqual(tokenizer.tokenize("こんばんは"), ["こん", "##ばんは"])
        self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは"), ["こん", "##ばんは", "[UNK]", "こんにちは"])

    def test_sentencepiece_tokenizer(self):
        tokenizer = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp")
        subword_tokenizer = tokenizer.subword_tokenizer
        tokens = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。")
        self.assertListEqual(tokens, ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"])
        tokens = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは")
        self.assertListEqual(tokens, ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"])

    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese")
        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_a = tokenizer.encode("どういたしまして。", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class lowerCAmelCase ( lowerCAmelCase_ , unittest.TestCase ):
A_ : List[str] = BertJapaneseTokenizer
A_ : Tuple = False
def _A ( self : Optional[int] ):
'''simple docstring'''
super().setUp()
lowerCAmelCase__ : int = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
lowerCAmelCase__ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def _A ( self : Optional[Any] , **a__ : Dict ):
'''simple docstring'''
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type="character" , **snake_case_ )
def _A ( self : Optional[Any] , a__ : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Any = "こんにちは、世界。 \nこんばんは、世界。"
lowerCAmelCase__ : Union[str, Any] = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
return input_text, output_text
def _A ( self : Optional[int] ):
'''simple docstring'''
pass # TODO add if relevant
def _A ( self : Union[str, Any] ):
'''simple docstring'''
pass # TODO add if relevant
def _A ( self : str ):
'''simple docstring'''
pass # TODO add if relevant
def _A ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = self.tokenizer_class(self.vocab_file , subword_tokenizer_type="character" )
lowerCAmelCase__ : int = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。" )
self.assertListEqual(
snake_case_ , ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(snake_case_ ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )
def _A ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : str = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
lowerCAmelCase__ : List[Any] = {}
for i, token in enumerate(snake_case_ ):
lowerCAmelCase__ : List[Any] = i
lowerCAmelCase__ : Optional[Any] = CharacterTokenizer(vocab=snake_case_ , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("こんにちは" ) , ["こ", "ん", "に", "ち", "は"] )
self.assertListEqual(tokenizer.tokenize("こんにちほ" ) , ["こ", "ん", "に", "ち", "[UNK]"] )
def _A ( self : str ):
    """build_inputs_with_special_tokens must wrap segments in [CLS]/[SEP] markers.

    Fix: the original lost the `text`/`text_2` locals to scrambled names and
    passed the undefined `snake_case_` everywhere; `add_special_tokens=False`
    is required by the assertions below (they add the specials manually).
    """
    tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char")
    text = tokenizer.encode("ありがとう。", add_special_tokens=False)
    text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)
    encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
    encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
    # 2 is for "[CLS]", 3 is for "[SEP]"
    assert encoded_sentence == [2] + text + [3]
    assert encoded_pair == [2] + text + [3] + text_2 + [3]
@custom_tokenizers
class lowerCAmelCase ( unittest.TestCase ):
    def _A ( self : List[Any] ):
        """AutoTokenizer must resolve this checkpoint to a BertJapaneseTokenizer.

        Fix: the original asserted on two undefined names; the expected class
        is inferred from the module under test (BertJapaneseTokenizer is
        referenced below) — NOTE(review): confirm against the full module.
        """
        checkpoint = "cl-tohoku/bert-base-japanese"
        tokenizer = AutoTokenizer.from_pretrained(checkpoint)
        self.assertIsInstance(tokenizer, BertJapaneseTokenizer)
class lowerCAmelCase ( unittest.TestCase ):
    def _A ( self : int ):
        """Loading a checkpoint with a mismatched tokenizer class must log a warning.

        Fix: the original passed the undefined `snake_case_` to
        `from_pretrained`; the local checkpoint name is used instead.
        """
        checkpoint = "cl-tohoku/bert-base-japanese"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertTokenizer.from_pretrained(checkpoint)
        self.assertTrue(
            cm.records[0].message.startswith(
                "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                " is called from."))
        checkpoint = "bert-base-cased"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertJapaneseTokenizer.from_pretrained(checkpoint)
        self.assertTrue(
            cm.records[0].message.startswith(
                "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                " is called from."))
| 378 |
'''simple docstring'''
import math
import sys
def UpperCAmelCase_ ( __lowercase : str ) -> str:
    """Read a file's raw bytes and return them as a '0'/'1' bit-string (8 bits/byte).

    Exits the process with a message when the file cannot be opened.
    Fix: the original assigned both the accumulator and each byte's bit string
    to scrambled throwaway names, leaving `result`/`curr_byte` undefined.
    """
    result = ""
    try:
        with open(__lowercase, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            # each byte rendered as a fixed-width, zero-padded 8-bit string
            result += f"{dat:08b}"
        return result
    except OSError:
        print("File not accessible")
        sys.exit()
def UpperCAmelCase_ ( __lowercase : str ) -> str:
    """LZW-decompress a bit-string back into the original bit-string.

    Codes start as {"0", "1"} and grow; when the dictionary size crosses a
    power of two, every key is re-prefixed with '0' so code widths stay in
    sync with the compressor.
    Fix: the original lost every dictionary-entry assignment target to
    scrambled locals and called the nonexistent `math.loga` (math.log2).
    """
    lexicon = {"0": "0", "1": "1"}
    result = ""
    curr_string = ""
    index = len(lexicon)
    for i in range(len(__lowercase)):
        curr_string += __lowercase[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"
        if math.log2(index).is_integer():
            # code width grows: re-key the whole lexicon with a leading '0'
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex
        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result
def UpperCAmelCase_ ( file_path: str, to_write: str ) -> None:
    """Pack a non-empty bit-string into bytes (with a '1'+zeros terminator) and write it.

    Fix: the original declared both parameters with the same name (a
    SyntaxError), lost the chunk-list local, and wrote `[:-1]`, silently
    dropping the final (terminator) byte.
    """
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                # already byte-aligned: the terminator occupies a whole new byte
                result_byte_array.append("10000000")
            else:
                # pad the last byte with '1' followed by zeros
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()
def UpperCAmelCase_ ( __lowercase : str ) -> str:
    """Drop this file format's length header from a bit-string.

    The header is N leading '0's followed by a marker; the function removes
    those N zeros and then N + 1 further bits, returning the payload bits.
    Fix: the original assigned the counter and both slices to scrambled
    throwaway names, leaving `counter`/`data_bits` undefined.
    """
    counter = 0
    for letter in __lowercase:
        if letter == "1":
            break
        counter += 1
    data_bits = __lowercase[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits
def UpperCAmelCase_ ( source_path: str, destination_path: str ) -> None:
    """Decompress `source_path` and write the result to `destination_path`.

    Pipeline: read raw bits -> strip header -> LZW-decompress -> pack and write.
    Fix: the original declared both parameters with the same name and lost
    every intermediate to scrambled locals.
    NOTE(review): the helpers are referenced by their original names
    (read_file_binary / remove_prefix / decompress_data / write_file_binary);
    confirm they resolve in the full module.
    """
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)
# CLI entry point: decompress argv[1] into argv[2].
# NOTE(review): despite the name, `compress` here runs the decompression
# pipeline (its helpers read, strip the header, and LZW-decompress).
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 236 | 0 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

# Lazy import structure: maps submodule name -> public names it provides.
UpperCAmelCase_ = {
    "configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"],
    "tokenization_canine": ["CanineTokenizer"],
}

# The modeling submodule is only importable when torch is installed.
# Fix: the original overwrote the whole structure dict with the modeling list;
# it must be stored under the "modeling_canine" key.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCAmelCase_["modeling_canine"] = [
        "CANINE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CanineForMultipleChoice",
        "CanineForQuestionAnswering",
        "CanineForSequenceClassification",
        "CanineForTokenClassification",
        "CanineLayer",
        "CanineModel",
        "CaninePreTrainedModel",
        "load_tf_weights_in_canine",
    ]

if TYPE_CHECKING:
    from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
    from .tokenization_canine import CanineTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_canine import (
            CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
            CanineForMultipleChoice,
            CanineForQuestionAnswering,
            CanineForSequenceClassification,
            CanineForTokenClassification,
            CanineLayer,
            CanineModel,
            CaninePreTrainedModel,
            load_tf_weights_in_canine,
        )
else:
    import sys

    # Fix: the lazy proxy must replace this module in sys.modules (the
    # original referenced the undefined `_import_structure` and bound the
    # proxy to a throwaway name).
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], UpperCAmelCase_, module_spec=__spec__)
| 706 |
from __future__ import annotations
from collections.abc import Callable
# Type alias: a matrix is a list of rows whose entries may be ints or floats.
UpperCAmelCase_ = list[list[float | int]]
def UpperCAmelCase ( matrix , vector ) -> Matrix:
    """Solve the linear system `matrix @ x = vector` by Gaussian elimination.

    Args:
        matrix: square (size x size) coefficient matrix.
        vector: (size x 1) column vector of right-hand sides.

    Returns:
        The (size x 1) solution column, each entry rounded to 10 decimals.

    Fix: the original declared both parameters as `A__` (a SyntaxError) and
    lost the augmented-matrix / ratio assignment targets to scrambled locals.
    """
    size = len(matrix)
    # Build the augmented matrix [matrix | vector].
    augmented = [[0 for _ in range(size + 1)] for _ in range(size)]
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]
    row = 0
    col = 0
    while row < size and col < size:
        # Partial pivoting: pick the row with the largest |entry| in this column.
        pivot_row = max((abs(augmented[rowa][col]), rowa) for rowa in range(row, size))[1]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        augmented[pivot_row], augmented[row] = augmented[row], augmented[pivot_row]
        for rowa in range(row + 1, size):
            ratio = augmented[rowa][col] / augmented[row][col]
            augmented[rowa][col] = 0
            for cola in range(col + 1, size + 1):
                augmented[rowa][cola] -= augmented[row][cola] * ratio
        row += 1
        col += 1
    # Back substitution: clear entries above each pivot.
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for cola in range(col, size + 1):
                augmented[row][cola] -= augmented[col][cola] * ratio
    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
def UpperCAmelCase ( data_points ) -> Callable[[int], int]:
    """Return the polynomial interpolating (1, y1), (2, y2), ... as a callable.

    Builds the Vandermonde system for the given y-values, solves it, then
    evaluates the polynomial with integer-rounded coefficients.
    Fix: the original built the matrix and vector into the same scrambled
    name and passed the raw data twice to the solver.
    NOTE(review): `solve` is referenced by its original name; confirm it
    resolves in the full module.
    """
    size = len(data_points)
    coeff_matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector = [[0] for _ in range(size)]
    for x_val, y_val in enumerate(data_points):
        for col in range(size):
            coeff_matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val
    coeffs = solve(coeff_matrix, vector)

    def interpolated_func(var) -> int:
        # sum of c_i * var^(size - i - 1) with integer-rounded coefficients
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func
def UpperCAmelCase ( variable ) -> int:
    """Project Euler 101 generating polynomial: 1 - n + n^2 - ... + n^10.

    Fix: the original named the parameter `A__` while the body reads
    `variable`, which was therefore undefined.
    """
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )
def UpperCAmelCase ( func = question_function , order = 10 ) -> int:
    """Sum of the first incorrect terms of the optimum polynomials (Euler 101).

    For each k in 1..order, fit a degree-(k-1) polynomial to the first k
    values of `func`, find the first argument where it diverges from `func`,
    and accumulate the polynomial's value there.
    Fix: the original declared both parameters as `A__` (a SyntaxError) and
    lost every intermediate to scrambled locals.
    NOTE(review): `question_function` and `interpolate` are referenced by
    their original names; confirm they resolve in the full module.
    """
    data_points = [func(x_val) for x_val in range(1, order + 1)]
    polynomials = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret = 0
    for poly in polynomials:
        x_val = 1
        # advance to the first argument where the fit diverges from func
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret
# Script entry point: prints the Project Euler 101 answer.
# NOTE(review): `solution` is the helper's original name — confirm it
# resolves in the full module.
if __name__ == "__main__":
print(f"""{solution() = }""")
| 519 | 0 |
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = 9.80665
def lowercase_ ( fluid_density , volume , gravity = 9.80665 ):
    """Archimedes' principle: buoyant force = fluid_density * gravity * volume.

    Args:
        fluid_density: density of the displacing fluid (kg/m^3), must be > 0.
        volume: displaced volume (m^3), must be >= 0.
        gravity: gravitational acceleration (m/s^2), must be > 0; defaults to
            standard gravity (the module constant's value).

    Raises:
        ValueError: on non-positive density/gravity or negative volume.

    Fix: the original declared all three parameters with the same name
    (a SyntaxError) and defaulted gravity to the undefined `g`.
    """
    if fluid_density <= 0:
        raise ValueError("""Impossible fluid density""")
    if volume < 0:
        raise ValueError("""Impossible Object volume""")
    if gravity <= 0:
        raise ValueError("""Impossible Gravity""")
    return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# Run this module's doctests when executed as a script.
doctest.testmod()
| 223 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
UpperCAmelCase__ : List[str] = logging.get_logger(__name__)
class lowerCAmelCase_ (a__ ):
    """Deprecated alias kept for backward compatibility; construction emits a
    FutureWarning pointing at PoolFormerImageProcessor.
    NOTE(review): base `a__` is the scrambled parent name — presumably
    PoolFormerImageProcessor; confirm in the full module.
    """

    def __init__(self, *args, **kwargs) -> None:
        # Fix: the original declared *args and **kwargs with the same name
        # (a SyntaxError) and passed the args tuple where the warning
        # category (FutureWarning) belongs.
        warnings.warn(
            """The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use PoolFormerImageProcessor instead.""",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 223 | 1 |
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
# importlib.metadata joined the stdlib in Python 3.8; fall back to the
# `importlib_metadata` backport package on older interpreters.
if config.PY_VERSION < version.parse("3.8"):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def A_ ( key , default=False ):
    """Read boolean environment variable `key`; return `default` when unset.

    Raises:
        ValueError: when the variable is set to something `strtobool`
            cannot parse.

    Fix: the original declared both parameters with the same name (a
    SyntaxError) and lost the value locals to scrambled names.
    """
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f'''If set, {key} must be yes or no.''')
    return _value
# Test-selection flags parsed from the environment, then reusable pytest
# skip markers for optional dependencies / platforms.
# NOTE(review): scrambling rebound every constant to `__UpperCAmelCase`, so
# the original distinct names (_run_slow_tests, require_lz4, ...) are lost;
# only the last binding survives — confirm against the upstream module.
__UpperCAmelCase = parse_flag_from_env("RUN_SLOW", default=False)
__UpperCAmelCase = parse_flag_from_env("RUN_REMOTE", default=False)
__UpperCAmelCase = parse_flag_from_env("RUN_LOCAL", default=True)
__UpperCAmelCase = parse_flag_from_env("RUN_PACKAGED", default=True)
# Compression
__UpperCAmelCase = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
__UpperCAmelCase = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
__UpperCAmelCase = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")
# Audio
__UpperCAmelCase = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
reason="test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ",
)
# Beam
__UpperCAmelCase = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
reason="test requires apache-beam and a compatible dill version",
)
# Dill-cloudpickle compatibility
__UpperCAmelCase = pytest.mark.skipif(
config.DILL_VERSION <= version.parse("0.3.2"),
reason="test requires dill>0.3.2 for cloudpickle compatibility",
)
# Windows
__UpperCAmelCase = pytest.mark.skipif(
sys.platform == "win32",
reason="test should not be run on Windows",
)
def A_ ( lowercase_ ):
    """Decorator: skip the given test unless faiss is importable.

    Fix: the original assigned the skipped test to a throwaway name and
    returned the undefined `test_case`.
    """
    try:
        import faiss  # noqa
    except ImportError:
        lowercase_ = unittest.skip('test requires faiss')(lowercase_)
    return lowercase_


def A_ ( lowercase_ ):
    """Decorator: skip the given test unless regex is importable."""
    try:
        import regex  # noqa
    except ImportError:
        lowercase_ = unittest.skip('test requires regex')(lowercase_)
    return lowercase_


def A_ ( lowercase_ ):
    """Decorator: skip the given test unless elasticsearch is importable."""
    try:
        import elasticsearch  # noqa
    except ImportError:
        lowercase_ = unittest.skip('test requires elasticsearch')(lowercase_)
    return lowercase_


def A_ ( lowercase_ ):
    """Decorator: skip the given test unless sqlalchemy is importable."""
    try:
        import sqlalchemy  # noqa
    except ImportError:
        lowercase_ = unittest.skip('test requires sqlalchemy')(lowercase_)
    return lowercase_
def A_ ( lowercase_ ):
    """Decorator: skip the test unless PyTorch is available per datasets config.

    Fix (all four below): the original assigned the skipped test to a
    throwaway name and returned the undefined `test_case`.
    """
    if not config.TORCH_AVAILABLE:
        lowercase_ = unittest.skip('test requires PyTorch')(lowercase_)
    return lowercase_


def A_ ( lowercase_ ):
    """Decorator: skip the test unless TensorFlow is available."""
    if not config.TF_AVAILABLE:
        lowercase_ = unittest.skip('test requires TensorFlow')(lowercase_)
    return lowercase_


def A_ ( lowercase_ ):
    """Decorator: skip the test unless JAX is available."""
    if not config.JAX_AVAILABLE:
        lowercase_ = unittest.skip('test requires JAX')(lowercase_)
    return lowercase_


def A_ ( lowercase_ ):
    """Decorator: skip the test unless Pillow is available."""
    if not config.PIL_AVAILABLE:
        lowercase_ = unittest.skip('test requires Pillow')(lowercase_)
    return lowercase_
def A_ ( lowercase_ ):
    """Decorator: skip the test unless transformers is importable.

    Fix (this group): the success branch returned the undefined `test_case`;
    it now returns the decorated function itself.
    """
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip('test requires transformers')(lowercase_)
    else:
        return lowercase_


def A_ ( lowercase_ ):
    """Decorator: skip the test unless tiktoken is importable."""
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip('test requires tiktoken')(lowercase_)
    else:
        return lowercase_


def A_ ( lowercase_ ):
    """Decorator: skip the test unless spacy is importable."""
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip('test requires spacy')(lowercase_)
    else:
        return lowercase_


def A_ ( lowercase_ ):
    """Decorator factory: skip the test unless the named spacy model loads.

    Fix: the inner decorator's parameter shadowed the model name, so both
    the load target and the returned test were wrong.
    """
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(lowercase_)
        except ImportError:
            return unittest.skip('test requires spacy')(test_case)
        except OSError:
            return unittest.skip('test requires spacy model \'{}\''.format(lowercase_))(test_case)
        else:
            return test_case

    return _require_spacy_model


def A_ ( lowercase_ ):
    """Decorator: skip the test unless pyspark is importable."""
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip('test requires pyspark')(lowercase_)
    else:
        return lowercase_


def A_ ( lowercase_ ):
    """Decorator: skip the test unless joblibspark is importable."""
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip('test requires joblibspark')(lowercase_)
    else:
        return lowercase_
def A_ ( lowercase_ ):
    """Decorator: skip the test unless slow tests are enabled (RUN_SLOW).

    Fix (all four below): the original assigned the skipped test to a
    throwaway name and returned the undefined `test_case`.
    NOTE(review): the `_run_*_tests` flags are the original module-level
    names; confirm they resolve in the full module.
    """
    if not _run_slow_tests or _run_slow_tests == 0:
        lowercase_ = unittest.skip('test is slow')(lowercase_)
    return lowercase_


def A_ ( lowercase_ ):
    """Decorator: skip the test unless local tests are enabled (RUN_LOCAL)."""
    if not _run_local_tests or _run_local_tests == 0:
        lowercase_ = unittest.skip('test is local')(lowercase_)
    return lowercase_


def A_ ( lowercase_ ):
    """Decorator: skip the test unless packaged tests are enabled (RUN_PACKAGED)."""
    if not _run_packaged_tests or _run_packaged_tests == 0:
        lowercase_ = unittest.skip('test is packaged')(lowercase_)
    return lowercase_


def A_ ( lowercase_ ):
    """Decorator: skip the test unless remote tests are enabled (RUN_REMOTE)."""
    if not _run_remote_tests or _run_remote_tests == 0:
        lowercase_ = unittest.skip('test requires remote')(lowercase_)
    return lowercase_
def A_ ( *lowercase_ ) ->Tuple:
"""simple docstring"""
def decorate(cls ):
for name, fn in cls.__dict__.items():
if callable(lowercase_ ) and name.startswith('test' ):
for decorator in decorators:
SCREAMING_SNAKE_CASE = decorator(lowercase_ )
setattr(cls , lowercase_ , lowercase_ )
return cls
return decorate
# Raised by the offline simulator when a request would hang with no timeout.
# NOTE(review): base `lowercase__` is the scrambled parent name — presumably
# an exception base here and Enum below; confirm in the full module.
class a_( lowercase__ ):
"""Error raised when a simulated offline request would hang indefinitely."""
pass
# Offline-simulation modes; the integer values select behavior in the
# offline() context manager below (fail / time out / HF_DATASETS_OFFLINE=1).
class a_( lowercase__ ):
"""Enumeration of offline-simulation strategies."""
__snake_case : List[Any] =0
__snake_case : Any =1
__snake_case : str =2
@contextmanager
def A_ ( lowercase_=OfflineSimulationMode.CONNECTION_FAILS , lowercase_=1e-16 ) ->int:
"""Simulate being offline for the duration of the `with` block.

Depending on the mode, requests either raise ConnectionError, time out
almost immediately, or datasets' HF_DATASETS_OFFLINE flag is patched on.
NOTE(review): scrambling duplicated the parameter names and collapsed
several assignment targets (mode/timeout/url locals) into
SCREAMING_SNAKE_CASE — restore from upstream before running.
"""
SCREAMING_SNAKE_CASE = requests.Session().request
def timeout_request(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ):
# Change the url to an invalid url so that the connection hangs
SCREAMING_SNAKE_CASE = 'https://10.255.255.1'
if kwargs.get('timeout' ) is None:
raise RequestWouldHangIndefinitelyError(
f'''Tried a call to {url} in offline mode with no timeout set. Please set a timeout.''' )
SCREAMING_SNAKE_CASE = timeout
try:
return online_request(lowercase_ , lowercase_ , **lowercase_ )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
SCREAMING_SNAKE_CASE = url
SCREAMING_SNAKE_CASE = e.args[0]
SCREAMING_SNAKE_CASE = (max_retry_error.args[0].replace('10.255.255.1' , f'''OfflineMock[{url}]''' ),)
SCREAMING_SNAKE_CASE = (max_retry_error,)
raise
def raise_connection_error(lowercase_ , lowercase_ , **lowercase_ ):
raise requests.ConnectionError('Offline mode is enabled.' , request=lowercase_ )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch('requests.Session.send' , lowercase_ ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch('requests.Session.request' , lowercase_ ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch('datasets.config.HF_DATASETS_OFFLINE' , lowercase_ ):
yield
else:
raise ValueError('Please use a value from the OfflineSimulationMode enum.' )
@contextmanager
def A_ ( *args , **kwargs ):
    """Run the enclosed block inside a fresh temporary working directory.

    Extra args/kwargs are forwarded to tempfile.TemporaryDirectory; the
    original working directory is always restored on exit.
    Fix: the original declared *args/**kwargs with one shared name and
    chdir'd to the varargs tuple both entering and leaving, instead of the
    temp dir and the saved cwd.
    """
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)
@contextmanager
def A_ ( ) ->Optional[Any]:
"""Context manager asserting that Arrow allocations grew inside the block.

NOTE(review): the baseline is assigned to SCREAMING_SNAKE_CASE but read
back as `previous_allocated_memory` — scrambling split one variable in
two; restore a single name before running.
"""
import gc
gc.collect()
SCREAMING_SNAKE_CASE = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def A_ ( ) ->Optional[int]:
"""Context manager asserting that Arrow allocations did NOT grow inside the block.

NOTE(review): same split-variable scrambling as above.
"""
import gc
gc.collect()
SCREAMING_SNAKE_CASE = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def A_ ( gen_a , gen_b ):
    """Return True when two NumPy bit generators would produce identical draws.

    Each generator is deep-copied first so the comparison does not advance
    either generator's state.
    Fix: the original declared both parameters with the same name
    (a SyntaxError).
    """
    return deepcopy(gen_a).integers(0, 1_0_0, 1_0).tolist() == deepcopy(gen_b).integers(0, 1_0_0, 1_0).tolist()
def A_ ( lowercase_ ) ->Optional[int]:
"""Decorator: turn HTTP 500/502 failures from the wrapped test into xfails.

Uses the third-party `decorator` package so the wrapped function keeps
its signature (pytest fixtures still resolve).
"""
import decorator
from requests.exceptions import HTTPError
def _wrapper(lowercase_ , *lowercase_ , **lowercase_ ):
try:
return func(*lowercase_ , **lowercase_ )
except HTTPError as err:
# Server-side flakiness (500/502) is expected occasionally: xfail, not fail.
if str(lowercase_ ).startswith('500' ) or str(lowercase_ ).startswith('502' ):
pytest.xfail(str(lowercase_ ) )
raise err
return decorator.decorator(_wrapper , lowercase_ )
class a_:
    """Outcome of a finished subprocess: return code plus captured output lines.

    Fix: the original declared all three __init__ parameters with the same
    name (a SyntaxError) and bound them to throwaway locals instead of
    instance attributes.
    """

    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def A_ ( stream , callback ):
    """Forward each line read from an async stream to `callback` until EOF.

    EOF is signaled by an empty read from `stream.readline()`.
    Fix: the original declared both parameters with the same name and bound
    the read line to a throwaway local, leaving `line` undefined.
    """
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def A_ ( lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=False , lowercase_=False ) ->int:
"""Spawn `cmd` and tee its stdout/stderr to this process while capturing them.

Returns a _RunOutput with the exit code and captured line lists.
NOTE(review): scrambling duplicated the keyword parameters and collapsed
the cmd/env/stdin/timeout/quiet/echo names; restore from upstream
before running.
"""
if echo:
print('\nRunning: ' , ' '.join(lowercase_ ) )
SCREAMING_SNAKE_CASE = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=lowercase_ , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=lowercase_ , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
def tee(lowercase_ , lowercase_ , lowercase_ , lowercase_="" ):
SCREAMING_SNAKE_CASE = line.decode('utf-8' ).rstrip()
sink.append(lowercase_ )
if not quiet:
print(lowercase_ , lowercase_ , file=lowercase_ )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout , lambda lowercase_ : tee(lowercase_ , lowercase_ , sys.stdout , label='stdout:' ) ),
_read_stream(p.stderr , lambda lowercase_ : tee(lowercase_ , lowercase_ , sys.stderr , label='stderr:' ) ),
] , timeout=lowercase_ , )
return _RunOutput(await p.wait() , lowercase_ , lowercase_ )
def A_ ( lowercase_ , lowercase_=None , lowercase_=None , lowercase_=1_8_0 , lowercase_=False , lowercase_=True ) ->Optional[Any]:
"""Synchronously run `cmd` via the async streamer and fail loudly on errors.

Raises RuntimeError when the command exits non-zero or produces no
output at all (the latter catches silently-broken remote workers).
NOTE(review): scrambling duplicated the keyword parameters and collapsed
the result/cmd_str locals; restore from upstream before running.
"""
SCREAMING_SNAKE_CASE = asyncio.get_event_loop()
SCREAMING_SNAKE_CASE = loop.run_until_complete(
_stream_subprocess(lowercase_ , env=lowercase_ , stdin=lowercase_ , timeout=lowercase_ , quiet=lowercase_ , echo=lowercase_ ) )
SCREAMING_SNAKE_CASE = ' '.join(lowercase_ )
if result.returncode > 0:
SCREAMING_SNAKE_CASE = '\n'.join(result.stderr )
raise RuntimeError(
f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
f'''The combined stderr from workers follows:\n{stderr}''' )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f'''\'{cmd_str}\' produced no output.''' )
return result
def A_ ( ):
    """Return the numeric id of the current pytest-xdist worker (0 when unset).

    Reads PYTEST_XDIST_WORKER (e.g. "gw3") and strips the "gw" prefix.
    Fix: the original bound the env value to a throwaway name and then
    operated on the undefined `lowercase_`.
    """
    worker = os.environ.get('PYTEST_XDIST_WORKER', 'gw0')
    worker_id = re.sub(r'^gw', '', worker, 0, re.M)
    return int(worker_id)
def A_ ( ):
    """Return a torch.distributed port that is unique per pytest-xdist worker.

    Base port 29500 offset by the worker id so parallel workers don't clash.
    Fix: the original bound both values to one throwaway name and then read
    the undefined `port`/`uniq_delta`.
    NOTE(review): `pytest_xdist_worker_id` is referenced by its original
    name; confirm it resolves in the full module.
    """
    port = 2_9_5_0_0
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
| 702 |
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
# Integration tests for the optimum BetterTransformer round-trip on a tiny T5.
@require_torch
@require_optimum
@slow
class a_( unittest.TestCase ):
"""Checks that to_bettertransformer()/reverse_bettertransformer() round-trip,
survive save/reload, and preserve generation outputs."""
def __UpperCamelCase ( self : Optional[Any]) -> Any:
"""Convert, generate, revert, save, reload — outputs must match exactly."""
SCREAMING_SNAKE_CASE = 'hf-internal-testing/tiny-random-t5'
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(lowerCAmelCase__)
SCREAMING_SNAKE_CASE = AutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase__)
SCREAMING_SNAKE_CASE = tokenizer('This is me' , return_tensors='pt')
SCREAMING_SNAKE_CASE = model.to_bettertransformer()
# after conversion at least one submodule is a BetterTransformer wrapper
self.assertTrue(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules()))
SCREAMING_SNAKE_CASE = model.generate(**lowerCAmelCase__)
SCREAMING_SNAKE_CASE = model.reverse_bettertransformer()
# after reversal no wrapper modules may remain
self.assertFalse(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules()))
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCAmelCase__)
SCREAMING_SNAKE_CASE = AutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase__)
self.assertFalse(
any('BetterTransformer' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules()))
SCREAMING_SNAKE_CASE = model_reloaded.generate(**lowerCAmelCase__)
self.assertTrue(torch.allclose(lowerCAmelCase__ , lowerCAmelCase__))
def __UpperCamelCase ( self : List[str]) -> Union[str, Any]:
"""Saving while still converted must raise; reverting first must succeed."""
SCREAMING_SNAKE_CASE = 'hf-internal-testing/tiny-random-t5'
SCREAMING_SNAKE_CASE = AutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase__)
SCREAMING_SNAKE_CASE = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(lowerCAmelCase__):
model.save_pretrained(lowerCAmelCase__)
SCREAMING_SNAKE_CASE = model.reverse_bettertransformer()
model.save_pretrained(lowerCAmelCase__)
| 259 | 0 |
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
# Module-level logger for the BART configuration module.
a = logging.get_logger(__name__)
# Map of canonical checkpoint name -> remote config URL.
a = {
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/config.json',
# See all BART models at https://huggingface.co/models?filter=bart
}
class SCREAMING_SNAKE_CASE__ ( _a ):
    """Configuration class for BART models.

    Fix: the original declared every __init__ parameter with the single name
    `lowerCAmelCase` (a SyntaxError) and bound each value to a throwaway
    local instead of an instance attribute; the class attributes had all
    collapsed onto `_a`. Names/defaults are restored to match the upstream
    BartConfig contract (the visible defaults match it exactly).
    NOTE(review): base `_a` is the scrambled parent name — presumably
    PretrainedConfig; confirm in the full module.
    """

    model_type = 'bart'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}

    def __init__(
        self,
        vocab_size=5_0265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        num_labels=3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            num_labels=num_labels,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
        # ensure backward compatibility for BART CNN models
        if self.forced_bos_token_id is None and kwargs.get("""force_bos_token_to_be_generated""", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '''
                """The config can simply be saved and uploaded again to be fixed.""")
class SCREAMING_SNAKE_CASE__ ( _a ):
@property
def __lowercase ( self : Any ):
"""ONNX input spec (name -> dynamic-axis map) for the current export task.

seq2seq tasks add decoder inputs and, with use_past, past-key-value
entries; causal-lm patches past axes per layer; other tasks export the
plain encoder/decoder tensor quartet.
NOTE(review): scrambling collapsed the OrderedDict / axis-dict
assignment targets onto `lowerCAmelCase`; restore from upstream.
"""
if self.task in ["default", "seq2seq-lm"]:
lowerCAmelCase = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
lowerCAmelCase = {0: """batch"""}
lowerCAmelCase = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
lowerCAmelCase = {0: """batch""", 1: """decoder_sequence"""}
lowerCAmelCase = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(__UpperCAmelCase , direction="""inputs""" )
elif self.task == "causal-lm":
# TODO: figure this case out.
lowerCAmelCase = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
lowerCAmelCase , lowerCAmelCase = self.num_layers
for i in range(__UpperCAmelCase ):
lowerCAmelCase = {0: """batch""", 2: """past_sequence + sequence"""}
lowerCAmelCase = {0: """batch""", 2: """past_sequence + sequence"""}
else:
lowerCAmelCase = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}),
("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}),
] )
return common_inputs
@property
def __lowercase ( self : Optional[int] ):
"""ONNX output spec; with use_past the per-layer past axes are patched in.

NOTE(review): scrambling collapsed the outputs/num-layer assignment
targets onto `lowerCAmelCase`; restore from upstream.
"""
if self.task in ["default", "seq2seq-lm"]:
lowerCAmelCase = super().outputs
else:
lowerCAmelCase = super(__UpperCAmelCase , self ).outputs
if self.use_past:
lowerCAmelCase , lowerCAmelCase = self.num_layers
for i in range(__UpperCAmelCase ):
lowerCAmelCase = {0: """batch""", 2: """past_sequence + sequence"""}
lowerCAmelCase = {0: """batch""", 2: """past_sequence + sequence"""}
return common_outputs
def __lowercase ( self : int , lowerCAmelCase : PreTrainedTokenizer , lowerCAmelCase : int = -1 , lowerCAmelCase : int = -1 , lowerCAmelCase : bool = False , lowerCAmelCase : Optional[TensorType] = None , ):
"""Build dummy encoder+decoder inputs (and past_key_values when enabled)
for tracing a seq2seq export.

Requires torch when use_past is set, since past states are zero tensors.
NOTE(review): scrambling duplicated the parameter names and collapsed
the intermediate assignment targets onto `lowerCAmelCase`; restore
from upstream before running.
"""
lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# Generate decoder inputs
lowerCAmelCase = seq_length if not self.use_past else 1
lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase = {f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
lowerCAmelCase = dict(**__UpperCAmelCase , **__UpperCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
lowerCAmelCase , lowerCAmelCase = common_inputs["""input_ids"""].shape
lowerCAmelCase = common_inputs["""decoder_input_ids"""].shape[1]
lowerCAmelCase , lowerCAmelCase = self.num_attention_heads
lowerCAmelCase = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowerCAmelCase = decoder_seq_length + 3
lowerCAmelCase = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
lowerCAmelCase = torch.cat(
[common_inputs["""decoder_attention_mask"""], torch.ones(__UpperCAmelCase , __UpperCAmelCase )] , dim=1 )
lowerCAmelCase = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
lowerCAmelCase , lowerCAmelCase = self.num_layers
lowerCAmelCase = min(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase = max(__UpperCAmelCase , __UpperCAmelCase ) - min_num_layers
lowerCAmelCase = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder"""
for _ in range(__UpperCAmelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(__UpperCAmelCase ),
torch.zeros(__UpperCAmelCase ),
torch.zeros(__UpperCAmelCase ),
torch.zeros(__UpperCAmelCase ),
) )
# TODO: test this.
lowerCAmelCase = encoder_shape if remaining_side_name == """encoder""" else decoder_shape
for _ in range(__UpperCAmelCase , __UpperCAmelCase ):
common_inputs["past_key_values"].append((torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) )
return common_inputs
def __lowercase ( self : Optional[Any] , lowerCAmelCase : PreTrainedTokenizer , lowerCAmelCase : int = -1 , lowerCAmelCase : int = -1 , lowerCAmelCase : bool = False , lowerCAmelCase : Optional[TensorType] = None , ):
"""Build dummy inputs for a causal-lm export; past states are zero tensors
and the attention mask is extended to cover them.

NOTE(review): scrambling duplicated the parameter names and collapsed
the intermediate assignment targets onto `lowerCAmelCase`; restore
from upstream before running.
"""
lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
lowerCAmelCase , lowerCAmelCase = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
lowerCAmelCase = seqlen + 2
lowerCAmelCase , lowerCAmelCase = self.num_layers
lowerCAmelCase , lowerCAmelCase = self.num_attention_heads
lowerCAmelCase = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowerCAmelCase = common_inputs["""attention_mask"""].dtype
lowerCAmelCase = torch.cat(
[common_inputs["""attention_mask"""], torch.ones(__UpperCAmelCase , __UpperCAmelCase , dtype=__UpperCAmelCase )] , dim=1 )
lowerCAmelCase = [
(torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) for _ in range(__UpperCAmelCase )
]
return common_inputs
# NOTE(review): identifiers mangled (duplicate parameter names, undefined
# `__UpperCAmelCase`); left byte-identical. Purpose (inferred — confirm
# upstream): build dummy tokenizer inputs for sequence-classification /
# question-answering ONNX export, fixing dynamic (-1) axes to the
# OnnxConfig default batch/sequence sizes.
def __lowercase ( self : List[Any] , lowerCAmelCase : PreTrainedTokenizer , lowerCAmelCase : int = -1 , lowerCAmelCase : int = -1 , lowerCAmelCase : bool = False , lowerCAmelCase : Optional[TensorType] = None , ):
lowerCAmelCase = compute_effective_axis_dimension(
__UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowerCAmelCase = tokenizer.num_special_tokens_to_add(__UpperCAmelCase )
lowerCAmelCase = compute_effective_axis_dimension(
__UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__UpperCAmelCase )
# Generate dummy inputs according to compute batch and sequence
lowerCAmelCase = [""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size
lowerCAmelCase = dict(tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase ) )
return common_inputs
# NOTE(review): identifiers mangled; left byte-identical. Purpose (inferred —
# confirm upstream): dispatch dummy-input generation to the task-specific
# helper based on `self.task` ("default"/"seq2seq-lm", "causal-lm", or
# anything else → sequence-classification/QA).
def __lowercase ( self : Optional[Any] , lowerCAmelCase : PreTrainedTokenizer , lowerCAmelCase : int = -1 , lowerCAmelCase : int = -1 , lowerCAmelCase : bool = False , lowerCAmelCase : Optional[TensorType] = None , ):
if self.task in ["default", "seq2seq-lm"]:
lowerCAmelCase = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase )
elif self.task == "causal-lm":
lowerCAmelCase = self._generate_dummy_inputs_for_causal_lm(
__UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase )
else:
lowerCAmelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase )
return common_inputs
# NOTE(review): identifiers mangled; left byte-identical. Purpose (inferred —
# confirm upstream): flatten past_key_values for ONNX export, delegating to
# the seq2seq base-class implementation for "default"/"seq2seq-lm" and to the
# grandparent implementation otherwise.
def __lowercase ( self : Optional[Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[int] , lowerCAmelCase : int ):
if self.task in ["default", "seq2seq-lm"]:
lowerCAmelCase = super()._flatten_past_key_values_(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
else:
# Explicit super(cls, self) to skip the immediate parent's override.
lowerCAmelCase = super(__UpperCAmelCase , self )._flatten_past_key_values_(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
| 169 |
from __future__ import annotations
# NOTE(review): obfuscation collapsed three distinct module names onto
# `UpperCAmelCase__` — only the last binding survives, and the `grid`/`delta`
# names referenced by the search classes below are never defined. Originally
# (confirm upstream): a `Path` type alias, the obstacle grid, and the
# four-neighbour move set.
UpperCAmelCase__ = list[tuple[int, int]]
UpperCAmelCase__ = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
UpperCAmelCase__ = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right
# NOTE(review): machine-mangled. Every attribute assignment in __init__ was
# collapsed to the local `a`, so self.pos_x / self.goal_x / self.f_cost etc.
# are never set and calculate_heuristic()/__lt__ would raise AttributeError.
# Left byte-identical; restore attribute names from upstream.
class lowercase_ :
'''Search-tree node for greedy best-first search (Manhattan heuristic).'''
def __init__( self : Dict , __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : float , __UpperCAmelCase : Node | None , ) ->int:
"""Record position, goal, path cost g, parent link, and heuristic f."""
a = pos_x
a = pos_y
a = (pos_y, pos_x)
a = goal_x
a = goal_y
a = g_cost
a = parent
a = self.calculate_heuristic()
def __lowerCAmelCase ( self : Any ) ->float:
"""Manhattan distance |dx| + |dy| from this node to the goal."""
a = abs(self.pos_x - self.goal_x )
a = abs(self.pos_y - self.goal_y )
return dx + dy
def __lt__( self : Any , __UpperCAmelCase : Tuple ) ->bool:
"""Order nodes by heuristic cost so list.sort() yields the best first."""
return self.f_cost < other.f_cost
# NOTE(review): machine-mangled — this class shadows the Node class above
# (both renamed `lowercase_`), so the `Node(...)` constructor calls below are
# unresolved, and attribute assignments are collapsed onto local `a`.
# Left byte-identical; restore from upstream before use.
class lowercase_ :
'''Greedy best-first search over the module-level grid.'''
def __init__( self : Optional[Any] , __UpperCAmelCase : tuple[int, int] , __UpperCAmelCase : tuple[int, int] ) ->Dict:
"""Create start/target nodes and the open/closed lists."""
a = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , __UpperCAmelCase )
a = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99_999 , __UpperCAmelCase )
a = [self.start]
a = []
a = False
def __lowerCAmelCase ( self : str ) ->Path | None:
"""Expand the cheapest open node until the target is reached.

        Returns the retraced path on success, [start] if the open list is
        exhausted, and (unreachably) None otherwise.
        """
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
a = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
a = True
return self.retrace_path(__UpperCAmelCase )
self.closed_nodes.append(__UpperCAmelCase )
a = self.get_successors(__UpperCAmelCase )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(__UpperCAmelCase )
else:
# retrieve the best current path
a = self.open_nodes.pop(self.open_nodes.index(__UpperCAmelCase ) )
# Keep whichever of the duplicate nodes has the lower g-cost.
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(__UpperCAmelCase )
else:
self.open_nodes.append(__UpperCAmelCase )
if not self.reached:
return [self.start.pos]
return None
def __lowerCAmelCase ( self : Dict , __UpperCAmelCase : Node ) ->list[Node]:
"""Return in-bounds, non-obstacle neighbour nodes of *parent*."""
a = []
for action in delta:
a = parent.pos_x + action[1]
a = parent.pos_y + action[0]
# Skip moves that leave the grid.
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(__UpperCAmelCase ) - 1):
continue
# Skip obstacle cells (non-zero grid entries).
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
__UpperCAmelCase , __UpperCAmelCase , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , __UpperCAmelCase , ) )
return successors
def __lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : Node | None ) ->Path:
"""Follow parent links back to the start and return the reversed path."""
a = node
a = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
a = current_node.parent
path.reverse()
return path
# Demo driver: run greedy best-first search on the module grid and mark the
# found path with 2s. NOTE(review): `grid` and `GreedyBestFirst` are undefined
# under the mangled names above; restore upstream names before running.
if __name__ == "__main__":
UpperCAmelCase__ = (0, 0)
UpperCAmelCase__ = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print("------")
UpperCAmelCase__ = GreedyBestFirst(init, goal)
UpperCAmelCase__ = greedy_bf.search()
if path:
for pos_x, pos_y in path:
UpperCAmelCase__ = 2
for elem in grid:
print(elem)
| 117 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Standard transformers lazy-module bootstrap for the MRA model.
# NOTE(review): mangled — `_import_structure` passed to _LazyModule is never
# defined (both dict and list were renamed `__lowerCAmelCase`), and the final
# line should read `sys.modules[__name__] = _LazyModule(...)`. Left
# byte-identical; restore from upstream transformers `models/mra/__init__.py`.
__lowerCAmelCase = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}
try:
# Modeling classes are only exposed when torch is installed.
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
# Real imports for static type checkers only.
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
# At runtime, replace this module with a lazily-importing proxy.
__lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 666 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
# datasets TaskTemplate for image classification.
# NOTE(review): mangled — in align_with_features the four assignments all
# target the local `a_`, clobbering each other; upstream used distinct names
# (task_template, label_schema, ...) and returned the deep copy with its
# label schema updated. Left byte-identical.
@dataclass(frozen=SCREAMING_SNAKE_CASE_ )
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ ):
# Task identifier plus the fixed input/label feature schema.
snake_case__ = field(default="image-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
snake_case__ = Features({"image": Image()} )
snake_case__ = Features({"labels": ClassLabel} )
snake_case__ = "image"
snake_case__ = "labels"
def SCREAMING_SNAKE_CASE ( self : List[str] , __SCREAMING_SNAKE_CASE : List[Any] ) -> Any:
# Validate that the dataset features contain a ClassLabel label column,
# then return a copy of this template specialized to that label feature.
if self.label_column not in features:
raise ValueError(f'Column {self.label_column} is not present in features.' )
if not isinstance(features[self.label_column] , __SCREAMING_SNAKE_CASE ):
raise ValueError(f'Column {self.label_column} is not a ClassLabel.' )
a_ : Optional[int] = copy.deepcopy(self )
a_ : int = self.label_schema.copy()
a_ : Tuple = features[self.label_column]
a_ : str = label_schema
return task_template
@property
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict[str, str]:
# Mapping from dataset column names to the template's canonical names.
return {
self.image_column: "image",
self.label_column: "labels",
}
| 666 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# TimeSformer model configuration.
# NOTE(review): mangled — every attribute assignment in __init__ collapsed
# onto the local `lowerCAmelCase__`, so no config attribute is actually set;
# upstream assigns self.image_size, self.patch_size, etc. Left byte-identical.
_lowerCAmelCase : List[Any] = logging.get_logger(__name__)
# Canonical checkpoint -> config-file URL map.
_lowerCAmelCase : Union[str, Any] = {
"facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}
class __snake_case ( SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE__ = 'timesformer'
def __init__( self ,a_=224 ,a_=16 ,a_=3 ,a_=8 ,a_=768 ,a_=12 ,a_=12 ,a_=3072 ,a_="gelu" ,a_=0.0 ,a_=0.0 ,a_=0.02 ,a_=1e-6 ,a_=True ,a_="divided_space_time" ,a_=0 ,**a_ ,):
"""Store TimeSformer hyperparameters (defaults mirror the base model)."""
super().__init__(**a_ )
lowerCAmelCase__ = image_size
lowerCAmelCase__ = patch_size
lowerCAmelCase__ = num_channels
lowerCAmelCase__ = num_frames
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = hidden_dropout_prob
lowerCAmelCase__ = attention_probs_dropout_prob
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = layer_norm_eps
lowerCAmelCase__ = qkv_bias
lowerCAmelCase__ = attention_type
lowerCAmelCase__ = drop_path_rate
| 193 |
from typing import Dict
from .base import GenericTensor, Pipeline
# Feature-extraction pipeline: returns hidden states for the input text.
# NOTE(review): mangled — in _sanitize_parameters the locals are all named
# `lowerCAmelCase__` and overwrite each other, and the returned names
# (preprocess_params, postprocess_params) are undefined. Left byte-identical.
class __snake_case ( SCREAMING_SNAKE_CASE ):
def SCREAMING_SNAKE_CASE_ ( self ,a_=None ,a_=None ,a_=None ,**a_ ):
"""Split caller kwargs into (preprocess, forward, postprocess) params."""
if tokenize_kwargs is None:
lowerCAmelCase__ = {}
if truncation is not None:
# Reject duplicate specification of truncation.
if "truncation" in tokenize_kwargs:
raise ValueError(
'truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)' )
lowerCAmelCase__ = truncation
lowerCAmelCase__ = tokenize_kwargs
lowerCAmelCase__ = {}
if return_tensors is not None:
lowerCAmelCase__ = return_tensors
return preprocess_params, {}, postprocess_params
def SCREAMING_SNAKE_CASE_ ( self ,a_ ,**a_ ):
"""Tokenize raw text into framework tensors."""
lowerCAmelCase__ = self.framework
lowerCAmelCase__ = self.tokenizer(a_ ,return_tensors=a_ ,**a_ )
return model_inputs
def SCREAMING_SNAKE_CASE_ ( self ,a_ ):
"""Run the model forward pass."""
lowerCAmelCase__ = self.model(**a_ )
return model_outputs
def SCREAMING_SNAKE_CASE_ ( self ,a_ ,a_=False ):
"""Return the first output tensor, optionally converted to nested lists."""
# [0] is the first available tensor, logits or last_hidden_state.
if return_tensors:
return model_outputs[0]
if self.framework == "pt":
return model_outputs[0].tolist()
elif self.framework == "tf":
return model_outputs[0].numpy().tolist()
def __call__( self ,*a_ ,**a_ ):
"""Delegate to the base Pipeline call."""
return super().__call__(*a_ ,**a_ )
| 193 | 1 |
# diffusers package-wide constants (cache paths, canonical weight filenames).
# NOTE(review): mangled — fourteen distinct constants were all renamed
# `lowerCAmelCase`, so each assignment clobbers the previous one and
# `default_cache_path` on the HUGGINGFACE_CO_RESOLVE_ENDPOINT-style line is
# undefined. Left byte-identical; restore upstream names.
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
lowerCAmelCase = HUGGINGFACE_HUB_CACHE
lowerCAmelCase = 'config.json'
lowerCAmelCase = 'diffusion_pytorch_model.bin'
lowerCAmelCase = 'diffusion_flax_model.msgpack'
lowerCAmelCase = 'model.onnx'
lowerCAmelCase = 'diffusion_pytorch_model.safetensors'
lowerCAmelCase = 'weights.pb'
lowerCAmelCase = 'https://huggingface.co'
lowerCAmelCase = default_cache_path
lowerCAmelCase = 'diffusers_modules'
lowerCAmelCase = os.getenv('HF_MODULES_CACHE', os.path.join(hf_cache_home, 'modules'))
lowerCAmelCase = ['fp16', 'non-ema']
lowerCAmelCase = '.self_attn'
| 429 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-module bootstrap for the CLAP model family.
# NOTE(review): mangled like the MRA block — the import-structure dict, the
# model list, and the feature-extractor list all rebind `lowerCAmelCase`, and
# the final line should be `sys.modules[__name__] = _LazyModule(...)` with a
# defined `_import_structure`. Left byte-identical.
lowerCAmelCase = {
'configuration_clap': [
'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ClapAudioConfig',
'ClapConfig',
'ClapTextConfig',
],
'processing_clap': ['ClapProcessor'],
}
try:
# Modeling and feature-extraction classes require torch.
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ClapModel',
'ClapPreTrainedModel',
'ClapTextModel',
'ClapTextModelWithProjection',
'ClapAudioModel',
'ClapAudioModelWithProjection',
]
lowerCAmelCase = ['ClapFeatureExtractor']
if TYPE_CHECKING:
# Real imports for static type checkers only.
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 429 | 1 |
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
__a: int = logging.get_logger(__name__)
# Convert a RoBERTa-PreLayerNorm checkpoint from the Hub into the
# transformers format (rename the "roberta." prefix, drop unused LayerNorm
# weights) and save model + tokenizer to the output folder.
# NOTE(review): mangled — locals are all `_UpperCAmelCase`, so config,
# state_dict, and model clobber one another; restore upstream names.
def _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case ) -> List[Any]:
_UpperCAmelCase = RobertaPreLayerNormConfig.from_pretrained(
__snake_case , architectures=["""RobertaPreLayerNormForMaskedLM"""] )
# convert state_dict
_UpperCAmelCase = torch.load(hf_hub_download(repo_id=__snake_case , filename="""pytorch_model.bin""" ) )
_UpperCAmelCase = {}
for tensor_key, tensor_value in original_state_dict.items():
# The transformer implementation gives the model a unique name, rather than overwiriting 'roberta'
if tensor_key.startswith("""roberta.""" ):
_UpperCAmelCase = """roberta_prelayernorm.""" + tensor_key[len("""roberta.""" ) :]
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith(""".self.LayerNorm.weight""" ) or tensor_key.endswith(""".self.LayerNorm.bias""" ):
continue
_UpperCAmelCase = tensor_value
_UpperCAmelCase = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=__snake_case , config=__snake_case , state_dict=__snake_case )
model.save_pretrained(__snake_case )
# convert tokenizer
_UpperCAmelCase = AutoTokenizer.from_pretrained(__snake_case )
tokenizer.save_pretrained(__snake_case )
# CLI entry point: parse source repo and output path, then convert.
if __name__ == "__main__":
__a: Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint-repo''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__a: Dict = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
"""simple docstring"""
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
# pytest fixtures: each writes a small CSV under tmp_path and returns its
# path as a string. NOTE(review): mangled — every fixture is named
# `__lowercase` (each redefinition shadows the last), the parameter should be
# `tmp_path`, and the locals `__A` overwrite each other so `f.write(...)`
# receives the wrong object. Left byte-identical; restore upstream names.
@pytest.fixture
def __lowercase ( snake_case_ : Optional[int] ) ->str:
'''Well-formed two-column CSV.'''
__A : Tuple = tmp_path / '''file.csv'''
__A : Union[str, Any] = textwrap.dedent(
'''\
header1,header2
1,2
10,20
''' )
with open(snake_case_ ,'''w''' ) as f:
f.write(snake_case_ )
return str(snake_case_ )
@pytest.fixture
def __lowercase ( snake_case_ : str ) ->Optional[Any]:
'''CSV with a trailing comma (ragged row) to trigger a parse error.'''
__A : List[str] = tmp_path / '''malformed_file.csv'''
__A : Optional[Any] = textwrap.dedent(
'''\
header1,header2
1,2
10,20,
''' )
with open(snake_case_ ,'''w''' ) as f:
f.write(snake_case_ )
return str(snake_case_ )
@pytest.fixture
def __lowercase ( snake_case_ : Any ,snake_case_ : Optional[int] ) ->Optional[Any]:
'''CSV whose single "image" column points at an image file path.'''
__A : List[str] = tmp_path / '''csv_with_image.csv'''
__A : List[str] = textwrap.dedent(
F"""\
image
{image_file}
""" )
with open(snake_case_ ,'''w''' ) as f:
f.write(snake_case_ )
return str(snake_case_ )
@pytest.fixture
def __lowercase ( snake_case_ : Optional[int] ) ->Tuple:
'''CSV with a string "label" column ("good"/"bad").'''
__A : Dict = tmp_path / '''csv_with_label.csv'''
__A : int = textwrap.dedent(
'''\
label
good
bad
good
''' )
with open(snake_case_ ,'''w''' ) as f:
f.write(snake_case_ )
return str(snake_case_ )
@pytest.fixture
def __lowercase ( snake_case_ : Dict ) ->Optional[Any]:
'''CSV with a space-separated "int_list" column.'''
__A : Optional[int] = tmp_path / '''csv_with_int_list.csv'''
__A : Union[str, Any] = textwrap.dedent(
'''\
int_list
1 2 3
4 5 6
7 8 9
''' )
with open(snake_case_ ,'''w''' ) as f:
f.write(snake_case_ )
return str(snake_case_ )
# Tests for the datasets Csv packaged module. NOTE(review): mangled — all
# three tests share the name `__lowercase`, fixture parameters were renamed
# to `snake_case_`, and `.straint(...)` on the ClassLabel test looks like a
# corrupted `.str2int(...)`. Left byte-identical.
def __lowercase ( snake_case_ : List[str] ,snake_case_ : Optional[int] ,snake_case_ : Optional[Any] ) ->Dict:
'''A malformed CSV should raise while generating tables and log the file.'''
__A : str = Csv()
__A : List[Any] = csv._generate_tables([[csv_file, malformed_csv_file]] )
with pytest.raises(snake_case_ ,match='''Error tokenizing data''' ):
for _ in generator:
pass
assert any(
record.levelname == '''ERROR'''
and '''Failed to read file''' in record.message
and os.path.basename(snake_case_ ) in record.message
for record in caplog.records )
@require_pil
def __lowercase ( snake_case_ : Optional[int] ) ->Union[str, Any]:
'''An "image" column cast via Features should decode to Image entries.'''
with open(snake_case_ ,encoding='''utf-8''' ) as f:
__A : int = f.read().splitlines()[1]
__A : Optional[Any] = Csv(encoding='''utf-8''' ,features=Features({'''image''': Image()} ) )
__A : Union[str, Any] = csv._generate_tables([[csv_file_with_image]] )
__A : Optional[Any] = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('''image''' ).type == Image()()
__A : Optional[int] = pa_table.to_pydict()['''image''']
assert generated_content == [{"path": image_file, "bytes": None}]
def __lowercase ( snake_case_ : Any ) ->List[Any]:
'''A "label" column cast via Features should become ClassLabel ints.'''
with open(snake_case_ ,encoding='''utf-8''' ) as f:
__A : Optional[int] = f.read().splitlines()[1:]
__A : List[Any] = Csv(encoding='''utf-8''' ,features=Features({'''label''': ClassLabel(names=['''good''', '''bad'''] )} ) )
__A : str = csv._generate_tables([[csv_file_with_label]] )
__A : Any = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('''label''' ).type == ClassLabel(names=['''good''', '''bad'''] )()
__A : Optional[Any] = pa_table.to_pydict()['''label''']
assert generated_content == [ClassLabel(names=['''good''', '''bad'''] ).straint(snake_case_ ) for label in labels]
def __lowercase ( snake_case_ : str ) ->Dict:
'''A converters= lambda should parse "int_list" into list<int> columns.'''
__A : Optional[int] = Csv(encoding='''utf-8''' ,sep=''',''' ,converters={'''int_list''': lambda snake_case_ : [int(snake_case_ ) for i in x.split()]} )
__A : Dict = csv._generate_tables([[csv_file_with_int_list]] )
__A : Dict = pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field('''int_list''' ).type )
__A : int = pa_table.to_pydict()['''int_list''']
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 177 | 0 |
'''simple docstring'''
from random import randint
from tempfile import TemporaryFile
import numpy as np
# In-place quicksort that counts element comparisons, then a driver that
# sorts 100 normally-distributed samples round-tripped through a temp file.
# NOTE(review): mangled — both functions are named `_lowerCamelCase` (the
# second shadows the first), locals collapse onto `A`, and the driver calls
# the undefined upstream names `_in_place_quick_sort`/`_in_place_partition`.
# Left byte-identical.
def _lowerCamelCase( UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] ) -> str:
'''Quicksort a[start:end+1] in place; return the comparison count.'''
A : str = 0
if start < end:
# Randomized pivot, swapped to the end before partitioning.
A : Optional[Any] = randint(UpperCamelCase__ , UpperCamelCase__ )
A : Optional[int] = a[end]
A : List[str] = a[pivot]
A : Dict = temp
A, A : str = _in_place_partition(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
count += _in_place_quick_sort(UpperCamelCase__ , UpperCamelCase__ , p - 1 )
count += _in_place_quick_sort(UpperCamelCase__ , p + 1 , UpperCamelCase__ )
return count
def _lowerCamelCase( UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Tuple ) -> Optional[Any]:
'''Lomuto-style partition; return (pivot index + 1, comparison count).'''
A : Tuple = 0
A : Optional[int] = randint(UpperCamelCase__ , UpperCamelCase__ )
A : Union[str, Any] = a[end]
A : Optional[Any] = a[pivot]
A : Dict = temp
A : Optional[Any] = start - 1
for index in range(UpperCamelCase__ , UpperCamelCase__ ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
A : List[str] = new_pivot_index + 1
A : Union[str, Any] = a[new_pivot_index]
A : Optional[Any] = a[index]
A : Optional[Any] = temp
# Move the pivot into its final slot.
A : str = a[new_pivot_index + 1]
A : Any = a[end]
A : Optional[int] = temp
return new_pivot_index + 1, count
snake_case_ = TemporaryFile()
snake_case_ = 1_00 # 1000 elements are to be sorted
snake_case_ , snake_case_ = 0, 1 # mean and standard deviation
snake_case_ = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)
outfile.seek(0) # using the same array
snake_case_ = np.load(outfile)
snake_case_ = len(M) - 1
snake_case_ = _in_place_quick_sort(M, 0, r)
print(
"No of Comparisons for 100 elements selected from a standard normal distribution"
"is :"
)
print(z)
| 537 |
'''simple docstring'''
import requests
# Fetch top BBC headlines from newsapi.org and print them numbered.
# NOTE(review): mangled — the API-URL constant was renamed `snake_case_` (the
# function references the undefined `_NEWS_API`), the function name should be
# `fetch_bbc_news`, and `bbc_news_page` is never bound (the response local
# was renamed `A`). Left byte-identical; network I/O only.
snake_case_ = """https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="""
def _lowerCamelCase( UpperCamelCase__ : str ) -> None:
# fetching a list of articles in json format
A : Any = requests.get(_NEWS_API + bbc_news_api_key ).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page['''articles'''] , 1 ):
print(F'''{i}.) {article['title']}''' )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key="""<Your BBC News API key goes here>""")
| 537 | 1 |
# Project Euler digit-sum sequence: memoized "jump" machinery over the
# digit representation of a_i. NOTE(review): mangled throughout — all four
# functions share the name `snake_case_` with duplicate parameter names
# (a SyntaxError), and multi-target unpackings were collapsed (e.g.
# `lowercase__ : str = 0, 0` binds a tuple where upstream unpacked two
# names). Left byte-identical; restore from upstream before running.
__a : Dict = range(2, 2_0 + 1)
__a : str = [1_0**k for k in range(ks[-1] + 1)]
__a : List[str] = {}
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
'''Advance the sequence by the largest cached jump that fits; recurse to
    smaller k or sequential computation for the remainder, caching jumps.'''
lowercase__ : Union[str, Any] = sum(a_i[j] for j in range(snake_case__ ,len(snake_case__ ) ) )
lowercase__ : Dict = sum(a_i[j] * base[j] for j in range(min(len(snake_case__ ) ,snake_case__ ) ) )
lowercase__ : str = 0, 0
lowercase__ : Optional[Any] = n - i
lowercase__ : int = memo.get(snake_case__ )
if sub_memo is not None:
lowercase__ : Any = sub_memo.get(snake_case__ )
if jumps is not None and len(snake_case__ ) > 0:
# find and make the largest jump without going over
lowercase__ : Optional[Any] = -1
for _k in range(len(snake_case__ ) - 1 ,-1 ,-1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
lowercase__ : Tuple = _k
break
if max_jump >= 0:
lowercase__ : Tuple = jumps[max_jump]
# since the difference between jumps is cached, add c
lowercase__ : Optional[int] = diff + c
for j in range(min(snake_case__ ,len(snake_case__ ) ) ):
lowercase__ : Union[str, Any] = divmod(snake_case__ ,10 )
if new_c > 0:
add(snake_case__ ,snake_case__ ,snake_case__ )
else:
lowercase__ : Tuple = []
else:
lowercase__ : List[Any] = {c: []}
lowercase__ : str = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
lowercase__ : List[Any] = next_term(snake_case__ ,k - 1 ,i + dn ,snake_case__ )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
lowercase__ : str = compute(snake_case__ ,snake_case__ ,i + dn ,snake_case__ )
diff += _diff
dn += terms_jumped
lowercase__ : List[Any] = sub_memo[c]
# keep jumps sorted by # of terms skipped
lowercase__ : Union[str, Any] = 0
while j < len(snake_case__ ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(snake_case__ ,(diff, dn, k) )
return (diff, dn)
# Sequential fallback (`compute`), digit-add helper (`add`), and the
# `solution(n)` driver for the same Euler problem. NOTE(review): same
# mangling as above — duplicate parameter names and collapsed unpackings;
# left byte-identical.
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) -> int:
'''Compute terms one at a time from index i toward n, updating digits.'''
if i >= n:
return 0, i
if k > len(snake_case__ ):
a_i.extend([0 for _ in range(k - len(snake_case__ ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
lowercase__ : Optional[int] = i
lowercase__ : Optional[int] = 0, 0, 0
for j in range(len(snake_case__ ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
lowercase__ : Tuple = ds_c + ds_b
diff += addend
lowercase__ : int = 0
for j in range(snake_case__ ):
lowercase__ : Dict = a_i[j] + addend
lowercase__ : str = divmod(snake_case__ ,10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(snake_case__ ,snake_case__ ,snake_case__ )
return diff, i - start_i
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) -> str:
'''Add *addend* into the little-endian digit list starting at index k,
    propagating carries and appending new high digits as needed.'''
for j in range(snake_case__ ,len(snake_case__ ) ):
lowercase__ : List[str] = digits[j] + addend
if s >= 10:
lowercase__ : Dict = divmod(snake_case__ ,10 )
lowercase__ : List[Any] = addend // 10 + quotient
else:
lowercase__ : Optional[int] = s
lowercase__ : int = addend // 10
if addend == 0:
break
while addend > 0:
lowercase__ : List[Any] = divmod(snake_case__ ,10 )
digits.append(snake_case__ )
def snake_case_ ( SCREAMING_SNAKE_CASE_ = 10**15 ) -> Dict:
'''Jump through the sequence until index n, then assemble a_n from digits.'''
lowercase__ : Optional[Any] = [1]
lowercase__ : Tuple = 1
lowercase__ : Union[str, Any] = 0
while True:
lowercase__ : Optional[int] = next_term(snake_case__ ,20 ,i + dn ,snake_case__ )
dn += terms_jumped
if dn == n - i:
break
lowercase__ : Tuple = 0
for j in range(len(snake_case__ ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(f'{solution() = }')
def UpperCamelCase(number: float, digit_amount: int) -> float:
    """Isolate the fractional (decimal) part of *number*.

    Reconstructed from the corrupted original, whose signature repeated the
    parameter name ``snake_case__`` (a SyntaxError) and whose body referenced
    the undefined name ``number``; behavior matches the demo calls below.

    Args:
        number: Value whose fractional part is wanted. ``int`` truncation is
            used, so the result keeps the sign of *number* (e.g. -14.123 -> -0.123).
        digit_amount: If > 0, round the fraction to this many decimal places;
            otherwise return it unrounded.

    Returns:
        The fractional part of *number*, optionally rounded.
    """
    fraction = number - int(number)
    if digit_amount > 0:
        return round(fraction, digit_amount)
    return fraction
# Demo calls exercising both branches (rounded and unrounded fractional
# parts, positive/negative/zero inputs). NOTE(review): calls the upstream
# name `decimal_isolate`, but the function above was renamed by the
# obfuscation, so this block would raise NameError as-is.
if __name__ == "__main__":
print(decimal_isolate(1.5_3, 0))
print(decimal_isolate(3_5.3_4_5, 1))
print(decimal_isolate(3_5.3_4_5, 2))
print(decimal_isolate(3_5.3_4_5, 3))
print(decimal_isolate(-1_4.7_8_9, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-1_4.1_2_3, 1))
print(decimal_isolate(-1_4.1_2_3, 2))
print(decimal_isolate(-1_4.1_2_3, 3))
| 455 | 0 |
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
# transformers-cli "download" subcommand: registers CLI arguments and, when
# run, prefetches a model + tokenizer into the cache.
# NOTE(review): mangled — the factory returns the undefined upstream name
# `DownloadCommand` (the class below was renamed), and in `register_subcommand`
# the parser local collapses onto `snake_case`, leaving `download_parser`
# undefined. Left byte-identical.
def lowerCAmelCase_ ( __A : Dict ):
'''Factory wired into argparse set_defaults(func=...) to build the command.'''
return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class SCREAMING_SNAKE_CASE ( snake_case ):
'''CLI command that downloads a model and tokenizer into the local cache.'''
@staticmethod
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ ):
'''Register the "download" subparser and its arguments.'''
snake_case: Tuple = parser.add_parser('download' )
download_parser.add_argument(
'--cache-dir' , type=SCREAMING_SNAKE_CASE__ , default=SCREAMING_SNAKE_CASE__ , help='Path to location to store the models' )
download_parser.add_argument(
'--force' , action='store_true' , help='Force the model to be download even if already in cache-dir' )
download_parser.add_argument(
'--trust-remote-code' , action='store_true' , help='Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine' , )
download_parser.add_argument('model' , type=SCREAMING_SNAKE_CASE__ , help='Name of the model to download' )
download_parser.set_defaults(func=SCREAMING_SNAKE_CASE__ )
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''Store model id, cache dir, force flag, and trust-remote-code flag.'''
snake_case: Union[str, Any] = model
snake_case: Dict = cache
snake_case: Any = force
snake_case: Optional[Any] = trust_remote_code
def _UpperCamelCase ( self ):
'''Download model weights and tokenizer files (network I/O).'''
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
'''simple docstring'''
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
# fsspec registry / path-extraction tests for datasets.filesystems.
# NOTE(review): mangled — all tests share the name `lowerCAmelCase_` and
# several locals collapse onto `snake_case` (e.g. `mock_bucket` is undefined
# in the third test). Left byte-identical.
def lowerCAmelCase_ ( __A : Tuple ):
'''With the mock fixture active, both mock and bz2 protocols are registered.'''
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def lowerCAmelCase_ ( ):
'''Without the fixture, only the builtin bz2 protocol remains registered.'''
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def lowerCAmelCase_ ( ):
'''extract_path_from_uri strips the s3:// scheme and leaves local paths alone.'''
snake_case: Union[str, Any] = 'mock-s3-bucket'
snake_case: int = f"""s3://{mock_bucket}"""
snake_case: Any = extract_path_from_uri(__A )
assert dataset_path.startswith('s3://' ) is False
snake_case: Union[str, Any] = './local/path'
snake_case: Union[str, Any] = extract_path_from_uri(__A )
assert dataset_path == new_dataset_path
def lowerCAmelCase_ ( __A : Any ):
'''is_remote_filesystem is True for the remote fixture, False for local file://.'''
snake_case: List[str] = is_remote_filesystem(__A )
assert is_remote is True
snake_case: int = fsspec.filesystem('file' )
snake_case: int = is_remote_filesystem(__A )
assert is_remote is False
@pytest.mark.parametrize('compression_fs_class' , __A )
def lowerCAmelCase_ ( __A : Optional[int] , __A : int , __A : str , __A : Optional[Any] , __A : List[str] , __A : Optional[Any] , __A : Optional[int] ):
'''simple docstring'''
snake_case: Optional[Any] = {'gzip': gz_file, 'xz': xz_file, 'zstd': zstd_file, 'bz2': bza_file, 'lz4': lza_file}
snake_case: Optional[int] = input_paths[compression_fs_class.protocol]
if input_path is None:
snake_case: str = f"""for '{compression_fs_class.protocol}' compression protocol, """
if compression_fs_class.protocol == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(__A )
snake_case: List[str] = fsspec.filesystem(compression_fs_class.protocol , fo=__A )
assert isinstance(__A , __A )
snake_case: Any = os.path.basename(__A )
snake_case: int = expected_filename[: expected_filename.rindex('.' )]
assert fs.glob('*' ) == [expected_filename]
with fs.open(__A , 'r' , encoding='utf-8' ) as f, open(__A , encoding='utf-8' ) as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize('protocol' , ['zip', 'gzip'] )
def lowerCAmelCase_ ( __A : Any , __A : int , __A : int ):
'''simple docstring'''
snake_case: List[str] = {'zip': zip_jsonl_path, 'gzip': jsonl_gz_path}
snake_case: str = compressed_file_paths[protocol]
snake_case: Dict = 'dataset.jsonl'
snake_case: Optional[Any] = f"""{protocol}://{member_file_path}::{compressed_file_path}"""
snake_case , *snake_case: List[Any] = fsspec.get_fs_token_paths(__A )
assert fs.isfile(__A )
assert not fs.isfile('non_existing_' + member_file_path )
@pytest.mark.integration
def lowerCAmelCase_ ( __A : Optional[Any] , __A : Union[str, Any] , __A : Union[str, Any] , __A : List[Any] ):
'''simple docstring'''
snake_case: Tuple = hf_api.dataset_info(__A , token=__A )
snake_case: List[str] = HfFileSystem(repo_info=__A , token=__A )
assert sorted(hffs.glob('*' ) ) == [".gitattributes", "data"]
assert hffs.isdir('data' )
assert hffs.isfile('.gitattributes' ) and hffs.isfile('data/text_data.txt' )
with open(__A ) as f:
assert hffs.open('data/text_data.txt' , 'r' ).read() == f.read()
def lowerCAmelCase_ ( ):
    """Re-registering a protocol should warn when ``datasets.filesystems`` reloads."""
    # FIX(review): the body referenced `protocol` and called
    # `register_implementation(__A, __A, clobber=__A)` with undefined placeholders;
    # restored the locals/arguments. The warning category (UserWarning) is taken
    # from the upstream datasets test — TODO confirm against the library.
    protocol = 'bz2'
    # Import module
    import datasets.filesystems

    # Overwrite protocol and reload
    register_implementation(protocol , None , clobber=True )
    with pytest.warns(UserWarning ) as warning_info:
        importlib.reload(datasets.filesystems )
    assert len(warning_info ) == 1
    assert (
        str(warning_info[0].message )
        == f"""A filesystem protocol was already set for {protocol} and will be overwritten."""
    )
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class lowerCAmelCase__ ( AbstractDatasetReader ):
    """Reads JSON / JSON-Lines files into a ``Dataset`` via the packaged ``Json`` builder.

    FIX(review): the base class was mangled to the undefined name
    ``UpperCAmelCase_`` — restored to ``AbstractDatasetReader`` (imported above).
    Parameter and local names are restored so the body's references resolve.
    """

    def __init__(
        self ,
        path_or_paths ,
        split = None ,
        features = None ,
        cache_dir = None ,
        keep_in_memory = False ,
        streaming = False ,
        field = None ,
        num_proc = None ,
        **kwargs ,
    ):
        super().__init__(
            path_or_paths , split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , num_proc=num_proc , **kwargs , )
        self.field = field
        # Normalize to the {split_name: paths} mapping the builder expects.
        path_or_paths = path_or_paths if isinstance(path_or_paths , dict ) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir , data_files=path_or_paths , features=features , field=field , **kwargs , )

    def __UpperCamelCase ( self ):
        """Build and return the dataset (this is the reader's ``read()`` entry point)."""
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
            dataset = self.builder.as_dataset(
                split=self.split , verification_mode=verification_mode , in_memory=self.keep_in_memory )
        return dataset
class lowerCAmelCase__ :
    """Writes a ``Dataset`` to JSON (JSON-Lines or any pandas ``to_json`` orient),
    sequentially or sharded over a process pool.

    FIX(review): the two private methods were mangled to the same name as the
    public writer while the call sites used ``self._batch_json`` / ``self._write``
    (AttributeError) — the helper names are restored from those call sites.
    """

    def __init__( self , dataset , path_or_buf , batch_size = None , num_proc = None , **to_json_kwargs , ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f'''num_proc {num_proc} must be an integer > 0.''' )
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        # Fall back to the library-wide default batch size when none is given.
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = 'utf-8'
        self.to_json_kwargs = to_json_kwargs

    def __UpperCamelCase ( self ):
        """Write the dataset and return the number of bytes written."""
        _ = self.to_json_kwargs.pop('path_or_buf' , None )
        orient = self.to_json_kwargs.pop('orient' , 'records' )
        # JSON-Lines is the default only for the "records" orient.
        lines = self.to_json_kwargs.pop('lines' , True if orient == 'records' else False )
        index = self.to_json_kwargs.pop('index' , False if orient in ['split', 'table'] else True )
        compression = self.to_json_kwargs.pop('compression' , None )
        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f'''`datasets` currently does not support {compression} compression''' )
        if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
            with fsspec.open(self.path_or_buf , 'wb' , compression=compression ) as buffer:
                written = self._write(file_obj=buffer , orient=orient , lines=lines , index=index , **self.to_json_kwargs )
        else:
            if compression:
                raise NotImplementedError(
                    f'''The compression parameter is not supported when writing to a buffer, but compression={compression}'''
                    ' was passed. Please provide a local path instead.' )
            written = self._write(
                file_obj=self.path_or_buf , orient=orient , lines=lines , index=index , **self.to_json_kwargs )
        return written

    def _batch_json( self , args ):
        """Serialize one slice of rows to encoded JSON bytes (worker-side helper)."""
        offset , orient , lines , index , to_json_kwargs = args
        batch = query_table(
            table=self.dataset.data , key=slice(offset , offset + self.batch_size ) , indices=self.dataset._indices , )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None , orient=orient , lines=lines , index=index , **to_json_kwargs )
        if not json_str.endswith('\n' ):
            json_str += "\n"
        return json_str.encode(self.encoding )

    def _write( self , file_obj , orient , lines , index , **to_json_kwargs , ):
        """Stream serialized batches into ``file_obj``; returns bytes written."""
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0 , len(self.dataset ) , self.batch_size ) , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating json from Arrow format' , ):
                json_bytes = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
                written += file_obj.write(json_bytes )
        else:
            num_rows , batch_size = len(self.dataset ), self.batch_size
            with multiprocessing.Pool(self.num_proc ) as pool:
                for json_bytes in logging.tqdm(
                    pool.imap(
                        self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , num_rows , batch_size )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating json from Arrow format' , ):
                    written += file_obj.write(json_bytes )
        return written
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class UpperCAmelCase ( unittest.TestCase ):
    """Holds the configuration used to build a DETR image processor in the tests
    and computes the (height, width) images are expected to be resized to.

    FIX(review): ``__init__`` had every parameter mangled to the duplicate name
    ``__A`` (a SyntaxError) while the body used the real names; the two methods
    are renamed back to ``prepare_image_processor_dict`` / ``get_expected_values``
    because those are the names the call sites (including the recursive call
    below) use.
    """

    def __init__(
        self ,
        parent ,
        batch_size=7 ,
        num_channels=3 ,
        min_resolution=30 ,
        max_resolution=400 ,
        do_resize=True ,
        size=None ,
        do_rescale=True ,
        rescale_factor=1 / 255 ,
        do_normalize=True ,
        image_mean=None ,
        image_std=None ,
        do_pad=True ,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1_333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        # Avoid mutable default arguments: materialize the default means/stds here.
        self.image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        self.image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to instantiate the image processor."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the post-resize (height, width) for one image or a batch.

        For a batch, returns the max height/width over the individual images
        (images are padded up to that size).
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                # assumes channels-first arrays/tensors: shape is (C, H, W)
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w)
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h)
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width


# Readable alias: the test class below instantiates the tester under this name.
DetrImageProcessingTester = UpperCAmelCase
@require_torch
@require_vision
class UpperCAmelCase ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    """Tests for ``DetrImageProcessor``: resize/rescale/normalize/pad behavior and
    COCO detection/panoptic annotation handling.

    FIX(review): the obfuscated version referenced the undefined placeholder
    ``__A`` throughout, gave every test method the same (shadowing, non-``test_``)
    name, and stored the processor class in ``_A`` while reading
    ``self.image_processing_class`` — all names restored.
    """

    image_processing_class = DetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'do_rescale'))
        self.assertTrue(hasattr(image_processing, 'rescale_factor'))
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'do_pad'))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'shortest_edge': 18, 'longest_edge': 1_333})
        self.assertEqual(image_processor.do_pad, True)
        # Overriding size/max_size via kwargs must be honored.
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False)
        self.assertEqual(image_processor.size, {'shortest_edge': 42, 'longest_edge': 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt', 'r') as f:
            target = json.loads(f.read())
        target = {'image_id': 39_769, 'annotations': target}
        # encode them
        image_processing = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50')
        encoding = image_processing(images=image, annotations=target, return_tensors='pt')
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1_066])
        self.assertEqual(encoding['pixel_values'].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding['labels'][0]['boxes'].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39_769])
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], expected_image_id))
        # verify is_crowd
        expected_iscrowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], expected_iscrowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1_066])
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt', 'r') as f:
            target = json.loads(f.read())
        target = {'file_name': '000000039769.png', 'image_id': 39_769, 'segments_info': target}
        masks_path = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic')
        # encode them
        image_processing = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50-panoptic')
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors='pt')
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1_066])
        self.assertEqual(encoding['pixel_values'].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding['labels'][0]['boxes'].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39_769])
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], expected_image_id))
        # verify is_crowd
        expected_iscrowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], expected_iscrowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], expected_class_labels))
        # verify masks
        expected_masks_sum = 822_873
        self.assertEqual(encoding['labels'][0]['masks'].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1_066])
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'], expected_size))
| 126 | 0 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class lowerCAmelCase ( unittest.TestCase ):
    """Fast (dummy-model) tests for the *safe* Stable Diffusion pipeline.

    FIX(review): parameters/locals were mangled to the undefined placeholder
    ``a__`` (including a duplicate-``*args`` SyntaxError in ``dummy_extractor``),
    and the property/test methods all shared one shadowing name while call sites
    used ``self.dummy_*`` — names restored from those call sites.
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        """A deterministic random (1, 3, 32, 32) image tensor."""
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNetaDConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        return CLIPTextModel(config)

    @property
    def dummy_extractor(self):
        # Stand-in feature extractor: returns an object with empty pixel_values
        # and a chainable .to(device).
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract

    def test_safe_diffusion_ddim(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_pndm(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        # make sure here that pndm scheduler skips prk
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_no_safety_checker(self):
        pipe = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=None)
        assert isinstance(pipe, StableDiffusionPipeline)
        assert isinstance(pipe.scheduler, LMSDiscreteScheduler)
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = StableDiffusionPipeline.from_pretrained(tmpdirname)
        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_fp16(self):
        unet = self.dummy_cond_unet
        # make sure here that pndm scheduler skips prk
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()
        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        image = sd_pipe([prompt], num_inference_steps=2, output_type="np").images
        assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class lowerCAmelCase ( unittest.TestCase ):
    """Nightly GPU tests for safe-latent-diffusion guidance on full SD v1.5.

    FIX(review): ``a__`` placeholders restored (device, seed, guidance scale,
    ``safety_checker=None``); the LMS scheduler built from the pipeline config
    was previously assigned to a throwaway local — it is now installed on the
    pipeline, which is the only reading under which building it has any effect.
    """

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_harm_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = (
            "portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
            " coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
            " anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
            " children from bahnhof zoo, detailed "
        )
        seed = 4003660346
        guidance_scale = 7
        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=0, )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7, )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "padme amidala taking a bath artwork, safe for work, no nudity"
        seed = 2734971755
        guidance_scale = 7
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=0, )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7, )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safetychecker_safe_stable_diffusion(self):
        # Default safety checker stays enabled here: without safe guidance the
        # output is fully blacked out (all zeros).
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = (
            "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
            " leyendecker"
        )
        seed = 1044355234
        guidance_scale = 12
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=0, )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7, )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561])
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 568 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
# FIX(review): the final _LazyModule call referenced `_import_structure`, which
# was never defined — the successive `snake_case = ...` assignments overwrote
# each other (NameError at import). Restored the canonical transformers
# lazy-import pattern: one dict mapping submodule -> exported names.
_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convnext"] = [
        "CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvNextForImageClassification",
        "ConvNextModel",
        "ConvNextPreTrainedModel",
        "ConvNextBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convnext"] = [
        "TFConvNextForImageClassification",
        "TFConvNextModel",
        "TFConvNextPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_convnext import ConvNextFeatureExtractor
        from .image_processing_convnext import ConvNextImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convnext import (
            CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvNextBackbone,
            ConvNextForImageClassification,
            ConvNextModel,
            ConvNextPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8) -> str:
    """
    Return a random password of `length` characters drawn, cryptographically
    securely via `secrets`, from letters, digits and punctuation.
    """
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


def alternative_password_generator(chars_incl: str, i: int) -> str:
    """
    Build a password of total length `i` that is guaranteed to contain every
    character of `chars_incl`; the remainder is split between letters, digits
    and punctuation, then shuffled.
    """
    # Password Generator = full boot with random_number, random_letters, and
    # random_character FUNCTIONS
    # The required characters count toward the total length.
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    """Return `i` securely-chosen characters from `chars_incl`."""
    return "".join(secrets.choice(chars_incl) for _ in range(i))


def random_number(chars_incl, i):
    pass  # Put your code here...


def random_letters(chars_incl, i):
    pass  # Put your code here...


def random_characters(chars_incl, i):
    pass  # Put your code here...


# Passwords should contain UPPERCASE, lowerase
# numbers, and special characters
def is_strong_password(password: str, min_length: int = 8) -> bool:
    """
    A password is strong when it is at least `min_length` characters long and
    contains an uppercase letter, a lowercase letter, a digit and a special
    character.
    """
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False

    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)

    return upper and lower and num and spec_char


def main() -> None:
    """Interactive driver: ask for a length and required characters."""
    length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input(
        "Please indicate the characters that must be in your password: "
    ).strip()
    print("Password generated:", password_generator(length))
    print(
        "Alternative Password generated:",
        alternative_password_generator(chars_incl, length),
    )
    print("[If you are thinking of using this passsword, You better save it.]")


if __name__ == "__main__":
    main()
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy import structure for the Nezha model.
_import_structure = {
    "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}

# PyTorch models are only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_nezha"] = [
        "NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NezhaForNextSentencePrediction",
        "NezhaForMaskedLM",
        "NezhaForPreTraining",
        "NezhaForMultipleChoice",
        "NezhaForQuestionAnswering",
        "NezhaForSequenceClassification",
        "NezhaForTokenClassification",
        "NezhaModel",
        "NezhaPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only.
    from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nezha import (
            NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
            NezhaModel,
            NezhaPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    """
    A vector of real components backed by a Python list.

    Supports `len`, `str`, `+`, `-`, `*` (scalar multiplication or dot
    product), copying, component access/mutation, Euclidean length and
    the angle to another vector.
    """

    def __init__(self, components: Collection[float] | None = None) -> None:
        """Create a vector from an iterable of components (empty by default)."""
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self) -> int:
        """Return the number of components (the dimension)."""
        return len(self.__components)

    def __str__(self) -> str:
        """Return the vector as '(c1,c2,...)'."""
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other: Vector) -> Vector:
        """Component-wise sum; raises on size mismatch."""
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")

    def __sub__(self, other: Vector) -> Vector:
        """Component-wise difference; raises on size mismatch."""
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")

    @overload
    def __mul__(self, other: float) -> Vector:
        ...

    @overload
    def __mul__(self, other: Vector) -> float:
        ...

    def __mul__(self, other: float | Vector) -> float | Vector:
        """Scalar multiplication, or the dot product when `other` is a Vector."""
        if isinstance(other, (float, int)):
            scaled = [c * other for c in self.__components]
            return Vector(scaled)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception("invalid operand!")

    def copy(self) -> Vector:
        """Return a copy of this vector."""
        return Vector(self.__components)

    def component(self, i: int) -> float:
        """Return the i-th component (negative indices allowed)."""
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")

    def change_component(self, pos: int, value: float) -> None:
        """Set the component at `pos` to `value`."""
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        """Return the Euclidean (L2) norm; raises on the empty vector."""
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other: Vector, deg: bool = False) -> float:
        """Return the angle to `other` in radians, or degrees when `deg`."""
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)
def zero_vector(dimension: int) -> Vector:
    """Return the zero vector of the given dimension."""
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension: int, pos: int) -> Vector:
    """Return the unit basis vector with a 1 at index `pos`, 0 elsewhere."""
    assert isinstance(dimension, int) and (isinstance(pos, int))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    """Return `scalar * x + y` (the classic BLAS axpy operation)."""
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y


def random_vector(n: int, a: int, b: int) -> Vector:
    """Return a vector of `n` random integer components in [a, b]."""
    # NOTE(review): the obfuscated source seeds `random` with one of the
    # arguments; seeding with `n` preserved here — confirm against callers.
    random.seed(n)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)
class Matrix:
    """
    A w x h real matrix stored as a list of row lists.

    Supports `str`, `+`, `-`, `*` (matrix-scalar and matrix-vector),
    component access/mutation, minors, cofactors and the determinant
    (by cofactor expansion along the first row).
    """

    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        """Wrap `matrix` with the declared width `w` and height `h`."""
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self) -> str:
        """Return the matrix as rows of '|a,b,...|' lines."""
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans

    def __add__(self, other: Matrix) -> Matrix:
        """Component-wise sum; raises on dimension mismatch."""
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrix must have the same dimension!")

    def __sub__(self, other: Matrix) -> Matrix:
        """Component-wise difference; raises on dimension mismatch."""
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")

    @overload
    def __mul__(self, other: float) -> Matrix:
        ...

    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...

    def __mul__(self, other: float | Vector) -> Vector | Matrix:
        """Matrix-vector product, or matrix-scalar product."""
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!"
                )
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None

    def height(self) -> int:
        """Return the number of rows."""
        return self.__height

    def width(self) -> int:
        """Return the number of columns."""
        return self.__width

    def component(self, x: int, y: int) -> float:
        """Return element (x, y); raises when out of bounds."""
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("change_component: indices out of bounds")

    def change_component(self, x: int, y: int, value: float) -> None:
        """Set element (x, y); raises when out of bounds."""
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")

    def minor(self, x: int, y: int) -> float:
        """Return the (x, y) minor: the determinant with row x / column y removed."""
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        """Return the signed (x, y) cofactor."""
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")

    def determinant(self) -> float:
        """Return the determinant (cofactor expansion along row 0)."""
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)
def square_zero_matrix(n: int) -> Matrix:
    """Return the n x n zero matrix."""
    ans = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    """Return a width x height matrix of random integers in [a, b]."""
    # NOTE(review): the obfuscated source seeds `random` with one of the
    # arguments; seeding with `width` preserved here — confirm against callers.
    random.seed(width)
    matrix = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
# Silence PyTorch's "lr_scheduler.step() before optimizer.step()" UserWarning:
# the wrapper below intentionally controls when the scheduler is stepped.
warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler")
class AcceleratedScheduler:
    """
    Wraps a learning-rate scheduler so it only steps when its optimizer(s)
    actually performed a step (e.g. it is skipped when mixed-precision
    gradient scaling skipped the optimizer step), and steps the right number
    of times under multi-process training.
    """

    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                # Keep the internal step count in sync even on accumulation steps.
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    # Passthrough helpers that mirror the wrapped scheduler's API.
    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1_024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _snake_case ( A , A , A=None ) -> Optional[int]:
lowerCAmelCase__ = {}
if isinstance(A , A ):
lowerCAmelCase__ = model.mobilenet_va
else:
lowerCAmelCase__ = model
lowerCAmelCase__ = '''MobilenetV1/Conv2d_0/'''
lowerCAmelCase__ = backbone.conv_stem.convolution.weight
lowerCAmelCase__ = backbone.conv_stem.normalization.bias
lowerCAmelCase__ = backbone.conv_stem.normalization.weight
lowerCAmelCase__ = backbone.conv_stem.normalization.running_mean
lowerCAmelCase__ = backbone.conv_stem.normalization.running_var
for i in range(13 ):
lowerCAmelCase__ = i + 1
lowerCAmelCase__ = i * 2
lowerCAmelCase__ = backbone.layer[pt_index]
lowerCAmelCase__ = F"""MobilenetV1/Conv2d_{tf_index}_depthwise/"""
lowerCAmelCase__ = pointer.convolution.weight
lowerCAmelCase__ = pointer.normalization.bias
lowerCAmelCase__ = pointer.normalization.weight
lowerCAmelCase__ = pointer.normalization.running_mean
lowerCAmelCase__ = pointer.normalization.running_var
lowerCAmelCase__ = backbone.layer[pt_index + 1]
lowerCAmelCase__ = F"""MobilenetV1/Conv2d_{tf_index}_pointwise/"""
lowerCAmelCase__ = pointer.convolution.weight
lowerCAmelCase__ = pointer.normalization.bias
lowerCAmelCase__ = pointer.normalization.weight
lowerCAmelCase__ = pointer.normalization.running_mean
lowerCAmelCase__ = pointer.normalization.running_var
if isinstance(A , A ):
lowerCAmelCase__ = '''MobilenetV1/Logits/Conv2d_1c_1x1/'''
lowerCAmelCase__ = model.classifier.weight
lowerCAmelCase__ = model.classifier.bias
return tf_to_pt_map
def load_tf_weights_in_mobilenet_va(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoint weights into a PyTorch MobileNetV1 model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            '''Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see '''
            '''https://www.tensorflow.org/install/ for installation instructions.''' )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(F"""Loading TF weight {name} with shape {shape}""" )
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(F"""Importing {name}""" )
        if name not in tf_weights:
            logger.info(F"""{name} not in tf pre-trained weights, skipping""" )
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info('''Transposing depthwise''' )
            # TF depthwise kernels are HWIO -> rearrange to PyTorch's layout.
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info('''Transposing''' )
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(F"""Pointer shape {pointer.shape} and array shape {array.shape} mismatched""" )

        logger.info(F"""Initialize PyTorch weight {name} {array.shape}""" )
        pointer.data = torch.from_numpy(array)

        # Drop the copied weight and its optimizer slots from the bookkeeping dict.
        tf_weights.pop(name, None)
        tf_weights.pop(name + '''/RMSProp''', None)
        tf_weights.pop(name + '''/RMSProp_1''', None)
        tf_weights.pop(name + '''/ExponentialMovingAverage''', None)

    logger.info(F"""Weights not copied to PyTorch model: {", ".join(tf_weights.keys() )}""" )
    return model
def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor:
    """
    Apply TensorFlow-style "SAME" padding to the input of a convolution:
    pad so that the output size is ceil(input / stride), splitting the pad
    between both sides (extra pixel goes to the right/bottom).
    """
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    # F.pad takes (left, right, top, bottom) for the last two dimensions.
    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, '''constant''', 0.0)
class MobileNetVaConvLayer(nn.Module):
    """Convolution + optional batch norm + optional activation building block."""

    def __init__(
        self,
        config: "MobileNetVaConfig",
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: int = 1,
        groups: int = 1,
        bias: bool = False,
        use_normalization: bool = True,
        use_activation=True,
    ) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(F"""Input channels ({in_channels}) are not divisible by {groups} groups.""" )
        if out_channels % groups != 0:
            raise ValueError(F"""Output channels ({out_channels}) are not divisible by {groups} groups.""" )

        # With TF padding, the explicit "SAME" pad is applied in forward() instead.
        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode='''zeros''',
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9_997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            # `use_activation` may itself name an activation; otherwise fall
            # back to the config's hidden activation (string key or module).
            if isinstance(use_activation, str):
                self.activation = ACTaFN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACTaFN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class MobileNetVaPreTrainedModel(PreTrainedModel):
    """
    Abstract base class handling weight initialization and pretrained-model
    download/loading for MobileNetV1 models.
    """

    config_class = MobileNetVaConfig
    load_tf_weights = load_tf_weights_in_mobilenet_va
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module) -> None:
        """Initialize conv/linear weights normally and batch norm to identity."""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
# Docstrings injected into the model classes by the decorators below.
MOBILENET_V1_START_DOCSTRING = R'''
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.
    Parameters:
        config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
MOBILENET_V1_INPUTS_DOCSTRING = R'''
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`MobileNetV1ImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top." , MOBILENET_V1_START_DOCSTRING , )
class MobileNetVaModel(MobileNetVaPreTrainedModel):
    """The bare MobileNetV1 backbone: stem conv + 13 depthwise-separable blocks."""

    def __init__(self, config: "MobileNetVaConfig", add_pooling_layer: bool = True) -> None:
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetVaConvLayer(
            config,
            in_channels=config.num_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=2,
        )

        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels

            # Depth doubles at every stride-2 block (and after the stem).
            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            # Depthwise 3x3 conv (groups == channels) ...
            self.layer.append(
                MobileNetVaConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=in_channels,
                    kernel_size=3,
                    stride=strides[i],
                    groups=in_channels,
                )
            )

            # ... followed by a pointwise 1x1 conv.
            self.layer.append(
                MobileNetVaConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=1,
                )
            )

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        # MobileNetV1 has no attention heads to prune.
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=BaseModelOutputWithPoolingAndNoAttention , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError('''You have to specify pixel_values''' )

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=all_hidden_states,
        )
@add_start_docstrings(
    "\n    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    " , MOBILENET_V1_START_DOCSTRING , )
class MobileNetVaForImageClassification(MobileNetVaPreTrainedModel):
    """MobileNetV1 backbone with a dropout + linear classification head."""

    def __init__(self, config: "MobileNetVaConfig") -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilenet_va = MobileNetVaModel(config)

        last_hidden_size = self.mobilenet_va.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=ImageClassifierOutputWithNoAttention , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_va(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            # Infer the problem type once from label dtype / label count.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = '''regression'''
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = '''single_label_classification'''
                else:
                    self.config.problem_type = '''multi_label_classification'''

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )
"""simple docstring"""
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# SentencePiece's word-boundary marker.
SPIECE_UNDERLINE = '▁'

VOCAB_FILES_NAMES = {
    'vocab_file': 'vocab.json',
    'spm_file': 'sentencepiece.bpe.model',
}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/s2t-small-librispeech-asr': (
            'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'
        ),
    },
    'spm_file': {
        'facebook/s2t-small-librispeech-asr': (
            'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'
        )
    },
}

MAX_MODEL_INPUT_SIZES = {
    'facebook/s2t-small-librispeech-asr': 1_024,
}

# Languages covered by the MuST-C multilingual speech translation corpus.
MUSTC_LANGS = ['pt', 'fr', 'ru', 'nl', 'ro', 'it', 'es', 'de']

LANGUAGES = {'mustc': MUSTC_LANGS}
class UpperCamelCase ( a ):
"""simple docstring"""
_lowerCamelCase : int =VOCAB_FILES_NAMES
_lowerCamelCase : Any =PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase : Any =MAX_MODEL_INPUT_SIZES
_lowerCamelCase : List[str] =["input_ids", "attention_mask"]
_lowerCamelCase : List[int] =[]
def __init__( self : str , _lowerCamelCase : Optional[int] , _lowerCamelCase : str , _lowerCamelCase : Optional[int]="<s>" , _lowerCamelCase : Optional[int]="</s>" , _lowerCamelCase : List[Any]="<pad>" , _lowerCamelCase : Dict="<unk>" , _lowerCamelCase : int=False , _lowerCamelCase : Any=False , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : Optional[Dict[str, Any]] = None , **_lowerCamelCase : str , ):
A__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , pad_token=_lowerCamelCase , do_upper_case=_lowerCamelCase , do_lower_case=_lowerCamelCase , tgt_lang=_lowerCamelCase , lang_codes=_lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCamelCase , )
A__ = do_upper_case
A__ = do_lower_case
A__ = load_json(_lowerCamelCase )
A__ = {v: k for k, v in self.encoder.items()}
A__ = spm_file
A__ = load_spm(_lowerCamelCase , self.sp_model_kwargs )
if lang_codes is not None:
A__ = lang_codes
A__ = LANGUAGES[lang_codes]
A__ = [F'''<lang:{lang}>''' for lang in self.langs]
A__ = {lang: self.sp_model.PieceToId(F'''<lang:{lang}>''' ) for lang in self.langs}
A__ = self.lang_tokens
A__ = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
A__ = {}
@property
def A__ ( self : Any ):
return len(self.encoder )
@property
def A__ ( self : Dict ):
return self._tgt_lang
@tgt_lang.setter
def A__ ( self : Any , _lowerCamelCase : Optional[Any] ):
A__ = new_tgt_lang
self.set_tgt_lang_special_tokens(_lowerCamelCase )
def A__ ( self : Any , _lowerCamelCase : str ):
A__ = self.lang_code_to_id[tgt_lang]
A__ = [lang_code_id]
def A__ ( self : Dict , _lowerCamelCase : str ):
return self.sp_model.encode(_lowerCamelCase , out_type=_lowerCamelCase )
def A__ ( self : List[str] , _lowerCamelCase : List[str] ):
return self.encoder.get(_lowerCamelCase , self.encoder[self.unk_token] )
def A__ ( self : List[Any] , _lowerCamelCase : int ):
return self.decoder.get(_lowerCamelCase , self.unk_token )
def A__ ( self : int , _lowerCamelCase : List[str] ):
A__ = []
A__ = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
A__ = self.sp_model.decode(_lowerCamelCase )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
A__ = []
else:
current_sub_tokens.append(_lowerCamelCase )
A__ = self.sp_model.decode(_lowerCamelCase )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def A__ ( self : List[str] , _lowerCamelCase : List[Any] , _lowerCamelCase : List[Any]=None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def A__ ( self : Optional[int] , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None , already_has_special_tokens : bool = False ):
    """Return a mask with 1 at special-token positions and 0 elsewhere.

    The scrambled original declared all three parameters under one name
    (a SyntaxError), passed duplicate keyword arguments to ``super()``, and
    read undefined locals.  The base-class keywords are ``token_ids_0`` /
    ``token_ids_1`` per the HF tokenizer API.
    """
    if already_has_special_tokens:
        return super().get_special_tokens_mask(
            token_ids_0=token_ids_a , token_ids_1=token_ids_b , already_has_special_tokens=True )
    prefix_ones = [1] * len(self.prefix_tokens )
    suffix_ones = [1]  # the trailing EOS
    if token_ids_b is None:
        return prefix_ones + ([0] * len(token_ids_a )) + suffix_ones
    return prefix_ones + ([0] * len(token_ids_a )) + ([0] * len(token_ids_b )) + suffix_ones
def A__ ( self : List[str] ):
A__ = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : List[str] ):
    """Drop the unpicklable SentencePiece processor before pickling.

    The original bound the dict copy and the ``None`` to the same throwaway
    local, then returned an undefined ``state``.
    """
    state = self.__dict__.copy()
    state["sp_model"] = None
    return state
def __setstate__( self : Tuple , _lowerCamelCase : Dict ):
    """Restore pickled state and re-load the SentencePiece model from file.

    The original never applied the incoming dict to ``self.__dict__`` and
    never assigned the reloaded model.
    """
    self.__dict__ = _lowerCamelCase
    # for backward compatibility
    if not hasattr(self , '''sp_model_kwargs''' ):
        self.sp_model_kwargs = {}
    self.sp_model = load_spm(self.spm_file , self.sp_model_kwargs )
def A__ ( self : str , save_directory : str , filename_prefix : Optional[str] = None ):
    """Save the JSON vocab and the SentencePiece model file into *save_directory*.

    Returns the two written paths.  The scrambled original bound every path
    to the same throwaway local and then read undefined names
    (``save_dir``, the two target paths).
    """
    save_dir = Path(save_directory )
    assert save_dir.is_dir(), f'''{save_directory} should be a directory'''
    vocab_save_path = save_dir / (
        (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file''']
    )
    spm_save_path = save_dir / (
        (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file''']
    )
    save_json(self.encoder , vocab_save_path )
    # copy the original .spm when it exists elsewhere; otherwise serialize the
    # in-memory model
    if os.path.abspath(self.spm_file ) != os.path.abspath(spm_save_path ) and os.path.isfile(self.spm_file ):
        copyfile(self.spm_file , spm_save_path )
    elif not os.path.isfile(self.spm_file ):
        with open(spm_save_path , '''wb''' ) as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model )
    return (str(vocab_save_path ), str(spm_save_path ))
def a_ ( path , sp_model_kwargs ):
    """Load a SentencePiece model from *path*.

    The scrambled original declared both parameters under one name (a
    SyntaxError) and called ``Load`` on an undefined ``spm``.
    """
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm
def a_ ( __a ):
    """Read a JSON file and return the parsed object.

    Bug fix: the original passed the *path* to ``json.load`` instead of the
    open file handle.
    """
    with open(__a , '''r''' ) as f:
        return json.load(f )
def a_ ( data , path ):
    """Write *data* to *path* as pretty-printed (indent=2) JSON.

    The scrambled original declared both parameters under one name (a
    SyntaxError) and so dumped the wrong object.
    """
    with open(path , '''w''' ) as f:
        json.dump(data , f , indent=2 )
| 571 | 0 |
from __future__ import annotations
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ ):
    """Return True iff the given side lengths could form a polygon.

    A polygon is possible exactly when the longest side is strictly shorter
    than the sum of all the others.  Raises ValueError for fewer than two
    sides or any non-positive length.

    Bug fix: the original body referenced an undefined ``nums`` instead of
    the parameter.
    """
    if len(SCREAMING_SNAKE_CASE__ ) < 2:
        raise ValueError('Monogons and Digons are not polygons in the Euclidean space' )
    if any(side <= 0 for side in SCREAMING_SNAKE_CASE__ ):
        raise ValueError('All values must be greater than 0' )
    copy_nums = sorted(SCREAMING_SNAKE_CASE__ )
    return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
    # Run any doctests embedded in this module when executed as a script.
    import doctest

    doctest.testmod()
| 230 |
def UpperCamelCase__ ( grid , row , col , visit ):
    """Count simple 4-directional paths from (row, col) to the bottom-right cell.

    ``grid`` holds 0 for open cells and 1 for blocked cells; ``visit`` is the
    set of cells already on the current path (backtracking DFS).

    Bug fixes versus the scrambled original: all four parameters were
    declared under the same name (a SyntaxError), the recursion called an
    undefined ``depth_first_search``, and ``grid``/``visit``/``count`` were
    read without ever being bound.
    """
    row_length , col_length = len(grid ) , len(grid[0] )
    if (
        min(row , col ) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1
    visit.add((row, col) )
    count = 0
    count += UpperCamelCase__(grid , row + 1 , col , visit )
    count += UpperCamelCase__(grid , row - 1 , col , visit )
    count += UpperCamelCase__(grid , row , col + 1 , visit )
    count += UpperCamelCase__(grid , row , col - 1 , visit )
    visit.remove((row, col) )
    return count
if __name__ == "__main__":
    # Run any doctests embedded in this module when executed as a script.
    import doctest

    doctest.testmod()
| 230 | 1 |
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class __UpperCamelCase ( unittest.TestCase ):
    """Tests for the Wav2Vec2 processor with an LM beam-search decoder.

    NOTE(review): this block is an automated rename of an upstream test
    module ("WavaVeca" stands for Wav2Vec2).  Method bodies reference
    ``__lowerCAmelCase``, which is never bound anywhere in the class — an
    artifact of the renaming; confirm against the upstream test file before
    relying on these tests at runtime.
    """

    def _a ( self : List[str] ) -> str:
        """Create a temp dir holding a toy vocab and feature-extractor config."""
        __lowercase = """| <pad> <unk> <s> </s> a b c d e f g h i j k""".split()
        __lowercase = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) )
        __lowercase = {
            """unk_token""": """<unk>""",
            """bos_token""": """<s>""",
            """eos_token""": """</s>""",
        }
        __lowercase = {
            """feature_size""": 1,
            """padding_value""": 0.0,
            """sampling_rate""": 1_6000,
            """return_attention_mask""": False,
            """do_normalize""": True,
        }
        __lowercase = tempfile.mkdtemp()
        __lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        __lowercase = os.path.join(self.tmpdirname , __lowerCAmelCase )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(__lowerCAmelCase ) + """\n""" )
        with open(self.feature_extraction_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(__lowerCAmelCase ) + """\n""" )
        # load decoder from hub
        __lowercase = """hf-internal-testing/ngram-beam-search-decoder"""

    def _a ( self : Optional[Any] , **_lowerCAmelCase : int ) -> List[Any]:
        """Instantiate a CTC tokenizer from the temp dir (kwargs override defaults)."""
        __lowercase = self.add_kwargs_tokens_map.copy()
        kwargs.update(__lowerCAmelCase )
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase )

    def _a ( self : List[str] , **_lowerCAmelCase : List[Any] ) -> List[Any]:
        """Instantiate a feature extractor from the temp dir."""
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **__lowerCAmelCase )

    def _a ( self : List[str] , **_lowerCAmelCase : str ) -> int:
        """Load the beam-search decoder from the hub test repo."""
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **__lowerCAmelCase )

    def _a ( self : List[str] ) -> Union[str, Any]:
        """Remove the temp dir created in setUp."""
        shutil.rmtree(self.tmpdirname )

    def _a ( self : str ) -> Optional[int]:
        """save_pretrained/from_pretrained round-trips all three components."""
        __lowercase = self.get_tokenizer()
        __lowercase = self.get_feature_extractor()
        __lowercase = self.get_decoder()
        __lowercase = WavaVecaProcessorWithLM(tokenizer=__lowerCAmelCase , feature_extractor=__lowerCAmelCase , decoder=__lowerCAmelCase )
        processor.save_pretrained(self.tmpdirname )
        __lowercase = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
        # tokenizer
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
        self.assertIsInstance(processor.tokenizer , __lowerCAmelCase )
        # feature extractor
        self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
        self.assertIsInstance(processor.feature_extractor , __lowerCAmelCase )
        # decoder
        self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
        self.assertEqual(
            processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
        self.assertIsInstance(processor.decoder , __lowerCAmelCase )

    def _a ( self : str ) -> Dict:
        """from_pretrained forwards extra LM hyper-parameters to the decoder."""
        __lowercase = WavaVecaProcessorWithLM(
            tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
        processor.save_pretrained(self.tmpdirname )
        # make sure that error is thrown when decoder alphabet doesn't match
        __lowercase = WavaVecaProcessorWithLM.from_pretrained(
            self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
        # decoder
        self.assertEqual(processor.language_model.alpha , 5.0 )
        self.assertEqual(processor.language_model.beta , 3.0 )
        self.assertEqual(processor.language_model.score_boundary , -7.0 )
        self.assertEqual(processor.language_model.unk_score_offset , 3 )

    def _a ( self : List[Any] ) -> List[Any]:
        """Constructing with a tokenizer whose vocab exceeds the decoder alphabet raises."""
        __lowercase = self.get_tokenizer()
        # add token to trigger raise
        tokenizer.add_tokens(["""xx"""] )
        with self.assertRaisesRegex(__lowerCAmelCase , """include""" ):
            WavaVecaProcessorWithLM(
                tokenizer=__lowerCAmelCase , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )

    def _a ( self : Any ) -> int:
        """Processor delegates raw-audio calls to the feature extractor."""
        __lowercase = self.get_feature_extractor()
        __lowercase = self.get_tokenizer()
        __lowercase = self.get_decoder()
        __lowercase = WavaVecaProcessorWithLM(tokenizer=__lowerCAmelCase , feature_extractor=__lowerCAmelCase , decoder=__lowerCAmelCase )
        __lowercase = floats_list((3, 1000) )
        __lowercase = feature_extractor(__lowerCAmelCase , return_tensors="""np""" )
        __lowercase = processor(__lowerCAmelCase , return_tensors="""np""" )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )

    def _a ( self : List[str] ) -> List[Any]:
        """Processor delegates text calls to the tokenizer."""
        __lowercase = self.get_feature_extractor()
        __lowercase = self.get_tokenizer()
        __lowercase = self.get_decoder()
        __lowercase = WavaVecaProcessorWithLM(tokenizer=__lowerCAmelCase , feature_extractor=__lowerCAmelCase , decoder=__lowerCAmelCase )
        __lowercase = """This is a test string"""
        __lowercase = processor(text=__lowerCAmelCase )
        __lowercase = tokenizer(__lowerCAmelCase )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    def _a ( self : Optional[int] , _lowerCAmelCase : Any=(2, 10, 16) , _lowerCAmelCase : Optional[Any]=77 ) -> Optional[Any]:
        """Deterministic random logits of the given shape (seeded)."""
        np.random.seed(__lowerCAmelCase )
        return np.random.rand(*__lowerCAmelCase )

    def _a ( self : List[Any] ) -> Dict:
        """processor.decode matches decoding directly with the beam-search decoder."""
        __lowercase = self.get_feature_extractor()
        __lowercase = self.get_tokenizer()
        __lowercase = self.get_decoder()
        __lowercase = WavaVecaProcessorWithLM(tokenizer=__lowerCAmelCase , feature_extractor=__lowerCAmelCase , decoder=__lowerCAmelCase )
        __lowercase = self._get_dummy_logits(shape=(10, 16) , seed=13 )
        __lowercase = processor.decode(__lowerCAmelCase )
        __lowercase = decoder.decode_beams(__lowerCAmelCase )[0]
        self.assertEqual(decoded_decoder[0] , decoded_processor.text )
        self.assertEqual("""</s> <s> </s>""" , decoded_processor.text )
        self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
        self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )

    @parameterized.expand([[None], ["""fork"""], ["""spawn"""]] )
    def _a ( self : int , _lowerCAmelCase : List[Any] ) -> Any:
        """batch_decode matches the decoder, with and without a multiprocessing pool."""
        __lowercase = self.get_feature_extractor()
        __lowercase = self.get_tokenizer()
        __lowercase = self.get_decoder()
        __lowercase = WavaVecaProcessorWithLM(tokenizer=__lowerCAmelCase , feature_extractor=__lowerCAmelCase , decoder=__lowerCAmelCase )
        __lowercase = self._get_dummy_logits()
        # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
        # otherwise, the LM won't be available to the pool's sub-processes.
        # manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
        if pool_context is None:
            __lowercase = processor.batch_decode(__lowerCAmelCase )
        else:
            with get_context(__lowerCAmelCase ).Pool() as pool:
                __lowercase = processor.batch_decode(__lowerCAmelCase , __lowerCAmelCase )
        __lowercase = list(__lowerCAmelCase )
        with get_context("""fork""" ).Pool() as p:
            __lowercase = decoder.decode_beams_batch(__lowerCAmelCase , __lowerCAmelCase )
        __lowercase , __lowercase , __lowercase = [], [], []
        for beams in decoded_beams:
            texts_decoder.append(beams[0][0] )
            logit_scores_decoder.append(beams[0][-2] )
            lm_scores_decoder.append(beams[0][-1] )
        self.assertListEqual(__lowerCAmelCase , decoded_processor.text )
        self.assertListEqual(["""<s> <s> </s>""", """<s> <s> <s>"""] , decoded_processor.text )
        self.assertListEqual(__lowerCAmelCase , decoded_processor.logit_score )
        self.assertListEqual(__lowerCAmelCase , decoded_processor.lm_score )

    def _a ( self : Any ) -> int:
        """Beam-search pruning kwargs are forwarded by batch_decode."""
        __lowercase = self.get_feature_extractor()
        __lowercase = self.get_tokenizer()
        __lowercase = self.get_decoder()
        __lowercase = WavaVecaProcessorWithLM(tokenizer=__lowerCAmelCase , feature_extractor=__lowerCAmelCase , decoder=__lowerCAmelCase )
        __lowercase = self._get_dummy_logits()
        __lowercase = 15
        __lowercase = -20.0
        __lowercase = -4.0
        __lowercase = processor.batch_decode(
            __lowerCAmelCase , beam_width=__lowerCAmelCase , beam_prune_logp=__lowerCAmelCase , token_min_logp=__lowerCAmelCase , )
        __lowercase = decoded_processor_out.text
        __lowercase = list(__lowerCAmelCase )
        with get_context("""fork""" ).Pool() as pool:
            __lowercase = decoder.decode_beams_batch(
                __lowerCAmelCase , __lowerCAmelCase , beam_width=__lowerCAmelCase , beam_prune_logp=__lowerCAmelCase , token_min_logp=__lowerCAmelCase , )
        __lowercase = [d[0][0] for d in decoded_decoder_out]
        __lowercase = [d[0][2] for d in decoded_decoder_out]
        __lowercase = [d[0][3] for d in decoded_decoder_out]
        self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
        self.assertListEqual(["""</s> <s> <s>""", """<s> <s> <s>"""] , __lowerCAmelCase )
        self.assertTrue(np.array_equal(__lowerCAmelCase , decoded_processor_out.logit_score ) )
        self.assertTrue(np.allclose([-20.054, -18.447] , __lowerCAmelCase , atol=1e-3 ) )
        self.assertTrue(np.array_equal(__lowerCAmelCase , decoded_processor_out.lm_score ) )
        self.assertTrue(np.allclose([-15.554, -13.9_474] , __lowerCAmelCase , atol=1e-3 ) )

    def _a ( self : Optional[int] ) -> Dict:
        """LM hyper-parameters passed to batch_decode update the decoder in place."""
        __lowercase = self.get_feature_extractor()
        __lowercase = self.get_tokenizer()
        __lowercase = self.get_decoder()
        __lowercase = WavaVecaProcessorWithLM(tokenizer=__lowerCAmelCase , feature_extractor=__lowerCAmelCase , decoder=__lowerCAmelCase )
        __lowercase = self._get_dummy_logits()
        __lowercase = 2.0
        __lowercase = 5.0
        __lowercase = -20.0
        __lowercase = True
        __lowercase = processor.batch_decode(
            __lowerCAmelCase , alpha=__lowerCAmelCase , beta=__lowerCAmelCase , unk_score_offset=__lowerCAmelCase , lm_score_boundary=__lowerCAmelCase , )
        __lowercase = decoded_processor_out.text
        __lowercase = list(__lowerCAmelCase )
        decoder.reset_params(
            alpha=__lowerCAmelCase , beta=__lowerCAmelCase , unk_score_offset=__lowerCAmelCase , lm_score_boundary=__lowerCAmelCase , )
        with get_context("""fork""" ).Pool() as pool:
            __lowercase = decoder.decode_beams_batch(
                __lowerCAmelCase , __lowerCAmelCase , )
        __lowercase = [d[0][0] for d in decoded_decoder_out]
        self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
        self.assertListEqual(["""<s> </s> <s> </s> </s>""", """</s> </s> <s> </s> </s>"""] , __lowerCAmelCase )
        __lowercase = processor.decoder.model_container[processor.decoder._model_key]
        self.assertEqual(lm_model.alpha , 2.0 )
        self.assertEqual(lm_model.beta , 5.0 )
        self.assertEqual(lm_model.unk_score_offset , -20.0 )
        self.assertEqual(lm_model.score_boundary , __lowerCAmelCase )

    def _a ( self : List[Any] ) -> Any:
        """from_pretrained downloads only decoder-relevant files from the hub repo."""
        __lowercase = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
        __lowercase = processor.decoder.model_container[processor.decoder._model_key]
        __lowercase = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
        __lowercase = os.listdir(__lowerCAmelCase )
        __lowercase = ["""alphabet.json""", """language_model"""]
        downloaded_decoder_files.sort()
        expected_decoder_files.sort()
        # test that only decoder relevant files from
        # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
        # are downloaded and none of the rest (e.g. README.md, ...)
        self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )

    def _a ( self : Union[str, Any] ) -> Tuple:
        """Loading from a local snapshot yields the same decoder files as the hub."""
        __lowercase = snapshot_download("""hf-internal-testing/processor_with_lm""" )
        __lowercase = WavaVecaProcessorWithLM.from_pretrained(__lowerCAmelCase )
        __lowercase = processor.decoder.model_container[processor.decoder._model_key]
        __lowercase = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
        __lowercase = os.listdir(__lowerCAmelCase )
        __lowercase = os.listdir(__lowerCAmelCase )
        local_decoder_files.sort()
        expected_decoder_files.sort()
        # test that both decoder form hub and local files in cache are the same
        self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )

    def _a ( self : Optional[Any] ) -> int:
        """AutoProcessor resolves to the same processor as the explicit class."""
        __lowercase = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
        __lowercase = AutoProcessor.from_pretrained("""hf-internal-testing/processor_with_lm""" )
        __lowercase = floats_list((3, 1000) )
        __lowercase = processor_wavaveca(__lowerCAmelCase , return_tensors="""np""" )
        __lowercase = processor_auto(__lowerCAmelCase , return_tensors="""np""" )
        for key in input_wavaveca.keys():
            self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
        __lowercase = self._get_dummy_logits()
        __lowercase = processor_wavaveca.batch_decode(__lowerCAmelCase )
        __lowercase = processor_auto.batch_decode(__lowerCAmelCase )
        self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )

    def _a ( self : Tuple ) -> Optional[Any]:
        """model_input_names is inherited from the feature extractor."""
        __lowercase = self.get_feature_extractor()
        __lowercase = self.get_tokenizer()
        __lowercase = self.get_decoder()
        __lowercase = WavaVecaProcessorWithLM(tokenizer=__lowerCAmelCase , feature_extractor=__lowerCAmelCase , decoder=__lowerCAmelCase )
        self.assertListEqual(
            processor.model_input_names , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )

    @staticmethod
    def _a ( _lowerCAmelCase : str , _lowerCAmelCase : List[str] ) -> str:
        """Extract one field from a list of offset dicts."""
        __lowercase = [d[key] for d in offsets]
        return retrieved_list

    def _a ( self : Optional[Any] ) -> str:
        """decode with output_word_offsets returns per-word offsets for one sample."""
        __lowercase = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
        __lowercase = self._get_dummy_logits()[0]
        __lowercase = processor.decode(__lowerCAmelCase , output_word_offsets=__lowerCAmelCase )
        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys() ) , 4 )
        self.assertTrue("""text""" in outputs )
        self.assertTrue("""word_offsets""" in outputs )
        self.assertTrue(isinstance(__lowerCAmelCase , __lowerCAmelCase ) )
        self.assertEqual(""" """.join(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) ) , outputs.text )
        self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
        self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """start_offset""" ) , [0, 2, 4] )
        self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """end_offset""" ) , [1, 3, 5] )

    def _a ( self : List[Any] ) -> Any:
        """batch_decode with output_word_offsets returns offsets for each sample."""
        __lowercase = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
        __lowercase = self._get_dummy_logits()
        __lowercase = processor.batch_decode(__lowerCAmelCase , output_word_offsets=__lowerCAmelCase )
        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys() ) , 4 )
        self.assertTrue("""text""" in outputs )
        self.assertTrue("""word_offsets""" in outputs )
        self.assertTrue(isinstance(__lowerCAmelCase , __lowerCAmelCase ) )
        self.assertListEqual(
            [""" """.join(self.get_from_offsets(__lowerCAmelCase , """word""" ) ) for o in outputs["""word_offsets"""]] , outputs.text )
        self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
        self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """start_offset""" ) , [0, 2, 4] )
        self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """end_offset""" ) , [1, 3, 5] )

    @slow
    @require_torch
    @require_torchaudio
    def _a ( self : Optional[int] ) -> Optional[int]:
        """End-to-end: real model + LM decoding reproduces known words and timestamps."""
        import torch

        __lowercase = load_dataset("""common_voice""" , """en""" , split="""train""" , streaming=__lowerCAmelCase )
        __lowercase = ds.cast_column("""audio""" , datasets.Audio(sampling_rate=1_6000 ) )
        __lowercase = iter(__lowerCAmelCase )
        __lowercase = next(__lowerCAmelCase )
        __lowercase = AutoProcessor.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
        __lowercase = WavaVecaForCTC.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
        # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
        __lowercase = processor(sample["""audio"""]["""array"""] , return_tensors="""pt""" ).input_values
        with torch.no_grad():
            __lowercase = model(__lowerCAmelCase ).logits.cpu().numpy()
        __lowercase = processor.decode(logits[0] , output_word_offsets=__lowerCAmelCase )
        __lowercase = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
        __lowercase = [
            {
                """start_time""": d["""start_offset"""] * time_offset,
                """end_time""": d["""end_offset"""] * time_offset,
                """word""": d["""word"""],
            }
            for d in output["""word_offsets"""]
        ]
        __lowercase = """WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"""
        # output words
        self.assertEqual(""" """.join(self.get_from_offsets(__lowerCAmelCase , """word""" ) ) , __lowerCAmelCase )
        self.assertEqual(""" """.join(self.get_from_offsets(__lowerCAmelCase , """word""" ) ) , output.text )
        # output times
        __lowercase = torch.tensor(self.get_from_offsets(__lowerCAmelCase , """start_time""" ) )
        __lowercase = torch.tensor(self.get_from_offsets(__lowerCAmelCase , """end_time""" ) )
        # fmt: off
        __lowercase = torch.tensor([1.4_199, 1.6_599, 2.2_599, 3.0, 3.24, 3.5_999, 3.7_999, 4.0_999, 4.26, 4.94, 5.28, 5.6_599, 5.78, 5.94, 6.32, 6.5_399, 6.6_599] )
        __lowercase = torch.tensor([1.5_399, 1.8_999, 2.9, 3.16, 3.5_399, 3.72, 4.0_199, 4.1_799, 4.76, 5.1_599, 5.5_599, 5.6_999, 5.86, 6.1_999, 6.38, 6.6_199, 6.94] )
        # fmt: on
        self.assertTrue(torch.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=0.01 ) )
        self.assertTrue(torch.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=0.01 ) )
| 80 | '''simple docstring'''
def snake_case__ ( length: int ) -> list[int]:
    """Return the first *length* hexagonal numbers, n * (2n - 1) for n in [0, length).

    Raises ValueError when *length* is not a positive integer.

    Bug fixes: the scrambled original named the parameter ``_A`` while the
    body read an undefined ``length``, and ``isinstance(_A, _A)`` passed the
    value itself as the type (a TypeError).  The type check now runs first so
    non-numeric inputs do not hit ``<=``.
    """
    if not isinstance(length , int ) or length <= 0:
        raise ValueError("""Length must be a positive integer.""" )
    return [n * (2 * n - 1) for n in range(length )]
if __name__ == "__main__":
    # Demo: print the first 5 and 10 hexagonal numbers.
    # Bug fix: the original called an undefined ``hexagonal_numbers``;
    # the function above is (scrambled-)named ``snake_case__``.
    print(snake_case__(length=5))
    print(snake_case__(length=1_0))
| 370 | 0 |
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _snake_case ( lowercase__ , unittest.TestCase):
    """Fast pipeline tests for DanceDiffusionPipeline (audio generation).

    NOTE(review): this is an automated rename of an upstream diffusers test —
    the base class ``lowercase__`` is never defined in this module, and
    several method bodies bind results to ``lowercase__`` but read them under
    their original names (e.g. ``unet``, ``pipe``); confirm against upstream.
    """

    UpperCamelCase__ : Tuple =DanceDiffusionPipeline
    UpperCamelCase__ : Any =UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    # callbacks/image-count params do not apply to audio pipelines
    UpperCamelCase__ : Optional[Any] =PipelineTesterMixin.required_optional_params - {
        """callback""",
        """latents""",
        """callback_steps""",
        """output_type""",
        """num_images_per_prompt""",
    }
    UpperCamelCase__ : str =UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    UpperCamelCase__ : Optional[Any] =False
    UpperCamelCase__ : List[Any] =False

    def A__ ( self : int ):
        """Build tiny deterministic pipeline components (1D UNet + IPNDM scheduler)."""
        torch.manual_seed(0 )
        lowercase__ = UNetaDModel(
            block_out_channels=(32, 32, 64), extra_in_channels=16, sample_size=512, sample_rate=1_6000, in_channels=2, out_channels=2, flip_sin_to_cos=__lowercase, use_timestep_embedding=__lowercase, time_embedding_type="fourier", mid_block_type="UNetMidBlock1D", down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"), up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"), )
        lowercase__ = IPNDMScheduler()
        lowercase__ = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def A__ ( self : int, __lowercase : int, __lowercase : Optional[Any]=0 ):
        """Seeded generator + minimal call kwargs for the given device."""
        if str(__lowercase ).startswith("mps" ):
            lowercase__ = torch.manual_seed(__lowercase )
        else:
            lowercase__ = torch.Generator(device=__lowercase ).manual_seed(__lowercase )
        lowercase__ = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 4,
        }
        return inputs

    def A__ ( self : List[str] ):
        """Smoke test: CPU run produces audio of the expected shape and values."""
        lowercase__ = "cpu"  # ensure determinism for the device-dependent torch.Generator
        lowercase__ = self.get_dummy_components()
        lowercase__ = DanceDiffusionPipeline(**__lowercase )
        lowercase__ = pipe.to(__lowercase )
        pipe.set_progress_bar_config(disable=__lowercase )
        lowercase__ = self.get_dummy_inputs(__lowercase )
        lowercase__ = pipe(**__lowercase )
        lowercase__ = output.audios
        lowercase__ = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, components["unet"].sample_size)
        lowercase__ = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000] )
        assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2

    @skip_mps
    def A__ ( self : Union[str, Any] ):
        """Defer to the mixin's save/load-local test (skipped on MPS)."""
        return super().test_save_load_local()

    @skip_mps
    def A__ ( self : Union[str, Any] ):
        """Defer to the mixin's dict/tuple-output equivalence test (skipped on MPS)."""
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )

    @skip_mps
    def A__ ( self : Dict ):
        """Defer to the mixin's optional-components save/load test (skipped on MPS)."""
        return super().test_save_load_optional_components()

    @skip_mps
    def A__ ( self : Dict ):
        """Defer to the mixin's attention-slicing test (skipped on MPS)."""
        return super().test_attention_slicing_forward_pass()

    def A__ ( self : Union[str, Any] ):
        """Batched single-input inference matches unbatched within tolerance."""
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class _snake_case ( unittest.TestCase):
    """Slow GPU integration tests for DanceDiffusionPipeline against the
    harmonai/maestro-150k checkpoint.

    NOTE(review): automated rename artifact — bodies bind to ``lowercase__``
    but read ``pipe``/``output``/``audio`` etc.; confirm against upstream.
    """

    def A__ ( self : Union[str, Any] ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def A__ ( self : Any ):
        """fp32 checkpoint run reproduces reference audio values."""
        lowercase__ = torch_device
        lowercase__ = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k" )
        lowercase__ = pipe.to(__lowercase )
        pipe.set_progress_bar_config(disable=__lowercase )
        lowercase__ = torch.manual_seed(0 )
        lowercase__ = pipe(generator=__lowercase, num_inference_steps=100, audio_length_in_s=4.096 )
        lowercase__ = output.audios
        lowercase__ = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        lowercase__ = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020] )
        assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2

    def A__ ( self : str ):
        """fp16 checkpoint run reproduces reference audio values."""
        lowercase__ = torch_device
        lowercase__ = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.floataa )
        lowercase__ = pipe.to(__lowercase )
        pipe.set_progress_bar_config(disable=__lowercase )
        lowercase__ = torch.manual_seed(0 )
        lowercase__ = pipe(generator=__lowercase, num_inference_steps=100, audio_length_in_s=4.096 )
        lowercase__ = output.audios
        lowercase__ = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        lowercase__ = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341] )
        assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
| 37 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ = logging.get_logger(__name__)
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
    """Build a BitConfig for *model_name*, with ImageNet-1k labels from the hub.

    Bug fixes: the scrambled original applied ``int()`` to the model name
    instead of each label key, and bound every value to a throwaway local
    while reading the real names.  ``BitConfig``'s label kwargs are
    ``id2label`` / ``label2id`` per the HF API.
    """
    model_name = SCREAMING_SNAKE_CASE_
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    conv_layer = "std_conv" if "bit" in model_name else False
    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer , num_labels=1000 , id2label=id2label , label2id=label2id , )
    return config
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
    """Map a timm BiT state-dict key to its HuggingFace equivalent.

    Bug fix: the scrambled original read/assigned an undefined ``name``
    instead of the parameter.
    """
    name = SCREAMING_SNAKE_CASE_
    if "stem.conv" in name:
        name = name.replace("stem.conv" , "bit.embedder.convolution" )
    if "blocks" in name:
        name = name.replace("blocks" , "layers" )
    if "head.fc" in name:
        name = name.replace("head.fc" , "classifier.1" )
    if name.startswith("norm" ):
        name = "bit." + name
    # everything that is not part of the embedder or the head lives under the encoder
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name
    return name
def __lowerCAmelCase ( ):
    """Download and return the standard COCO test image (two cats on a couch).

    Bug fix: the zero-argument original referenced an undefined
    ``SCREAMING_SNAKE_CASE_`` for both the URL and the ``stream`` flag.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def __lowerCAmelCase ( model_name , pytorch_dump_folder_path , push_to_hub=False ):
    """Convert a timm BiT checkpoint to the HuggingFace BiT format and verify it.

    Bug fixes versus the scrambled original: all three parameters shared one
    name (a SyntaxError) and every local was bound to a throwaway name while
    being read under its real name.  Reconstructed per the upstream
    conversion script.

    NOTE(review): ``get_config``, ``rename_key`` and ``prepare_img`` are the
    helpers defined above in this module under scrambled names — restore
    those names (or adjust these calls) before running.
    """
    config = get_config(model_name )
    # load original model from timm
    timm_model = create_model(model_name , pretrained=True )
    timm_model.eval()
    # load state_dict of original model and remap keys/shapes to HF layout
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val.squeeze() if "head" in key else val
    # load HuggingFace model
    model = BitForImageClassification(config )
    model.eval()
    model.load_state_dict(state_dict )
    # create image processor mirroring the timm eval transform
    transform = create_transform(**resolve_data_config({} , model=timm_model ) )
    timm_transforms = transform.transforms
    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    processor = BitImageProcessor(
        do_resize=True , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=True , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=True , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
    image = prepare_img()
    timm_pixel_values = transform(image ).unsqueeze(0 )
    pixel_values = processor(image , return_tensors="pt" ).pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values , pixel_values )
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values )
    logits = outputs.logits
    print("Logits:" , logits[0, :3] )
    print("Predicted class:" , model.config.id2label[logits.argmax(-1 ).item()] )
    timm_logits = timm_model(pixel_values )
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits , outputs.logits , atol=1e-3 )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(f'''Saving model {model_name} and processor to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(f'''Pushing model {model_name} and processor to the hub''' )
        model.push_to_hub(f'''ybelkada/{model_name}''' )
        processor.push_to_hub(f'''ybelkada/{model_name}''' )
if __name__ == "__main__":
    # CLI entry point for the conversion script.
    # Bug fixes: the original bound the parser and the parsed args to
    # ``lowercase_`` while reading ``parser``, and called an undefined
    # ``convert_bit_checkpoint`` (the function above is scrambled-named
    # ``__lowerCAmelCase``).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--model_name""",
        default="""resnetv2_50x1_bitm""",
        type=str,
        help="""Name of the BiT timm model you'd like to convert.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
    )
    parser.add_argument(
        """--push_to_hub""",
        action="""store_true""",
        help="""Whether to push the model to the hub.""",
    )
    args = parser.parse_args()
    __lowerCAmelCase(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 37 | 1 |
'''simple docstring'''
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class A(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for RoFormer (slow and fast tokenizers).

    NOTE(review): the original block was name-mangled — every method was
    called ``A__`` (so later defs shadowed earlier ones) and bodies
    referenced helpers/attributes that did not exist. Restored to the
    canonical ``TokenizerTesterMixin`` layout; runtime strings are
    unchanged.
    """

    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

    def get_tokenizer(self, **kwargs):
        # Slow (Python) tokenizer.
        return self.tokenizer_class.from_pretrained("""junnyu/roformer_chinese_base""", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        # Fast (Rust) tokenizer.
        return self.rust_tokenizer_class.from_pretrained("""junnyu/roformer_chinese_base""", **kwargs)

    def get_chinese_input_output_texts(self):
        # Returns (raw text, expected whitespace-joined tokenization).
        input_text = """永和服装饰品有限公司,今天天气非常好"""
        output_text = """永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"""
        return input_text, output_text

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22_943, 21_332, 34_431, 45_904, 117, 306, 1_231, 1_231, 2_653, 33_994, 1_266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_rust_tokenizer(self):
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22_943, 21_332, 34_431, 45_904, 117, 306, 1_231, 1_231, 2_653, 33_994, 1_266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_training_new_tokenizer(self):
        # Not applicable: RoFormer uses a custom (rjieba) pre-tokenizer.
        pass

    def test_training_new_tokenizer_with_special_tokens_change(self):
        # Not applicable for the same reason as above.
        pass

    def test_save_slow_from_fast_and_reload_fast(self):
        # Not applicable for the same reason as above.
        pass
| 325 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
# Lazy import machinery: only the configuration is importable eagerly;
# modeling classes are registered per-submodule and resolved on first
# attribute access by ``_LazyModule``.
# Fixes: the structure dict and the lazy module were assigned to a
# throwaway name while ``_LazyModule`` received an undefined
# ``_import_structure``; the modeling list overwrote the dict instead of
# being keyed into it; the lazy module was never installed in
# ``sys.modules``; the TYPE_CHECKING nesting was flattened.
_import_structure = {"""configuration_unispeech""": ["""UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP""", """UniSpeechConfig"""]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: skip registering the torch-backed classes.
    pass
else:
    _import_structure["""modeling_unispeech"""] = [
        """UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """UniSpeechForCTC""",
        """UniSpeechForPreTraining""",
        """UniSpeechForSequenceClassification""",
        """UniSpeechModel""",
        """UniSpeechPreTrainedModel""",
    ]

if TYPE_CHECKING:
    from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_unispeech import (
            UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
            UniSpeechForCTC,
            UniSpeechForPreTraining,
            UniSpeechForSequenceClassification,
            UniSpeechModel,
            UniSpeechPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 409 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
# Lazy import machinery for BEiT; see ``_LazyModule``.
# Fixes: everything was assigned to the single name ``a_`` so the
# structure dict was destroyed by later assignments and
# ``_import_structure`` was undefined; the lazy module was never
# installed in ``sys.modules``; TYPE_CHECKING nesting was flattened.
_import_structure = {"""configuration_beit""": ["""BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BeitConfig""", """BeitOnnxConfig"""]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""feature_extraction_beit"""] = ["""BeitFeatureExtractor"""]
    _import_structure["""image_processing_beit"""] = ["""BeitImageProcessor"""]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_beit"""] = [
        """BEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """BeitForImageClassification""",
        """BeitForMaskedImageModeling""",
        """BeitForSemanticSegmentation""",
        """BeitModel""",
        """BeitPreTrainedModel""",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_flax_beit"""] = [
        """FlaxBeitForImageClassification""",
        """FlaxBeitForMaskedImageModeling""",
        """FlaxBeitModel""",
        """FlaxBeitPreTrainedModel""",
    ]

if TYPE_CHECKING:
    from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_beit import BeitFeatureExtractor
        from .image_processing_beit import BeitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_beit import (
            BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BeitForImageClassification,
            BeitForMaskedImageModeling,
            BeitForSemanticSegmentation,
            BeitModel,
            BeitPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_beit import (
            FlaxBeitForImageClassification,
            FlaxBeitForMaskedImageModeling,
            FlaxBeitModel,
            FlaxBeitPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    """Build an UperNetConfig (with a Swin backbone) for the given mmseg model name.

    Fixes: all locals were collapsed to ``__UpperCAmelCase`` so the size
    parameters overwrote each other; the id2label comprehension used the
    function argument instead of the dict key; ``idalabel``/``labelaid``
    were mangled forms of the ``id2label``/``label2id`` kwargs.
    """
    auxiliary_in_channels = 3_84
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 1_28
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 5_12
    elif "large" in model_name:
        embed_dim = 1_92
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 7_68

    # set label information (ADE20k: 150 semantic classes)
    num_labels = 1_50
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = SwinConfig(
        embed_dim=embed_dim, depths=depths, num_heads=num_heads, window_size=window_size, out_features=["stage1", "stage2", "stage3", "stage4"], )
    config = UperNetConfig(
        backbone_config=backbone_config, auxiliary_in_channels=auxiliary_in_channels, num_labels=num_labels, id2label=id2label, label2id=label2id, )

    return config
def create_rename_keys(config):
    """Return (old, new) key pairs mapping mmseg Swin+UperNet state-dict names
    onto the HF naming scheme.

    Fixes: the accumulator was assigned to ``__UpperCAmelCase`` while the
    body appended to an undefined ``rename_keys``; def name restored to
    match its call site.
    """
    rename_keys = []
    # fmt: off
    # stem
    rename_keys.append(("backbone.patch_embed.projection.weight", "backbone.embeddings.patch_embeddings.projection.weight") )
    rename_keys.append(("backbone.patch_embed.projection.bias", "backbone.embeddings.patch_embeddings.projection.bias") )
    rename_keys.append(("backbone.patch_embed.norm.weight", "backbone.embeddings.norm.weight") )
    rename_keys.append(("backbone.patch_embed.norm.bias", "backbone.embeddings.norm.bias") )
    # stages
    for i in range(len(config.backbone_config.depths ) ):
        for j in range(config.backbone_config.depths[i] ):
            rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
            rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
            rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
            rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
            rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
            rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
            rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
            rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
            rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
            rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
            rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
            rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
        # only the first 3 stages have a patch-merging (downsample) layer
        if i < 3:
            rename_keys.append((f'''backbone.stages.{i}.downsample.reduction.weight''', f'''backbone.encoder.layers.{i}.downsample.reduction.weight''') )
            rename_keys.append((f'''backbone.stages.{i}.downsample.norm.weight''', f'''backbone.encoder.layers.{i}.downsample.norm.weight''') )
            rename_keys.append((f'''backbone.stages.{i}.downsample.norm.bias''', f'''backbone.encoder.layers.{i}.downsample.norm.bias''') )
        rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
        rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
    # decode head
    rename_keys.extend(
        [
            ("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
            ("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
            ("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
            ("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
        ] )
    # fmt: on
    return rename_keys
def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` in place.

    Fixes: the mangled version had duplicate parameter names (a
    SyntaxError) and dropped the popped value instead of re-inserting it
    under the new key.
    """
    val = dct.pop(old)
    dct[new] = val
def read_in_q_k_v(state_dict, backbone_config):
    """Split each fused qkv projection in ``state_dict`` into separate
    query/key/value entries (in place).

    Fixes: the sliced q/k/v tensors were bound to a throwaway local
    instead of being written back under the HF key names.
    """
    num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        dim = num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight''' )
            in_proj_bias = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias''' )
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'''] = in_proj_weight[:dim, :]
            state_dict[f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'''] = in_proj_bias[:dim]
            state_dict[f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'''] = in_proj_weight[dim : dim * 2, :]
            state_dict[f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'''] = in_proj_bias[dim : dim * 2]
            state_dict[f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'''] = in_proj_weight[-dim:, :]
            state_dict[f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'''] = in_proj_bias[-dim:]
            # fmt: on
def correct_unfold_reduction_order(x):
    """Reorder a 2D patch-merging ``reduction`` weight from mmseg unfold
    order to HF order. Fixes the mangled body which referenced unbound
    ``x``/``out_channel``/``in_channel`` names."""
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x
def reverse_correct_unfold_reduction_order(x):
    """Inverse of ``correct_unfold_reduction_order`` for a 2D ``reduction``
    weight. Fixes the mangled body which referenced unbound names."""
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x
def correct_unfold_norm_order(x):
    """Reorder a 1D patch-merging ``norm`` parameter from mmseg unfold
    order to HF order. Fixes the mangled body which referenced unbound
    names."""
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x
def reverse_correct_unfold_norm_order(x):
    """Inverse of ``correct_unfold_norm_order`` for a 1D ``norm``
    parameter. Fixes the mangled body which referenced unbound names."""
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    """Convert an mmseg Swin + UperNet checkpoint to HF format, verify the
    logits on a fixture image, and optionally save / push the result.

    Fixes: locals were collapsed into one name so the url map, state dict,
    config and model overwrote each other; the "bn" rename loop never
    re-inserted the popped values; ``stream=True`` was mangled.
    """
    model_name_to_url = {
        "upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
        "upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
        "upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
        "upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
        "state_dict"
    ]

    for name, param in state_dict.items():
        print(name, param.shape)

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config.backbone_config)

    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits

    print(logits.shape)
    print("First values of logits:", logits[0, 0, :3, :3])
    # assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.59_58, -7.59_58, -7.43_02], [-7.59_58, -7.59_58, -7.43_02], [-7.47_97, -7.47_97, -7.30_68]] )
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.19_21, -7.19_21, -6.95_32], [-7.19_21, -7.19_21, -6.95_32], [-7.09_08, -7.09_08, -6.85_34]] )
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.58_51, -6.58_51, -6.43_30], [-6.58_51, -6.58_51, -6.43_30], [-6.47_63, -6.47_63, -6.32_54]] )
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.52_97, -7.52_97, -7.38_02], [-7.52_97, -7.52_97, -7.38_02], [-7.40_44, -7.40_44, -7.25_86]] )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''')
        model.save_pretrained(pytorch_dump_folder_path)
        print(f'''Saving processor to {pytorch_dump_folder_path}''')
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f'''Pushing model and processor for {model_name} to hub''')
        model.push_to_hub(f'''openmmlab/{model_name}''')
        processor.push_to_hub(f'''openmmlab/{model_name}''')
if __name__ == "__main__":
    # CLI entry point. Fix: parser/args were assigned to ``a_`` while the
    # rest of the block referenced undefined ``parser`` and ``args``.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--model_name""",
        default="""upernet-swin-tiny""",
        type=str,
        choices=[F"upernet-swin-{size}" for size in ["""tiny""", """small""", """base""", """large"""]],
        help="""Name of the Swin + UperNet model you'd like to convert.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
    )
    parser.add_argument(
        """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
    )
    args = parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 286 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy import machinery for MMBT; see ``_LazyModule``.
# Fixes: the structure dict / modeling list / lazy module were all
# assigned to throwaway names, leaving ``_import_structure`` undefined
# and the lazy module uninstalled.
_import_structure = {"""configuration_mmbt""": ["""MMBTConfig"""]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_mmbt"""] = ["""MMBTForClassification""", """MMBTModel""", """ModalEmbeddings"""]

if TYPE_CHECKING:
    from .configuration_mmbt import MMBTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 58 |
"""simple docstring"""
import argparse
import os
import re
import packaging.version
# Version-bump configuration. Fix: these constants were assigned to the
# single mangled name ``__lowerCAmelCase`` while the functions below
# reference PATH_TO_EXAMPLES / REPLACE_PATTERNS / REPLACE_FILES /
# README_FILE.
PATH_TO_EXAMPLES = '''examples/'''
# Each entry: (regex locating the version string, replacement template
# with the literal "VERSION" placeholder).
REPLACE_PATTERNS = {
    '''examples''': (re.compile(R'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
    '''init''': (re.compile(R'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
    '''setup''': (re.compile(R'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), R'''\1version="VERSION",'''),
    '''doc''': (re.compile(R'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
REPLACE_FILES = {
    '''init''': '''src/diffusers/__init__.py''',
    '''setup''': '''setup.py''',
}
README_FILE = '''README.md'''
def update_version_in_file(fname, version, pattern):
    """Rewrite the version string in ``fname`` using the regex/template pair
    ``REPLACE_PATTERNS[pattern]``.

    Fixes: the mangled version had three identical parameter names (a
    SyntaxError) and collapsed the locals.
    """
    with open(fname, """r""", encoding="""utf-8""", newline="""\n""") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("""VERSION""", version)
    code = re_pattern.sub(replace, code)
    with open(fname, """w""", encoding="""utf-8""", newline="""\n""") as f:
        f.write(code)
def update_version_in_examples(version):
    """Apply the "examples" version pattern to every ``.py`` file under
    ``PATH_TO_EXAMPLES``, skipping non-maintained folders."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("""research_projects""")
        if "legacy" in directories:
            directories.remove("""legacy""")
        for fname in fnames:
            if fname.endswith(""".py"""):
                update_version_in_file(os.path.join(folder, fname), version, pattern="""examples""")
def global_version_update(version, patch=False):
    """Update the version everywhere it is hard-coded; examples are skipped
    for patch releases."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Replace ``main``-branch doc links with release links in the README
    model list (the section delimited by the two prompts below)."""
    _start_prompt = """🤗 Transformers currently provides the following architectures"""
    _end_prompt = """1. Want to contribute a new model?"""
    with open(README_FILE, """r""", encoding="""utf-8""", newline="""\n""") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("""1."""):
            lines[index] = lines[index].replace(
                """https://huggingface.co/docs/diffusers/main/model_doc""", """https://huggingface.co/docs/diffusers/model_doc""", )
        index += 1

    with open(README_FILE, """w""", encoding="""utf-8""", newline="""\n""") as f:
        f.writelines(lines)
def get_version():
    """Read the current package version from the ``init`` file and return it
    as a ``packaging`` Version object."""
    with open(REPLACE_FILES["""init"""], """r""") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["""init"""][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    """Compute the release version (asking the user to confirm) and apply it
    everywhere via ``global_version_update``."""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = F'{default_version.major}.{default_version.minor}.{default_version.micro + 1}'
    else:
        default_version = F'{default_version.major}.{default_version.minor + 1}.0'

    # Now let's ask nicely if that's the right one.
    version = input(F'Which version are you releasing? [{default_version}]')
    if len(version) == 0:
        version = default_version

    print(F'Updating version to {version}.')
    global_version_update(version, patch=patch)
def post_release_work():
    """After a release, bump to the next ``.dev0`` version (asking the user
    to confirm)."""
    current_version = get_version()
    dev_version = F'{current_version.major}.{current_version.minor + 1}.0.dev0'
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(F'Which version are we developing now? [{dev_version}]')
    if len(version) == 0:
        version = dev_version

    print(F'Updating version to {version}.')
    global_version_update(version)
    # print("Cleaning main README, don't forget to run `make fix-copies`.")
    # clean_main_ref_in_model_list()
if __name__ == "__main__":
    # CLI entry point. Fix: parser/args were assigned to a throwaway name
    # while the branches referenced undefined ``parser`` and ``args``.
    parser = argparse.ArgumentParser()
    parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
    parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print('''Nothing to do after a patch :-)''')
    else:
        post_release_work()
| 58 | 1 |
"""simple docstring"""
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
SCREAMING_SNAKE_CASE : Dict = parse(importlib.metadata.version('''torch'''))
def __lowerCamelCase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ):
if operation not in STR_OPERATION_TO_FUNC.keys():
raise ValueError(f'''`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}''' )
A__ = STR_OPERATION_TO_FUNC[operation]
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
A__ = parse(importlib.metadata.version(lowerCAmelCase__ ) )
return operation(lowerCAmelCase__ ,parse(lowerCAmelCase__ ) )
def __lowerCamelCase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
return compare_versions(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
| 702 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
class snake_case_ ( _lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_: Union[str, Any] = """timm_backbone"""
def __init__( self , __a=None , __a=3 , __a=True , __a=True , __a=None , **__a , ):
"""simple docstring"""
super().__init__(**__a )
A__ = backbone
A__ = num_channels
A__ = features_only
A__ = use_pretrained_backbone
A__ = True
A__ = out_indices if out_indices is not None else (-1,)
| 554 | 0 |
'''simple docstring'''
def A_(column_title: str) -> int:
    """Convert an Excel-style column title (e.g. ``"AB"``) to its 1-based
    column number (e.g. 28).

    Fixes: the mangled body exponentiated by the input string
    (``pow(26, _lowerCAmelCase)``) and referenced locals that were never
    bound under those names.
    """
    # NOTE(review): ``assert`` is stripped under ``python -O``; kept to
    # preserve the original AssertionError contract for callers.
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        # 'A' is ord 65, so each letter contributes (1..26) * 26**power.
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    from doctest import testmod

    testmod()
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
# Lazy import machinery for GPT-NeoX; see ``_LazyModule``.
# Fixes: everything was assigned to the single name ``__a`` so the
# structure dict was destroyed by later assignments and
# ``_import_structure`` was undefined; the lazy module was never
# installed in ``sys.modules``; TYPE_CHECKING nesting was flattened.
_import_structure = {"""configuration_gpt_neox""": ["""GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXConfig"""]}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""tokenization_gpt_neox_fast"""] = ["""GPTNeoXTokenizerFast"""]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_gpt_neox"""] = [
        """GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """GPTNeoXForCausalLM""",
        """GPTNeoXForQuestionAnswering""",
        """GPTNeoXForSequenceClassification""",
        """GPTNeoXForTokenClassification""",
        """GPTNeoXLayer""",
        """GPTNeoXModel""",
        """GPTNeoXPreTrainedModel""",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox import (
            GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
            GPTNeoXLayer,
            GPTNeoXModel,
            GPTNeoXPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 152 | 0 |
'''simple docstring'''
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
# Emit INFO-level progress logs during checkpoint conversion.
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    """Copy ``weight`` (and optionally ``bias``) into ``torch_layer`` after a
    shape check.

    Fixes: the built ``nn.Parameter`` objects were bound to throwaway
    locals instead of being assigned to ``torch_layer.weight``/``.bias``.
    """
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"""{torch_layer} layer.weight does not match"""
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"""{torch_layer} layer.bias does not match"""
        torch_layer.bias = nn.Parameter(bias)
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    """Load trax LSH self-attention weights (shared query_key, value, output
    dense) into ``torch_layer``. Fixes collapsed local names and restores
    the def name used at the call sites."""
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key, torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.self_attention.value, torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.output.dense, torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1), )
def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    """Load trax local self-attention weights (separate query/key/value,
    output dense) into ``torch_layer``. Fixes collapsed local names and
    restores the def name used at the call sites."""
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query, torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.self_attention.key, torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.self_attention.value, torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.output.dense, torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1), )
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    """Load one trax Reformer block (layernorms, attention, feed-forward)
    into ``torch_block``. Fixes collapsed local names (e.g. the layernorm
    arrays were read from an undefined ``layer_norm_a``)."""
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm, torch.tensor(layer_norm_1_weight), torch.tensor(layer_norm_1_bias), )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm, torch.tensor(layer_norm_2_weight), torch.tensor(layer_norm_2_bias), )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense, torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(), torch.tensor(inter_dense_bias), )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense, torch.tensor(out_dense_weight).transpose(0, 1).contiguous(), torch.tensor(out_dense_bias), )
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    """Load a full trax Reformer checkpoint into ``torch_model``.

    Fixes: collapsed local names; ``isinstance(weights[3], tuple)`` was
    mangled into a check against an undefined name; the axial position
    embedding parameter was built but never assigned back.
    """
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings, torch.tensor(word_embeddings), )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"""{position_embeddings[emb_idx]} emb does not match"""
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm, torch.tensor(layer_norm_out_weight), torch.tensor(layer_norm_out_bias), )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder, torch.tensor(output_embed_weights).transpose(0, 1).contiguous(), torch.tensor(output_embed_bias), )
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    """Convert a pickled trax Reformer checkpoint into a PyTorch state dict.

    Restored name matches the ``__main__`` call site below.  The original
    bound every local to one throwaway name and then read ``config``/``model``,
    which were unbound.

    Args:
        trax_model_pkl_path: path to the trax pickle containing a "weights" key.
        config_file: JSON config describing the Reformer architecture.
        pytorch_dump_path: where to ``torch.save`` the resulting state dict.
    """
    config = ReformerConfig.from_json_file(config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        # NOTE(review): pickle.load on an untrusted file executes arbitrary
        # code — only run this on checkpoints you trust.
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    # The original bound the parser to a throwaway module name and then
    # called methods on the unbound names ``parser``/``args``.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--trax_model_pkl_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
    )
    parser.add_argument(
        """--config_file""",
        default=None,
        type=str,
        required=True,
        help=(
            """The config json file corresponding to the pre-trained Reformer model. \n"""
            """This specifies the model architecture."""
        ),
    )
    parser.add_argument(
        """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    args = parser.parse_args()
    convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 502 |
'''simple docstring'''
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class a(PreTrainedModel, BackboneMixin):
    """Adapter exposing a ``timm``-created model as a transformers backbone.

    Restored from an obfuscated original: the two base classes were unbound
    placeholder names (the imports above supply ``PreTrainedModel`` and
    ``BackboneMixin``), the three class attributes were all bound to one name
    (discarding the first two), method signatures repeated one parameter name
    (a SyntaxError), and method locals were read under names that were never
    bound.  Method names are restored to what the internal read sites and the
    ``PreTrainedModel`` machinery require.
    """

    # Read by the PreTrainedModel machinery.
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig

    def __init__(self, config, **kwargs):
        """Create the wrapped timm model from a ``TimmBackboneConfig``."""
        requires_backends(self, "timm")
        super().__init__(config)
        self.config = config
        if config.backbone is None:
            raise ValueError("backbone is not set in the config. Please set it to a timm model name.")

        if config.backbone not in timm.list_models():
            raise ValueError(f"""backbone {config.backbone} is not supported by timm.""")

        if hasattr(config, "out_features") and config.out_features is not None:
            raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")

        pretrained = getattr(config, "use_pretrained_backbone", None)
        if pretrained is None:
            raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")

        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config, "out_indices", None) is not None else (-1,)

        self._backbone = timm.create_model(
            config.backbone,
            pretrained=pretrained,
            features_only=config.features_only,
            in_chans=config.num_channels,
            out_indices=out_indices,
            **kwargs,
        )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(config)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        """Build the backbone from a timm model name (bypassing HF checkpoints)."""
        requires_backends(cls, ["vision", "timm"])
        from ...models.timm_backbone import TimmBackboneConfig

        config = kwargs.pop("config", TimmBackboneConfig())
        use_timm = kwargs.pop("use_timm_backbone", True)
        if not use_timm:
            raise ValueError("use_timm_backbone must be True for timm backbones")
        num_channels = kwargs.pop("num_channels", config.num_channels)
        features_only = kwargs.pop("features_only", config.features_only)
        use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone)
        out_indices = kwargs.pop("out_indices", config.out_indices)
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path,
            num_channels=num_channels,
            features_only=features_only,
            use_pretrained_backbone=use_pretrained_backbone,
            out_indices=out_indices,
        )
        return super()._from_config(config, **kwargs)

    def _init_weights(self, module):
        """Weight initialization is delegated entirely to timm; nothing to do."""
        pass

    def forward(self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs):
        """Run the timm backbone and package outputs as a ``BackboneOutput``."""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        if output_attentions:
            raise ValueError("Cannot output attentions for timm backbones at the moment")

        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs)
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices)
        else:
            feature_maps = self._backbone(pixel_values, **kwargs)
            hidden_states = None

        feature_maps = tuple(feature_maps)
        hidden_states = tuple(hidden_states) if hidden_states is not None else None

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output

        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
| 502 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def A(lowercase__=None):
    """Build the ``accelerate config`` argument parser with its subcommands.

    The original bound every parser to one throwaway name and then read the
    unbound names ``config_parser``/``parent_parser``; it also passed the
    function argument where ``add_help=False, allow_abbrev=False`` belong.

    Args:
        lowercase__: optional subparsers action to register the command on.

    Returns:
        The configured ``argparse`` parser for the ``config`` command.
    """
    # Parent parser carries shared options; it must not define its own --help.
    parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    # The main config parser
    config_parser = config_command_parser(lowercase__)
    # The subparser to add commands to
    subcommands = config_parser.add_subparsers(title="""subcommands""", dest="""subcommand""")

    # Then add other parsers with the parent parser
    default_command_parser(subcommands, parents=[parent_parser])
    update_command_parser(subcommands, parents=[parent_parser])

    return config_parser
def A():
    """Entry point: parse CLI args and dispatch to the chosen subcommand.

    NOTE(review): this module defines two functions that are both named ``A``
    (the parser builder above and this driver), so the first definition is
    shadowed and the ``get_config_parser()`` call below does not resolve;
    restoring distinct names requires a coordinated rename of both.
    """
    config_parser = get_config_parser()
    args = config_parser.parse_args()

    if not hasattr(args, """func"""):
        config_parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
    # The driver above is (obfuscatedly) named ``A``; the ``main`` the
    # original called does not exist in this module, so invoke ``A`` directly.
    A()
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
# Map each submodule to the public names it exposes; consumed by _LazyModule
# so heavy backends (torch, flax) are only imported when actually accessed.
# The original repeatedly rebound one module-level name (destroying the dict)
# and then passed an undefined ``_import_structure`` to _LazyModule.
_import_structure = {
    """configuration_gpt_neo""": ["""GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoConfig""", """GPTNeoOnnxConfig"""],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_gpt_neo"""] = [
        """GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """GPTNeoForCausalLM""",
        """GPTNeoForQuestionAnswering""",
        """GPTNeoForSequenceClassification""",
        """GPTNeoForTokenClassification""",
        """GPTNeoModel""",
        """GPTNeoPreTrainedModel""",
        """load_tf_weights_in_gpt_neo""",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_flax_gpt_neo"""] = [
        """FlaxGPTNeoForCausalLM""",
        """FlaxGPTNeoModel""",
        """FlaxGPTNeoPreTrainedModel""",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neo import (
            GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoForCausalLM,
            GPTNeoForQuestionAnswering,
            GPTNeoForSequenceClassification,
            GPTNeoForTokenClassification,
            GPTNeoModel,
            GPTNeoPreTrainedModel,
            load_tf_weights_in_gpt_neo,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel

else:
    import sys

    # Install the lazy proxy in place of this module.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 158 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
UpperCAmelCase_ = logging.get_logger(__name__)
def squared_euclidean_distance(a, b):
    """Pairwise squared Euclidean distances between rows of ``a`` and ``b``.

    Restored name matches the call site in the processor class below; the
    obfuscated original gave both parameters the same name and read locals
    (``aa``/``ab``/``ba``/``d``) that were never bound.

    Args:
        a: array of shape (n, d).
        b: array of shape (m, d).

    Returns:
        (n, m) array where entry (i, j) is ||a[i] - b[j]||**2.
    """
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    # ||a||^2 - 2 a·b + ||b||^2, broadcast over the (n, m) grid
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d
def color_quantize(x, clusters):
    """Map every RGB pixel in ``x`` to the index of its nearest cluster color.

    Restored name matches the call site in the processor class below.

    Args:
        x: array whose last dimension is 3 (RGB).
        clusters: (k, 3) palette of cluster colors.

    Returns:
        1-D array of cluster indices, one per pixel.
    """
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
class UpperCAmelCase(BaseImageProcessor):
    """ImageGPT-style image processor: resize, normalize to [-1, 1], and
    optionally color-quantize pixels against a fixed cluster palette.

    Restored from an obfuscated original whose base class was an unbound
    placeholder (``BaseImageProcessor`` is imported above), whose ``self.``
    attribute assignments were destroyed, and whose three same-named methods
    shadowed each other; method names are restored to match the
    ``self.resize``/``self.normalize`` read sites in ``preprocess``.
    """

    model_input_names = ["""pixel_values"""]

    def __init__(
        self,
        clusters=None,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BILINEAR,
        do_normalize=True,
        do_color_quantize=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize

    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs):
        """Resize ``image`` to the (height, width) given in ``size``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'''Size dictionary must contain both height and width keys. Got {size.keys()}''')
        return resize(
            image, size=(size['''height'''], size['''width''']), resample=resample, data_format=data_format, **kwargs)

    def normalize(self, image, data_format=None):
        """Scale pixel values from [0, 255] into [-1, 1]."""
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_normalize=None,
        do_color_quantize=None,
        clusters=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        """Apply the configured transformations and return a ``BatchFeature``."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters)
        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''')

        if do_resize and size is None or resample is None:
            raise ValueError('''Size and resample must be specified if do_resize is True.''')

        if do_color_quantize and clusters is None:
            raise ValueError('''Clusters must be specified if do_color_quantize is True.''')

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])

            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)

            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Map each submodule to its public names; consumed by _LazyModule so the torch
# backend is only imported on demand.  The original rebound one module-level
# name (destroying the dict) and passed an undefined ``_import_structure``.
_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )

else:
    import sys

    # Install the lazy proxy in place of this module.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
a__ : Optional[Any] = logging.get_logger(__name__)
class lowerCAmelCase__(BaseImageProcessor):
    """Image processor with resize, center-crop, rescale, and ImageNet
    normalization (defaults to the IMAGENET_DEFAULT mean/std).

    Restored from an obfuscated original whose base class was an unbound
    placeholder (``BaseImageProcessor`` is imported above), whose locals and
    ``self.`` attribute assignments were destroyed, and whose four same-named
    private methods shadowed each other; method names are restored to match
    the ``self.resize``/``self.center_crop``/``self.rescale``/``self.normalize``
    read sites in ``preprocess``.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BICUBIC,
        do_center_crop=True,
        do_rescale=True,
        rescale_factor=1 / 255,
        crop_size=None,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        # assumes crop_size should default to a square dict — TODO confirm
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name='''crop_size''')

        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs):
        """Resize to an explicit (height, width) or scale the shortest edge."""
        size = get_size_dict(size)
        if "shortest_edge" in size:
            size = get_resize_output_image_size(image, size=size['''shortest_edge'''], default_to_square=False)
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}")
        return resize(image, size=size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs):
        """Center-crop ``image`` to the (height, width) in ``size``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size['''height'''], size['''width''']), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        """Normalize ``image`` with per-channel ``mean`` and ``std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        """Apply the configured transformations and return a ``BatchFeature``."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name='''crop_size''', default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)

        if not is_batched(images):
            images = [images]

        if not valid_images(images):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''')

        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''')
        if do_center_crop and crop_size is None:
            raise ValueError('''Crop size must be specified if do_center_crop is True.''')
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''')

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 51 |
'''simple docstring'''
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class a_(Protocol):
    """Structural type for audio filters: anything with a ``process`` method.

    The base class was an unbound placeholder; ``Protocol`` (imported above)
    is what the original intended, and the method is renamed ``process`` to
    match the ``filter_type.process(...)`` call sites in this module.
    """

    def process(self, sample: float) -> float:
        """Calculate y[n] for input sample x[n]; the default is silence."""
        return 0.0
def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    """Return plot bounds (lowest, highest) over the sub-Nyquist FFT bins.

    Restored name matches the ``get_bounds(...)`` call site below; the
    original returned the unbound names ``lowest``/``highest``.

    Args:
        fft_results: magnitude (dB) spectrum.
        samplerate: sample rate; only bins below Nyquist are considered.

    Returns:
        (lowest, highest) clamped to at most -20 and at least 20 respectively.
    """
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest
def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot the gain (dB) of ``filter_type`` via an impulse-response FFT.

    Fixes in the restored version: the impulse was fed as ``process(item)``
    (the original passed an unbound name for every sample), and the corrupted
    ``np.logaa`` is restored to ``np.log10``.

    Args:
        filter_type: object with a per-sample ``process`` method.
        samplerate: sample rate in Hz (also the FFT length after padding).
    """
    size = 5_12
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()
def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot the phase response (radians) of ``filter_type`` via an impulse FFT.

    Restored: the impulse is fed as ``process(item)`` (the original passed an
    unbound name for every sample) and locals are bound under the names the
    body reads.

    Args:
        filter_type: object with a per-sample ``process`` method.
        samplerate: sample rate in Hz (also the FFT length after padding).
    """
    size = 5_12
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    phase_response = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(phase_response, -2 * pi))
    plt.show()
plt.show()
| 172 | 0 |
'''simple docstring'''
from __future__ import annotations
def UpperCAmelCase(A: list[list[int]]) -> int:
    """Return the minimal path sum from top-left to bottom-right of ``A``.

    Only right and down moves are allowed.  The matrix is updated in place so
    that each cell ends up holding the cheapest cost of reaching it.

    Args:
        A: rectangular matrix of numeric costs (mutated in place).  The
           original annotated this as ``str``, which contradicts the body.

    Returns:
        The minimal total cost of a top-left -> bottom-right path.
    """
    matrix = A  # the original body read ``matrix`` while the parameter was ``A``
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]

    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]

    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])
    return matrix[-1][-1]
# Run this module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 719 |
'''simple docstring'''
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase_ :
def __init__( self : str , lowerCAmelCase__ : int , lowerCAmelCase__ : str=13 , lowerCAmelCase__ : List[Any]=7 , lowerCAmelCase__ : Optional[int]=True , lowerCAmelCase__ : Dict=True , lowerCAmelCase__ : Union[str, Any]=False , lowerCAmelCase__ : Tuple=True , lowerCAmelCase__ : Dict=99 , lowerCAmelCase__ : List[str]=32 , lowerCAmelCase__ : str=5 , lowerCAmelCase__ : str=4 , lowerCAmelCase__ : Any=37 , lowerCAmelCase__ : Tuple="gelu" , lowerCAmelCase__ : int=0.1 , lowerCAmelCase__ : Dict=0.1 , lowerCAmelCase__ : str=5_12 , lowerCAmelCase__ : List[str]=16 , lowerCAmelCase__ : List[str]=2 , lowerCAmelCase__ : Union[str, Any]=0.02 , lowerCAmelCase__ : int=3 , lowerCAmelCase__ : Tuple=4 , lowerCAmelCase__ : List[Any]=None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = parent
SCREAMING_SNAKE_CASE : Optional[Any] = batch_size
SCREAMING_SNAKE_CASE : Any = seq_length
SCREAMING_SNAKE_CASE : Optional[int] = is_training
SCREAMING_SNAKE_CASE : Tuple = use_input_mask
SCREAMING_SNAKE_CASE : Tuple = use_token_type_ids
SCREAMING_SNAKE_CASE : Tuple = use_labels
SCREAMING_SNAKE_CASE : Union[str, Any] = vocab_size
SCREAMING_SNAKE_CASE : int = hidden_size
SCREAMING_SNAKE_CASE : int = num_hidden_layers
SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Union[str, Any] = intermediate_size
SCREAMING_SNAKE_CASE : List[str] = hidden_act
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : str = type_vocab_size
SCREAMING_SNAKE_CASE : int = type_sequence_label_size
SCREAMING_SNAKE_CASE : Tuple = initializer_range
SCREAMING_SNAKE_CASE : Tuple = num_labels
SCREAMING_SNAKE_CASE : List[Any] = num_choices
SCREAMING_SNAKE_CASE : Any = scope
def __lowercase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : Dict = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : str = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : str = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE : Tuple = None
SCREAMING_SNAKE_CASE : Union[str, Any] = None
SCREAMING_SNAKE_CASE : List[str] = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowercase ( self : List[Any] ):
"""simple docstring"""
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , )
def __lowercase ( self : List[Any] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = BioGptModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
SCREAMING_SNAKE_CASE : Any = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = model(lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowercase ( self : Union[str, Any] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : str , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Dict , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = BioGptForCausalLM(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowercase ( self : str , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : str , lowerCAmelCase__ : str , *lowerCAmelCase__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = BioGptModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
# create attention mask
SCREAMING_SNAKE_CASE : List[str] = torch.ones(input_ids.shape , dtype=torch.long , device=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = self.seq_length // 2
SCREAMING_SNAKE_CASE : Any = 0
# first forward pass
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Tuple = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ ).to_tuple()
# create hypothetical next token and extent to next_input_ids
SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
SCREAMING_SNAKE_CASE : List[Any] = ids_tensor((1,) , lowerCAmelCase__ ).item() + 1
SCREAMING_SNAKE_CASE : Tuple = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
SCREAMING_SNAKE_CASE : str = random_other_next_tokens
# append to next input_ids and attn_mask
SCREAMING_SNAKE_CASE : Tuple = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE : str = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=lowerCAmelCase__ )] , dim=1 , )
# get two different outputs
SCREAMING_SNAKE_CASE : Any = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )['''last_hidden_state''']
SCREAMING_SNAKE_CASE : Optional[Any] = model(lowerCAmelCase__ , past_key_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )['''last_hidden_state''']
# select random slice
SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE : Optional[Any] = output_from_no_past[:, -1, random_slice_idx].detach()
SCREAMING_SNAKE_CASE : Union[str, Any] = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1e-3 ) )
def __lowercase ( self : Any , lowerCAmelCase__ : str , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : List[Any] , *lowerCAmelCase__ : Any ):
    """Check that decoding three extra tokens with cached ``past_key_values``
    matches a full forward pass over the concatenated sequence.

    NOTE(review): locals were collapsed to ``SCREAMING_SNAKE_CASE`` by
    obfuscation; the later reads of ``input_ids``, ``attention_mask``,
    ``outputs``, ``next_tokens``, ``past_key_values`` … are unbound as written.
    """
    SCREAMING_SNAKE_CASE : str = BioGptModel(config=lowerCAmelCase__ ).to(lowerCAmelCase__ ).eval()
    SCREAMING_SNAKE_CASE : Union[str, Any] = torch.ones(input_ids.shape , dtype=torch.long , device=lowerCAmelCase__ )
    # first forward pass
    SCREAMING_SNAKE_CASE : Tuple = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , use_cache=lowerCAmelCase__ )
    SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : int = outputs.to_tuple()
    # create hypothetical multiple next token and extent to next_input_ids
    SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
    SCREAMING_SNAKE_CASE : List[Any] = ids_tensor((self.batch_size, 3) , 2 )
    # append to next input_ids and
    SCREAMING_SNAKE_CASE : Optional[Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
    SCREAMING_SNAKE_CASE : Optional[Any] = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
    SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )['''last_hidden_state''']
    SCREAMING_SNAKE_CASE : str = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , past_key_values=lowerCAmelCase__ )[
        '''last_hidden_state'''
    ]
    # select random slice
    SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
    SCREAMING_SNAKE_CASE : Union[str, Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
    SCREAMING_SNAKE_CASE : Union[str, Any] = output_from_past[:, :, random_slice_idx].detach()
    self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
    # test that outputs are equal for slice
    self.parent.assertTrue(torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1e-3 ) )
def __lowercase ( self : int , lowerCAmelCase__ : Any , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Any , lowerCAmelCase__ : str , *lowerCAmelCase__ : Any , lowerCAmelCase__ : int=False ):
    """Run a forward and backward pass through BioGptForCausalLM, optionally
    with gradient checkpointing, and check the loss/logits shapes.

    NOTE(review): ``gradient_checkpointing`` and ``result`` were the original
    names of obfuscated bindings and are unbound as written.
    """
    SCREAMING_SNAKE_CASE : List[Any] = BioGptForCausalLM(lowerCAmelCase__ )
    model.to(lowerCAmelCase__ )
    if gradient_checkpointing:
        model.gradient_checkpointing_enable()
    SCREAMING_SNAKE_CASE : Tuple = model(lowerCAmelCase__ , labels=lowerCAmelCase__ )
    self.parent.assertEqual(result.loss.shape , () )
    self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    result.loss.backward()
def __lowercase ( self : Any , lowerCAmelCase__ : str , *lowerCAmelCase__ : List[Any] ):
    """Check GPT-style scaled initialization: ``c_proj`` weight std close to
    initializer_range / sqrt(2 * num_hidden_layers), mean close to 0.

    NOTE(review): ``model`` and ``model_std`` are unbound as written — the
    obfuscation renamed the assignment targets on the first two lines.
    """
    SCREAMING_SNAKE_CASE : Union[str, Any] = BioGptModel(lowerCAmelCase__ )
    SCREAMING_SNAKE_CASE : int = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
    for key in model.state_dict().keys():
        if "c_proj" in key and "weight" in key:
            self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 )
            self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def __lowercase ( self : str , lowerCAmelCase__ : Any , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Tuple , *lowerCAmelCase__ : str ):
    """Check BioGptForTokenClassification logits have shape
    (batch_size, seq_length, num_labels).

    NOTE(review): ``model`` / ``result`` are unbound as written (obfuscated
    assignment targets).
    """
    SCREAMING_SNAKE_CASE : int = self.num_labels
    SCREAMING_SNAKE_CASE : Tuple = BioGptForTokenClassification(lowerCAmelCase__ )
    model.to(lowerCAmelCase__ )
    model.eval()
    SCREAMING_SNAKE_CASE : List[str] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ )
    self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowercase ( self : Optional[int] ):
    """Unpack ``prepare_config_and_inputs()`` into the ``(config, inputs_dict)``
    pair used by the shared model tests.

    NOTE(review): obfuscation collapsed all seven unpacking targets to the
    same name, so the later reads of ``config_and_inputs``, ``input_ids``
    and ``input_mask`` are unbound as written — restore the original target
    names before running.
    """
    SCREAMING_SNAKE_CASE : int = self.prepare_config_and_inputs()
    (
        (
            SCREAMING_SNAKE_CASE
        ) ,(
            SCREAMING_SNAKE_CASE
        ) ,(
            SCREAMING_SNAKE_CASE
        ) ,(
            SCREAMING_SNAKE_CASE
        ) ,(
            SCREAMING_SNAKE_CASE
        ) ,(
            SCREAMING_SNAKE_CASE
        ) ,(
            SCREAMING_SNAKE_CASE
        ) ,
    ) : Optional[Any] = config_and_inputs
    SCREAMING_SNAKE_CASE : Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
    return config, inputs_dict
@require_torch
class lowerCamelCase_ ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
    """BioGPT model test suite (common mixin tests plus model-specific checks).

    NOTE(review): obfuscation collapsed locals to ``SCREAMING_SNAKE_CASE`` and
    arguments to ``lowerCAmelCase__`` throughout; several methods read names
    (``model``, ``tokenizer``, ``inputs``, ``result`` …) that are never bound
    as written and must be restored before the suite can run.
    """

    # Model classes exercised by the shared tests (empty without torch).
    _lowerCAmelCase : Dict = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    _lowerCAmelCase : Optional[Any] = (BioGptForCausalLM,) if is_torch_available() else ()
    # Pipeline task name -> model class used for pipeline tests.
    _lowerCAmelCase : List[str] = (
        {
            'feature-extraction': BioGptModel,
            'text-classification': BioGptForSequenceClassification,
            'text-generation': BioGptForCausalLM,
            'token-classification': BioGptForTokenClassification,
            'zero-shot': BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    _lowerCAmelCase : Dict = False

    def __lowercase ( self : List[Any] ):
        """Create the model tester and config tester shared by every test."""
        SCREAMING_SNAKE_CASE : Union[str, Any] = BioGptModelTester(self )
        # NOTE(review): lowerCAmelCase__ is unbound here — the original passed the config class.
        SCREAMING_SNAKE_CASE : Any = ConfigTester(self , config_class=lowerCAmelCase__ , hidden_size=37 )

    def __lowercase ( self : Dict ):
        """Run the shared configuration sanity tests."""
        self.config_tester.run_common_tests()

    def __lowercase ( self : Optional[Any] ):
        """Smoke-test the base model."""
        SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowerCAmelCase__ )

    def __lowercase ( self : Any ):
        """Exercise every supported position-embedding type."""
        SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            SCREAMING_SNAKE_CASE : Optional[Any] = type
            self.model_tester.create_and_check_model(*lowerCAmelCase__ )

    def __lowercase ( self : Optional[Any] ):
        """Past-key-values consistency with a partially zeroed attention mask."""
        SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*lowerCAmelCase__ )

    def __lowercase ( self : List[Any] ):
        """Forward/backward pass with gradient checkpointing enabled."""
        SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*lowerCAmelCase__ , gradient_checkpointing=lowerCAmelCase__ )

    def __lowercase ( self : List[Any] ):
        """Past-key-values consistency with multi-token continuation."""
        SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*lowerCAmelCase__ )

    def __lowercase ( self : int ):
        """Scaled weight-initialization check."""
        SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*lowerCAmelCase__ )

    def __lowercase ( self : str ):
        """Token-classification head output shape check."""
        SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*lowerCAmelCase__ )

    @slow
    def __lowercase ( self : Tuple ):
        """Batched generation with left padding must match unpadded generation."""
        SCREAMING_SNAKE_CASE : Dict = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
        model.to(lowerCAmelCase__ )
        SCREAMING_SNAKE_CASE : int = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
        SCREAMING_SNAKE_CASE : Union[str, Any] = '''left'''
        # Define PAD Token = EOS Token = 50256
        SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.eos_token
        SCREAMING_SNAKE_CASE : Dict = model.config.eos_token_id
        # use different length sentences to test batching
        SCREAMING_SNAKE_CASE : Any = [
            '''Hello, my dog is a little''',
            '''Today, I''',
        ]
        SCREAMING_SNAKE_CASE : List[str] = tokenizer(lowerCAmelCase__ , return_tensors='''pt''' , padding=lowerCAmelCase__ )
        SCREAMING_SNAKE_CASE : Dict = inputs['''input_ids'''].to(lowerCAmelCase__ )
        SCREAMING_SNAKE_CASE : Union[str, Any] = model.generate(
            input_ids=lowerCAmelCase__ , attention_mask=inputs['''attention_mask'''].to(lowerCAmelCase__ ) , )
        SCREAMING_SNAKE_CASE : int = tokenizer(sentences[0] , return_tensors='''pt''' ).input_ids.to(lowerCAmelCase__ )
        SCREAMING_SNAKE_CASE : Any = model.generate(input_ids=lowerCAmelCase__ )
        # number of pad tokens in the shorter sentence of the batch
        SCREAMING_SNAKE_CASE : Tuple = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item()
        SCREAMING_SNAKE_CASE : List[Any] = tokenizer(sentences[1] , return_tensors='''pt''' ).input_ids.to(lowerCAmelCase__ )
        SCREAMING_SNAKE_CASE : Union[str, Any] = model.generate(input_ids=lowerCAmelCase__ , max_length=model.config.max_length - num_paddings )
        SCREAMING_SNAKE_CASE : List[Any] = tokenizer.batch_decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
        SCREAMING_SNAKE_CASE : List[str] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowerCAmelCase__ )
        SCREAMING_SNAKE_CASE : Tuple = tokenizer.decode(output_padded[0] , skip_special_tokens=lowerCAmelCase__ )
        SCREAMING_SNAKE_CASE : Dict = [
            '''Hello, my dog is a little bit bigger than a little bit.''',
            '''Today, I have a good idea of how to use the information''',
        ]
        self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
        self.assertListEqual(lowerCAmelCase__ , [non_padded_sentence, padded_sentence] )

    @slow
    def __lowercase ( self : Tuple ):
        """Pretrained checkpoints load successfully."""
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            SCREAMING_SNAKE_CASE : List[Any] = BioGptModel.from_pretrained(lowerCAmelCase__ )
            self.assertIsNotNone(lowerCAmelCase__ )

    def __lowercase ( self : List[str] ):
        """Single-label sequence-classification logits shape check."""
        SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        SCREAMING_SNAKE_CASE : Union[str, Any] = 3
        SCREAMING_SNAKE_CASE : Dict = input_dict['''input_ids''']
        SCREAMING_SNAKE_CASE : str = input_ids.ne(1 ).to(lowerCAmelCase__ )
        SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        SCREAMING_SNAKE_CASE : Dict = BioGptForSequenceClassification(lowerCAmelCase__ )
        model.to(lowerCAmelCase__ )
        model.eval()
        SCREAMING_SNAKE_CASE : List[str] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )

    def __lowercase ( self : str ):
        """Multi-label sequence-classification logits shape check."""
        SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs_for_common()
        SCREAMING_SNAKE_CASE : Tuple = 3
        SCREAMING_SNAKE_CASE : Optional[Any] = '''multi_label_classification'''
        SCREAMING_SNAKE_CASE : Any = input_dict['''input_ids''']
        SCREAMING_SNAKE_CASE : Any = input_ids.ne(1 ).to(lowerCAmelCase__ )
        SCREAMING_SNAKE_CASE : List[str] = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
        SCREAMING_SNAKE_CASE : List[Any] = BioGptForSequenceClassification(lowerCAmelCase__ )
        model.to(lowerCAmelCase__ )
        model.eval()
        SCREAMING_SNAKE_CASE : Optional[Any] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class lowerCamelCase_ ( unittest.TestCase ):
    """Slow integration tests for the published microsoft/biogpt checkpoint.

    NOTE(review): obfuscated assignment targets leave ``model``, ``tokenizer``,
    ``output``, ``vocab_size`` and ``output_ids`` unbound as written.
    """

    @slow
    def __lowercase ( self : List[Any] ):
        """Logits on a fixed 5-token input match recorded values (atol=1e-4)."""
        SCREAMING_SNAKE_CASE : Union[str, Any] = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
        SCREAMING_SNAKE_CASE : str = torch.tensor([[2, 48_05, 9, 6_56, 21]] )
        SCREAMING_SNAKE_CASE : Dict = model(lowerCAmelCase__ )[0]
        SCREAMING_SNAKE_CASE : Tuple = 4_23_84
        SCREAMING_SNAKE_CASE : List[Any] = torch.Size((1, 5, vocab_size) )
        self.assertEqual(output.shape , lowerCAmelCase__ )
        SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(
            [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCAmelCase__ , atol=1e-4 ) )

    @slow
    def __lowercase ( self : Tuple ):
        """Beam-search generation for "COVID-19 is" matches the recorded text."""
        SCREAMING_SNAKE_CASE : int = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
        SCREAMING_SNAKE_CASE : Optional[Any] = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
        model.to(lowerCAmelCase__ )
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE : str = tokenizer('''COVID-19 is''' , return_tensors='''pt''' ).to(lowerCAmelCase__ )
        SCREAMING_SNAKE_CASE : Union[str, Any] = model.generate(
            **lowerCAmelCase__ , min_length=1_00 , max_length=10_24 , num_beams=5 , early_stopping=lowerCAmelCase__ , )
        SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.decode(output_ids[0] , skip_special_tokens=lowerCAmelCase__ )
        SCREAMING_SNAKE_CASE : Any = (
            '''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'''
            ''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'''
            ''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'''
            ''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'''
            ''' more than 800,000 deaths.'''
        )
        self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
| 464 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
# Module-level flag; unused in the visible code — presumably gates optional behavior. TODO confirm.
UpperCAmelCase__ : str = False
class A ( unittest.TestCase ):
    # Intentionally empty placeholder for the fast (non-slow) test variant.
    pass
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
    """Slow GPU integration test for the Versatile Diffusion image-variation pipeline."""

    def __SCREAMING_SNAKE_CASE ( self : Tuple ):
        """Pipeline output slice must match recorded values within 1e-2.

        NOTE(review): obfuscation broke this body — locals are assigned to
        ``lowerCAmelCase__`` while later lines read ``pipe``, ``image`` and
        ``image_slice``, and ``__magic_name__`` is unbound; restore the
        original names (pipe / image / generator / torch_device) before use.
        """
        lowerCAmelCase__ = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion" )
        pipe.to(__magic_name__ )
        pipe.set_progress_bar_config(disable=__magic_name__ )
        lowerCAmelCase__ = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
        lowerCAmelCase__ = torch.manual_seed(0 )
        lowerCAmelCase__ = pipe(
            image=__magic_name__ , generator=__magic_name__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" , ).images
        lowerCAmelCase__ = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        lowerCAmelCase__ = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 48 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase__ : Tuple = logging.get_logger(__name__)

# SentencePiece marks word-initial pieces with this underline character.
UpperCAmelCase__ : Union[str, Any] = "▁"

UpperCAmelCase__ : List[str] = {"vocab_file": "sentencepiece.bpe.model"}

# Checkpoint name -> hosted SentencePiece model file.
UpperCAmelCase__ : Union[str, Any] = {
    "vocab_file": {
        "facebook/mbart-large-50-one-to-many-mmt": (
            "https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

# Maximum model input length (in tokens) per checkpoint.
UpperCAmelCase__ : Optional[Any] = {
    "facebook/mbart-large-50-one-to-many-mmt": 10_24,
}

# fmt: off
# Language codes supported by mBART-50, in fairseq order (ids are assigned by position).
UpperCAmelCase__ : Tuple = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]
class A ( SCREAMING_SNAKE_CASE__ ):
    """MBart-50 SentencePiece tokenizer (obfuscated copy).

    Wraps a SentencePiece BPE model, mimics the fairseq token-to-id
    alignment, and wraps every encoded sequence as
    ``[src_lang_code] tokens [</s>]`` via ``prefix_tokens``/``suffix_tokens``.

    NOTE(review): obfuscation renamed most methods to the same
    ``__SCREAMING_SNAKE_CASE`` identifier (later defs shadow earlier ones)
    and collapsed attribute assignments to the bare local ``lowerCAmelCase__``,
    so attributes such as ``self.sp_model`` are never actually bound — the
    original method/attribute names must be restored before use.
    """

    snake_case__ :Optional[int] = VOCAB_FILES_NAMES
    snake_case__ :str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    snake_case__ :Any = PRETRAINED_VOCAB_FILES_MAP
    snake_case__ :Tuple = ['input_ids', 'attention_mask']
    # Token ids prepended / appended around every encoded sequence.
    snake_case__ :List[int] = []
    snake_case__ :List[int] = []

    def __init__( self : int , __magic_name__ : int , __magic_name__ : Dict=None , __magic_name__ : Optional[int]=None , __magic_name__ : Optional[int]="</s>" , __magic_name__ : List[Any]="</s>" , __magic_name__ : List[Any]="<s>" , __magic_name__ : Tuple="<unk>" , __magic_name__ : List[Any]="<pad>" , __magic_name__ : List[Any]="<mask>" , __magic_name__ : Optional[Dict[str, Any]] = None , **__magic_name__ : List[Any] , ):
        """Load the SentencePiece model and build the fairseq-aligned vocab maps."""
        # Mask token behaves like a normal word, i.e. keep the preceding space.
        lowerCAmelCase__ = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else mask_token
        lowerCAmelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs
        # Register every FAIRSEQ language code as an additional special token.
        lowerCAmelCase__ = kwargs.get("additional_special_tokens" , [] )
        kwargs["additional_special_tokens"] += [
            code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            src_lang=__magic_name__ , tgt_lang=__magic_name__ , eos_token=__magic_name__ , unk_token=__magic_name__ , sep_token=__magic_name__ , cls_token=__magic_name__ , pad_token=__magic_name__ , mask_token=__magic_name__ , sp_model_kwargs=self.sp_model_kwargs , **__magic_name__ , )
        lowerCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(__magic_name__ ) )
        lowerCAmelCase__ = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
        # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 token
        lowerCAmelCase__ = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        lowerCAmelCase__ = 1
        lowerCAmelCase__ = len(self.sp_model )
        # Language codes get ids after the spm vocabulary (plus fairseq offset).
        lowerCAmelCase__ = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__magic_name__ )
        }
        lowerCAmelCase__ = {v: k for k, v in self.lang_code_to_id.items()}
        lowerCAmelCase__ = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
        lowerCAmelCase__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        # Default source language is English when none is provided.
        lowerCAmelCase__ = src_lang if src_lang is not None else "en_XX"
        lowerCAmelCase__ = self.lang_code_to_id[self._src_lang]
        lowerCAmelCase__ = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )

    @property
    def __SCREAMING_SNAKE_CASE ( self : int ):
        """Total vocabulary size: spm pieces + language codes + fairseq offset + mask."""
        return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def __SCREAMING_SNAKE_CASE ( self : Dict ):
        """Current source-language code (e.g. ``en_XX``)."""
        return self._src_lang

    @src_lang.setter
    def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : str ):
        """Set the source language and refresh the prefix/suffix special tokens."""
        lowerCAmelCase__ = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )

    def __getstate__( self : Dict ):
        """Drop the unpicklable SentencePiece processor from the pickled state."""
        lowerCAmelCase__ = self.__dict__.copy()
        lowerCAmelCase__ = None
        return state

    def __setstate__( self : List[Any] , __magic_name__ : Dict ):
        """Restore state and reload the SentencePiece model from ``vocab_file``."""
        lowerCAmelCase__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            lowerCAmelCase__ = {}
        lowerCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
        """Return the full token -> id vocabulary, including added tokens."""
        lowerCAmelCase__ = {self.convert_ids_to_tokens(__magic_name__ ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __SCREAMING_SNAKE_CASE ( self : int , __magic_name__ : str ):
        """Tokenize text into SentencePiece string pieces."""
        return self.sp_model.encode(__magic_name__ , out_type=__magic_name__ )

    def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : str ):
        """Convert a token to its id, honoring the fairseq alignment offset."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        lowerCAmelCase__ = self.sp_model.PieceToId(__magic_name__ )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : int ):
        """Convert an id back to its token, honoring the fairseq offset."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )

    def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : List[Any] ):
        """Join tokens into a single string, emitting special tokens verbatim."""
        lowerCAmelCase__ = []
        lowerCAmelCase__ = ""
        lowerCAmelCase__ = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(__magic_name__ ) + token
                lowerCAmelCase__ = True
                lowerCAmelCase__ = []
            else:
                current_sub_tokens.append(__magic_name__ )
                lowerCAmelCase__ = False
        out_string += self.sp_model.decode(__magic_name__ )
        return out_string.strip()

    def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : str , __magic_name__ : Optional[str] = None ):
        """Serialize the SentencePiece model into ``save_directory``; returns the path tuple."""
        if not os.path.isdir(__magic_name__ ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        lowerCAmelCase__ = os.path.join(
            __magic_name__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(__magic_name__ ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , __magic_name__ )
        elif not os.path.isfile(self.vocab_file ):
            with open(__magic_name__ , "wb" ) as fi:
                lowerCAmelCase__ = self.sp_model.serialized_model_proto()
                fi.write(__magic_name__ )
        return (out_vocab_file,)

    def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None , __magic_name__ : bool = False ):
        """Return a mask with 1 at special-token positions and 0 at sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=__magic_name__ , token_ids_a=__magic_name__ , already_has_special_tokens=__magic_name__ )
        lowerCAmelCase__ = [1] * len(self.prefix_tokens )
        lowerCAmelCase__ = [1] * len(self.suffix_tokens )
        if token_ids_a is None:
            return prefix_ones + ([0] * len(__magic_name__ )) + suffix_ones
        return prefix_ones + ([0] * len(__magic_name__ )) + ([0] * len(__magic_name__ )) + suffix_ones

    def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ):
        """Build model inputs: ``[src_lang_code] tokens [</s>]``."""
        if token_ids_a is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens

    def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : Dict , __magic_name__ : str , __magic_name__ : Optional[str] , __magic_name__ : Optional[str] , **__magic_name__ : Optional[Any] ):
        """Encode a raw input for translation and attach the forced-BOS target-language id."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
        lowerCAmelCase__ = src_lang
        lowerCAmelCase__ = self(__magic_name__ , add_special_tokens=__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ )
        lowerCAmelCase__ = self.convert_tokens_to_ids(__magic_name__ )
        lowerCAmelCase__ = tgt_lang_id
        return inputs

    def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : List[str] , __magic_name__ : str = "en_XX" , __magic_name__ : Optional[List[str]] = None , __magic_name__ : str = "ro_RO" , **__magic_name__ : Union[str, Any] , ):
        """Prepare a seq2seq batch for the given source/target language pair."""
        lowerCAmelCase__ = src_lang
        lowerCAmelCase__ = tgt_lang
        return super().prepare_seqaseq_batch(__magic_name__ , __magic_name__ , **__magic_name__ )

    def __SCREAMING_SNAKE_CASE ( self : str ):
        """Switch special tokens to input (source-language) mode."""
        return self.set_src_lang_special_tokens(self.src_lang )

    def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
        """Switch special tokens to target (target-language) mode."""
        return self.set_tgt_lang_special_tokens(self.tgt_lang )

    def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : str ):
        """Reset special tokens: prefix=[src_lang_code], suffix=[eos]."""
        lowerCAmelCase__ = self.lang_code_to_id[src_lang]
        lowerCAmelCase__ = [self.cur_lang_code_id]
        lowerCAmelCase__ = [self.eos_token_id]

    def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : str ):
        """Reset special tokens: prefix=[tgt_lang_code], suffix=[eos]."""
        lowerCAmelCase__ = self.lang_code_to_id[tgt_lang]
        lowerCAmelCase__ = [self.cur_lang_code_id]
        lowerCAmelCase__ = [self.eos_token_id]
| 48 | 1 |
"""simple docstring"""
# Lookup table mapping remainder values 0-15 to their hexadecimal digit characters.
_SCREAMING_SNAKE_CASE = {index: digit for index, digit in enumerate("0123456789abcdef")}
def __lowerCAmelCase ( __lowerCAmelCase : float ) -> str:
    """Convert an integral decimal number to its lowercase hexadecimal string.

    Matches the format of Python's built-in ``hex``: ``"0x..."`` with a
    leading ``"-"`` for negative values, and ``"0x0"`` for zero.

    Args:
        __lowerCAmelCase: an ``int``, or a ``float`` with no fractional part.

    Returns:
        The hexadecimal representation, e.g. ``255 -> "0xff"``.

    Raises:
        AssertionError: if the input is not an integral int/float.

    >>> __lowerCAmelCase(255)
    '0xff'
    >>> __lowerCAmelCase(0)
    '0x0'
    """
    # Explicit raise instead of a bare `assert` so validation survives `python -O`;
    # also fixes the original NameError (it compared against an unbound `decimal`).
    if type(__lowerCAmelCase ) not in (int, float) or __lowerCAmelCase != int(__lowerCAmelCase ):
        raise AssertionError("input must be an int or an integral float" )
    decimal = int(__lowerCAmelCase )
    hexadecimal = ""
    negative = decimal < 0
    if negative:
        decimal *= -1
    digits = "0123456789abcdef"
    while decimal > 0:
        decimal, remainder = divmod(decimal , 16 )
        hexadecimal = digits[remainder] + hexadecimal
    # `or "0"` fixes the zero case, which previously produced the bare string "0x".
    hexadecimal = "0x" + (hexadecimal or "0")
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal
if __name__ == "__main__":
    # Run the doctest examples embedded in this module when executed directly.
    import doctest

    doctest.testmod()
| 239 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)

# here we list all keys to be renamed (original name on the left, our name on the right)
# NOTE(review): obfuscation renamed the list binding to _SCREAMING_SNAKE_CASE while the
# loop below appends to `rename_keys`, which is unbound as written — restore the name.
_SCREAMING_SNAKE_CASE = []
for i in range(6):
    # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
    rename_keys.append(
        (f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight')
    )
    rename_keys.append(
        (f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias')
    )
    rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight'))
    rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias'))
    rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight'))
    rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias'))
    rename_keys.append(
        (f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight')
    )
    rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias'))
    rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight'))
    rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias'))
    # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
    rename_keys.append(
        (f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'decoder.layers.{i}.self_attn.out_proj.weight')
    )
    rename_keys.append(
        (f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias')
    )
    rename_keys.append(
        (
            f'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight',
            f'decoder.layers.{i}.encoder_attn.out_proj.weight',
        )
    )
    rename_keys.append(
        (
            f'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias',
            f'decoder.layers.{i}.encoder_attn.out_proj.bias',
        )
    )
    rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight'))
    rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias'))
    rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight'))
    rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias'))
    rename_keys.append(
        (f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight')
    )
    rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias'))
    rename_keys.append(
        (f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight')
    )
    rename_keys.append(
        (f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias')
    )
    rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight'))
    rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias'))

# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
    [
        ("""input_proj.weight""", """input_projection.weight"""),
        ("""input_proj.bias""", """input_projection.bias"""),
        ("""query_embed.weight""", """query_position_embeddings.weight"""),
        ("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""),
        ("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""),
        ("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
        ("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
        ("""class_embed.weight""", """class_labels_classifier.weight"""),
        ("""class_embed.bias""", """class_labels_classifier.bias"""),
        ("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
        ("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
        ("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
        ("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
        ("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
        ("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
    ]
)
def __lowerCAmelCase ( state_dict : dict , old_key : str , new_key : str ) -> None:
    """Rename ``old_key`` to ``new_key`` in ``state_dict``, in place.

    Fixes the obfuscated original, which declared three parameters with the
    same name (a SyntaxError) and assigned the popped value to a throwaway
    local instead of the new key.

    Args:
        state_dict: mapping of parameter names to tensors/values (mutated).
        old_key: key to remove; must be present.
        new_key: key under which the value is re-inserted.
    """
    value = state_dict.pop(old_key )
    state_dict[new_key] = value
def __lowerCAmelCase ( __lowerCAmelCase ) -> OrderedDict:
    """Return a new ordered state dict with timm backbone keys remapped.

    Keys containing ``"backbone.0.body"`` are rewritten to the HuggingFace
    ``"backbone.conv_encoder.model"`` prefix; all other entries are copied
    unchanged, preserving order. Fixes the obfuscated original, which read
    an unbound ``state_dict`` and discarded every value instead of storing
    it in the new dict.

    Args:
        __lowerCAmelCase: the source state dict (not mutated).

    Returns:
        A new ``OrderedDict`` with renamed keys.
    """
    new_state_dict = OrderedDict()
    for key, value in __lowerCAmelCase.items():
        if "backbone.0.body" in key:
            key = key.replace("backbone.0.body" , "backbone.conv_encoder.model" )
        new_state_dict[key] = value
    return new_state_dict
def __lowerCAmelCase ( __lowerCAmelCase : str ) -> List[str]:
    """Split each fused MultiHeadAttention ``in_proj`` weight/bias into
    separate 256-dim q/k/v projection slices for all 6 encoder and 6 decoder
    layers (decoder additionally has cross-attention projections).

    NOTE(review): obfuscation destroyed this body — ``state_dict`` and the
    sliced ``in_proj_*`` names are unbound, and every q/k/v slice is assigned
    to a throwaway local instead of back into the state dict under the
    per-layer q_proj/k_proj/v_proj keys; the original parameter name and
    assignment targets must be restored before use.
    """
    _UpperCamelCase : Any = ""
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        _UpperCamelCase : Any = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight" )
        _UpperCamelCase : List[str] = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias" )
        # next, add query, keys and values (in that order) to the state dict
        _UpperCamelCase : Tuple = in_proj_weight[:256, :]
        _UpperCamelCase : Dict = in_proj_bias[:256]
        _UpperCamelCase : int = in_proj_weight[256:512, :]
        _UpperCamelCase : Tuple = in_proj_bias[256:512]
        _UpperCamelCase : str = in_proj_weight[-256:, :]
        _UpperCamelCase : int = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6 ):
        # read in weights + bias of input projection layer of self-attention
        _UpperCamelCase : int = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight" )
        _UpperCamelCase : Optional[int] = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias" )
        # next, add query, keys and values (in that order) to the state dict
        _UpperCamelCase : List[Any] = in_proj_weight[:256, :]
        _UpperCamelCase : str = in_proj_bias[:256]
        _UpperCamelCase : Optional[Any] = in_proj_weight[256:512, :]
        _UpperCamelCase : str = in_proj_bias[256:512]
        _UpperCamelCase : Tuple = in_proj_weight[-256:, :]
        _UpperCamelCase : Optional[Any] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        _UpperCamelCase : Any = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight" )
        _UpperCamelCase : Tuple = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias" )
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        _UpperCamelCase : Optional[Any] = in_proj_weight_cross_attn[:256, :]
        _UpperCamelCase : str = in_proj_bias_cross_attn[:256]
        _UpperCamelCase : Dict = in_proj_weight_cross_attn[256:512, :]
        _UpperCamelCase : int = in_proj_bias_cross_attn[256:512]
        _UpperCamelCase : int = in_proj_weight_cross_attn[-256:, :]
        _UpperCamelCase : Optional[Any] = in_proj_bias_cross_attn[-256:]
def resize(image, checkpoint_url):
    """Resize *image* so its longer side matches the checkpoint's expected max size.

    Detection checkpoints expect a longest side of 800 pixels, structure-recognition
    checkpoints 1000 (this mirrors the `max_size` used for the image processor below).
    The aspect ratio is preserved.

    Args:
        image: a PIL image (anything exposing `.size` and `.resize`).
        checkpoint_url: the checkpoint URL; "detection" in it selects the 800px target.

    Returns:
        The resized image.
    """
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image
def normalize(image):
    """Convert a PIL image to a tensor and normalize it with the ImageNet mean/std.

    NOTE(review): `F` is presumably `torchvision.transforms.functional`, imported
    earlier in the file — confirm against the file header.
    """
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    """Convert a PubTables-1M Table Transformer checkpoint to the HuggingFace format.

    Downloads the original state dict, renames its keys to the HF naming scheme,
    verifies the converted model's outputs on an example image, and optionally
    saves/pushes the result.

    Args:
        checkpoint_url: URL of the original checkpoint ("detection" vs. structure
            recognition is inferred from the URL).
        pytorch_dump_folder_path: where to save the converted model, or None.
        push_to_hub: whether to push model + image processor to the HF hub.
    """
    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")

    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)

    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)

    # important: we need to prepend a prefix to each of the base model keys as the head
    # models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val

    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18",
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )
    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        idalabel = {0: "table", 1: "table rotated"}
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        idalabel = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}

    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000
    )
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion on an example image
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)
    outputs = model(pixel_values)

    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
        )
        expected_boxes = torch.tensor(
            [[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]]
        )
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
        )
        expected_boxes = torch.tensor(
            [[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]]
        )

    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
    # CLI entry point: parse the checkpoint URL / output folder / hub flag and convert.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_url",
        default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
        type=str,
        choices=[
            "https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
            "https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth",
        ],
        help="URL of the Table Transformer checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 239 | 1 |
'''simple docstring'''
def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for the given number of rows, centered with spaces."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces so the triangle is centered
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values separated by single spaces
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()
def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    """Create Pascal's triangle as a list of rows, one list per row.

    Args:
        num_rows: number of rows to generate; must be a non-negative int.

    Returns:
        The triangle, e.g. [[1], [1, 1], [1, 2, 1]] for num_rows=3.

    Raises:
        TypeError: if num_rows is not an int.
        ValueError: if num_rows is negative.
    """
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError("The input value of 'num_rows' should be greater than or equal to 0")
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle
def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    """Build row `current_row_idx` of Pascal's triangle from the rows already computed."""
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of the current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(triangle, current_row, current_row_idx, current_col_idx)
    return current_row
def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    """Set current_row[current_col_idx] to the sum of the two elements above it (in place)."""
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    """Create Pascal's triangle, computing only the first half of each row.

    Each row is symmetric, so only ceil(len/2) elements are computed and the rest
    is obtained by mirroring.

    >>> generate_pascal_triangle_optimized(3)
    [[1], [1, 1], [1, 2, 1]]

    Raises:
        TypeError: if num_rows is not an int.
        ValueError: if num_rows is negative.
    """
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError("The input value of 'num_rows' should be greater than or equal to 0")
    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        # Pad the previous row with zeros so adjacent sums give the new row
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Number of distinct elements in a row (rows are symmetric)
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row_to_append = row_first_half + row_second_half
        result.append(row_to_append)
    return result
def benchmark() -> None:
    """Time both triangle generators for inputs 0..14 and print the results."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        # Build the call expression and time it against this module's namespace.
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
    # Run the module doctests, then time both triangle implementations.
    import doctest

    doctest.testmod()
    benchmark()
| 342 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester(unittest.TestCase):
    """Builds small BertConfig + dummy inputs for the Flax BERT model tests below."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, attention_mask) dummy tensors."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common mixin tests expect."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        """Same as above but with is_decoder=True and encoder tensors for cross-attention."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """Runs the common Flax model tests against all Flax BERT head models."""

    test_head_masking = True

    all_model_classes = (
        (
            FlaxBertModel,
            FlaxBertForPreTraining,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForQuestionAnswering,
            FlaxBertForNextSentencePrediction,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
            FlaxBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        # Only check this for base model, not necessary for all model classes.
        # This will also help speed-up tests.
        model = FlaxBertModel.from_pretrained("bert-base-cased")
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
| 342 | 1 |
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class SCREAMING_SNAKE_CASE__(unittest.TestCase):
    """Nightly integration test for the legacy ONNX Stable Diffusion inpainting pipeline."""

    @property
    def gpu_provider(self):
        # ONNX Runtime execution provider tuple used when loading the pipeline.
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        # Disabling the memory pattern optimization keeps GPU memory usage predictable.
        options.enable_mem_pattern = False
        return options

    def test_inference(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy"
        )

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=15,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
| 707 |
import os
import sys
import unittest

# Make the repository's `utils` directory importable so `get_test_info` resolves.
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import get_test_info  # noqa: E402
from get_test_info import (  # noqa: E402
    get_model_to_test_mapping,
    get_model_to_tester_mapping,
    get_test_to_tester_mapping,
)

# Test files whose (model <-> test <-> tester) relationships are checked below.
BERT_TEST_FILE = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
BLIP_TEST_FILE = os.path.join("tests", "models", "blip", "test_modeling_blip.py")


class SCREAMING_SNAKE_CASE__(unittest.TestCase):
    """Checks the mappings extracted by `get_test_info` for the BERT and BLIP test files."""

    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {"BertModelTest": "BertModelTester"}
        EXPECTED_BLIP_MAPPING = {
            "BlipModelTest": "BlipModelTester",
            "BlipTextImageModelTest": "BlipTextImageModelsModelTester",
            "BlipTextModelTest": "BlipTextModelTester",
            "BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
            "BlipVQAModelTest": "BlipVQAModelTester",
            "BlipVisionModelTest": "BlipVisionModelTester",
        }

        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTest"],
            "BertForMultipleChoice": ["BertModelTest"],
            "BertForNextSentencePrediction": ["BertModelTest"],
            "BertForPreTraining": ["BertModelTest"],
            "BertForQuestionAnswering": ["BertModelTest"],
            "BertForSequenceClassification": ["BertModelTest"],
            "BertForTokenClassification": ["BertModelTest"],
            "BertLMHeadModel": ["BertModelTest"],
            "BertModel": ["BertModelTest"],
        }
        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelTest"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
            "BlipForQuestionAnswering": ["BlipVQAModelTest"],
            "BlipModel": ["BlipModelTest"],
            "BlipTextModel": ["BlipTextModelTest"],
            "BlipVisionModel": ["BlipVisionModelTest"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTester"],
            "BertForMultipleChoice": ["BertModelTester"],
            "BertForNextSentencePrediction": ["BertModelTester"],
            "BertForPreTraining": ["BertModelTester"],
            "BertForQuestionAnswering": ["BertModelTester"],
            "BertForSequenceClassification": ["BertModelTester"],
            "BertForTokenClassification": ["BertModelTester"],
            "BertLMHeadModel": ["BertModelTester"],
            "BertModel": ["BertModelTester"],
        }
        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
            "BlipForQuestionAnswering": ["BlipVQAModelTester"],
            "BlipModel": ["BlipModelTester"],
            "BlipTextModel": ["BlipTextModelTester"],
            "BlipVisionModel": ["BlipVisionModelTester"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
| 520 | 0 |
from math import ceil
def assert_device_map(device_map, num_blocks):
    """Validate that `device_map` assigns each of the `num_blocks` attention blocks exactly once.

    Args:
        device_map: mapping of device -> list of attention-block indices.
        num_blocks: total number of attention blocks in the model.

    Raises:
        ValueError: if any block is assigned to more than one device, if any
            block is missing from the map, or if the map references blocks
            the model does not have.
    """
    blocks = list(range(0, num_blocks))
    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)

    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks)
        )
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks)
        )
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks)
        )
def get_device_map(n_layers, devices):
    """Evenly split `n_layers` layer indices across `devices`.

    Returns a dict of device -> contiguous list of layer indices; the last
    device may get fewer layers when the split is uneven.
    """
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]
    return dict(zip(devices, layers_list))
| 54 |
from __future__ import annotations

import requests


def get_hackernews_story(story_id: str) -> dict:
    """Fetch a single Hacker News item by id via the public Firebase API."""
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """Get the top `max_stories` posts from Hacker News - https://news.ycombinator.com/"""
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    """Render the top stories as a markdown bullet list of [title](url) links."""
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)


if __name__ == "__main__":
    print(hackernews_top_stories_as_markdown())
| 194 | 0 |
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def __magic_name__ ( self ):
lowerCamelCase__ = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
lowerCamelCase__ = dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) )
lowerCamelCase__ = {
"unk_token": "<unk>",
"bos_token": "<s>",
"eos_token": "</s>",
}
lowerCamelCase__ = {
"feature_size": 1,
"padding_value": 0.0,
"sampling_rate": 1_6000,
"return_attention_mask": False,
"do_normalize": True,
}
lowerCamelCase__ = tempfile.mkdtemp()
lowerCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowerCamelCase__ = os.path.join(self.tmpdirname , _lowerCAmelCase )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(_lowerCAmelCase ) + "\n" )
with open(self.feature_extraction_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(_lowerCAmelCase ) + "\n" )
# load decoder from hub
lowerCamelCase__ = "hf-internal-testing/ngram-beam-search-decoder"
def __magic_name__ ( self , **_lowerCAmelCase ):
lowerCamelCase__ = self.add_kwargs_tokens_map.copy()
kwargs.update(_lowerCAmelCase )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def __magic_name__ ( self , **_lowerCAmelCase ):
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def __magic_name__ ( self , **_lowerCAmelCase ):
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **_lowerCAmelCase )
def __magic_name__ ( self ):
shutil.rmtree(self.tmpdirname )
def __magic_name__ ( self ):
lowerCamelCase__ = self.get_tokenizer()
lowerCamelCase__ = self.get_feature_extractor()
lowerCamelCase__ = self.get_decoder()
lowerCamelCase__ = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase__ = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , _lowerCAmelCase )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , _lowerCAmelCase )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , _lowerCAmelCase )
def __magic_name__ ( self ):
lowerCamelCase__ = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
lowerCamelCase__ = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def __magic_name__ ( self ):
lowerCamelCase__ = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["xx"] )
with self.assertRaisesRegex(_lowerCAmelCase , "include" ):
WavaVecaProcessorWithLM(
tokenizer=_lowerCAmelCase , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def __magic_name__ ( self ):
lowerCamelCase__ = self.get_feature_extractor()
lowerCamelCase__ = self.get_tokenizer()
lowerCamelCase__ = self.get_decoder()
lowerCamelCase__ = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
lowerCamelCase__ = floats_list((3, 1000) )
lowerCamelCase__ = feature_extractor(_lowerCAmelCase , return_tensors="np" )
lowerCamelCase__ = processor(_lowerCAmelCase , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __magic_name__ ( self ):
lowerCamelCase__ = self.get_feature_extractor()
lowerCamelCase__ = self.get_tokenizer()
lowerCamelCase__ = self.get_decoder()
lowerCamelCase__ = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
lowerCamelCase__ = "This is a test string"
lowerCamelCase__ = processor(text=_lowerCAmelCase )
lowerCamelCase__ = tokenizer(_lowerCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __magic_name__ ( self , _lowerCAmelCase=(2, 10, 16) , _lowerCAmelCase=77 ):
np.random.seed(_lowerCAmelCase )
return np.random.rand(*_lowerCAmelCase )
def __magic_name__ ( self ):
lowerCamelCase__ = self.get_feature_extractor()
lowerCamelCase__ = self.get_tokenizer()
lowerCamelCase__ = self.get_decoder()
lowerCamelCase__ = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
lowerCamelCase__ = self._get_dummy_logits(shape=(10, 16) , seed=13 )
lowerCamelCase__ = processor.decode(_lowerCAmelCase )
lowerCamelCase__ = decoder.decode_beams(_lowerCAmelCase )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual("</s> <s> </s>" , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ["fork"], ["spawn"]] )
def __magic_name__ ( self , _lowerCAmelCase ):
lowerCamelCase__ = self.get_feature_extractor()
lowerCamelCase__ = self.get_tokenizer()
lowerCamelCase__ = self.get_decoder()
lowerCamelCase__ = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
lowerCamelCase__ = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
lowerCamelCase__ = processor.batch_decode(_lowerCAmelCase )
else:
with get_context(_lowerCAmelCase ).Pool() as pool:
lowerCamelCase__ = processor.batch_decode(_lowerCAmelCase , _lowerCAmelCase )
lowerCamelCase__ = list(_lowerCAmelCase )
with get_context("fork" ).Pool() as p:
lowerCamelCase__ = decoder.decode_beams_batch(_lowerCAmelCase , _lowerCAmelCase )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(_lowerCAmelCase , decoded_processor.text )
self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"] , decoded_processor.text )
self.assertListEqual(_lowerCAmelCase , decoded_processor.logit_score )
self.assertListEqual(_lowerCAmelCase , decoded_processor.lm_score )
def __magic_name__ ( self ):
lowerCamelCase__ = self.get_feature_extractor()
lowerCamelCase__ = self.get_tokenizer()
lowerCamelCase__ = self.get_decoder()
lowerCamelCase__ = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
lowerCamelCase__ = self._get_dummy_logits()
lowerCamelCase__ = 15
lowerCamelCase__ = -20.0
lowerCamelCase__ = -4.0
lowerCamelCase__ = processor.batch_decode(
_lowerCAmelCase , beam_width=_lowerCAmelCase , beam_prune_logp=_lowerCAmelCase , token_min_logp=_lowerCAmelCase , )
lowerCamelCase__ = decoded_processor_out.text
lowerCamelCase__ = list(_lowerCAmelCase )
with get_context("fork" ).Pool() as pool:
lowerCamelCase__ = decoder.decode_beams_batch(
_lowerCAmelCase , _lowerCAmelCase , beam_width=_lowerCAmelCase , beam_prune_logp=_lowerCAmelCase , token_min_logp=_lowerCAmelCase , )
lowerCamelCase__ = [d[0][0] for d in decoded_decoder_out]
lowerCamelCase__ = [d[0][2] for d in decoded_decoder_out]
lowerCamelCase__ = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"] , _lowerCAmelCase )
self.assertTrue(np.array_equal(_lowerCAmelCase , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , _lowerCAmelCase , atol=1E-3 ) )
self.assertTrue(np.array_equal(_lowerCAmelCase , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9_474] , _lowerCAmelCase , atol=1E-3 ) )
def __magic_name__ ( self ):
lowerCamelCase__ = self.get_feature_extractor()
lowerCamelCase__ = self.get_tokenizer()
lowerCamelCase__ = self.get_decoder()
lowerCamelCase__ = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
lowerCamelCase__ = self._get_dummy_logits()
lowerCamelCase__ = 2.0
lowerCamelCase__ = 5.0
lowerCamelCase__ = -20.0
lowerCamelCase__ = True
lowerCamelCase__ = processor.batch_decode(
_lowerCAmelCase , alpha=_lowerCAmelCase , beta=_lowerCAmelCase , unk_score_offset=_lowerCAmelCase , lm_score_boundary=_lowerCAmelCase , )
lowerCamelCase__ = decoded_processor_out.text
lowerCamelCase__ = list(_lowerCAmelCase )
decoder.reset_params(
alpha=_lowerCAmelCase , beta=_lowerCAmelCase , unk_score_offset=_lowerCAmelCase , lm_score_boundary=_lowerCAmelCase , )
with get_context("fork" ).Pool() as pool:
lowerCamelCase__ = decoder.decode_beams_batch(
_lowerCAmelCase , _lowerCAmelCase , )
lowerCamelCase__ = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertListEqual(["<s> </s> <s> </s> </s>", "</s> </s> <s> </s> </s>"] , _lowerCAmelCase )
lowerCamelCase__ = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , _lowerCAmelCase )
def __magic_name__ ( self ):
lowerCamelCase__ = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
lowerCamelCase__ = processor.decoder.model_container[processor.decoder._model_key]
lowerCamelCase__ = Path(language_model._kenlm_model.path.decode("utf-8" ) ).parent.parent.absolute()
lowerCamelCase__ = os.listdir(_lowerCAmelCase )
lowerCamelCase__ = ["alphabet.json", "language_model"]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
def test_decoder_local_files(self):
    """A processor loaded from a local snapshot must expose the same decoder files as the cache."""
    local_dir = snapshot_download("hf-internal-testing/processor_with_lm")
    processor = WavaVecaProcessorWithLM.from_pretrained(local_dir)

    language_model = processor.decoder.model_container[processor.decoder._model_key]
    path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute()

    local_decoder_files = os.listdir(local_dir)
    expected_decoder_files = os.listdir(path_to_cached_dir)

    local_decoder_files.sort()
    expected_decoder_files.sort()

    # test that both decoder form hub and local files in cache are the same
    self.assertListEqual(local_decoder_files, expected_decoder_files)
def test_processor_from_auto_processor(self):
    """`AutoProcessor` and the explicit class must produce identical features and decodings."""
    processor_wavaveca = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
    processor_auto = AutoProcessor.from_pretrained("hf-internal-testing/processor_with_lm")

    raw_speech = floats_list((3, 1000))

    input_wavaveca = processor_wavaveca(raw_speech, return_tensors="np")
    input_auto = processor_auto(raw_speech, return_tensors="np")

    for key in input_wavaveca.keys():
        self.assertAlmostEqual(input_wavaveca[key].sum(), input_auto[key].sum(), delta=1e-2)

    logits = self._get_dummy_logits()

    decoded_wavaveca = processor_wavaveca.batch_decode(logits)
    decoded_auto = processor_auto.batch_decode(logits)

    self.assertListEqual(decoded_wavaveca.text, decoded_auto.text)
def test_model_input_names(self):
    """The processor must advertise the feature extractor's model input names."""
    feature_extractor = self.get_feature_extractor()
    tokenizer = self.get_tokenizer()
    decoder = self.get_decoder()

    processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

    self.assertListEqual(
        processor.model_input_names,
        feature_extractor.model_input_names,
        msg="`processor` and `feature_extractor` model input names do not match",
    )
@staticmethod
def get_from_offsets(offsets, key):
    """Collect the value stored under ``key`` from every offset dict in ``offsets``."""
    retrieved_list = [d[key] for d in offsets]
    return retrieved_list
def test_offsets_integration_fast(self):
    """Decoding with ``output_word_offsets=True`` must return consistent word offsets."""
    processor = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
    logits = self._get_dummy_logits()[0]

    outputs = processor.decode(logits, output_word_offsets=True)
    # check Wav2Vec2CTCTokenizerOutput keys for word
    self.assertEqual(len(outputs.keys()), 4)
    self.assertTrue("text" in outputs)
    self.assertTrue("word_offsets" in outputs)
    # NOTE(review): the original checked the concrete output type here; the output class name
    # is imported outside this excerpt — confirm it matches `WavaVecaDecoderWithLMOutput`.
    self.assertTrue(isinstance(outputs, WavaVecaDecoderWithLMOutput))

    # words joined together must reproduce the decoded text
    self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"], "word")), outputs.text)
    self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "word"), ["<s>", "<s>", "</s>"])
    self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "start_offset"), [0, 2, 4])
    self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "end_offset"), [1, 3, 5])
def test_offsets_integration_fast_batch(self):
    """Batched decoding with ``output_word_offsets=True`` must return consistent word offsets."""
    processor = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
    logits = self._get_dummy_logits()

    outputs = processor.batch_decode(logits, output_word_offsets=True)
    # check Wav2Vec2CTCTokenizerOutput keys for word
    self.assertEqual(len(outputs.keys()), 4)
    self.assertTrue("text" in outputs)
    self.assertTrue("word_offsets" in outputs)
    # NOTE(review): the original checked the concrete output type here; the output class name
    # is imported outside this excerpt — confirm it matches `WavaVecaDecoderWithLMOutput`.
    self.assertTrue(isinstance(outputs, WavaVecaDecoderWithLMOutput))

    self.assertListEqual(
        [" ".join(self.get_from_offsets(o, "word")) for o in outputs["word_offsets"]], outputs.text
    )
    self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "word"), ["<s>", "<s>", "</s>"])
    self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "start_offset"), [0, 2, 4])
    self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "end_offset"), [1, 3, 5])
@slow
@require_torch
@require_torchaudio
def test_word_time_stamp_integration(self):
    """End-to-end check of word-level time stamps against reference values."""
    import torch

    ds = load_dataset("common_voice", "en", split="train", streaming=True)
    ds = ds.cast_column("audio", datasets.Audio(sampling_rate=16000))
    ds_iter = iter(ds)
    sample = next(ds_iter)

    processor = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")
    model = WavaVecaForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")

    # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
    input_values = processor(sample["audio"]["array"], return_tensors="pt").input_values

    with torch.no_grad():
        logits = model(input_values).logits.cpu().numpy()

    output = processor.decode(logits[0], output_word_offsets=True)

    # each logit frame covers this many seconds of audio
    time_offset = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
    word_time_stamps = [
        {
            "start_time": d["start_offset"] * time_offset,
            "end_time": d["end_offset"] * time_offset,
            "word": d["word"],
        }
        for d in output["word_offsets"]
    ]

    EXPECTED_TEXT = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"

    # output words
    self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), EXPECTED_TEXT)
    self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), output.text)

    # output times
    start_times = torch.tensor(self.get_from_offsets(word_time_stamps, "start_time"))
    end_times = torch.tensor(self.get_from_offsets(word_time_stamps, "end_time"))

    # fmt: off
    expected_start_tensor = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599])
    expected_end_tensor = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94])
    # fmt: on

    self.assertTrue(torch.allclose(start_times, expected_start_tensor, atol=0.01))
    self.assertTrue(torch.allclose(end_times, expected_end_tensor, atol=0.01))
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
# Module logger; the docstring constant below is injected into every processor's
# `__call__` via `@add_start_docstrings`.
logger = get_logger(__name__)


LOGITS_PROCESSOR_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
            search or log softmax for each vocabulary token when using beam search
        kwargs (`Dict[str, Any]`, *optional*):
            Additional logits processor specific kwargs.

    Return:
        `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.

"""
class FlaxLogitsProcessor:
    """Abstract base class for all logit processors that can be applied during generation."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids, scores):
        """Flax method for processing logits."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )
class FlaxLogitsWarper:
    """Abstract base class for all logit warpers applied during multinomial sampling."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids, scores):
        """Flax method for warping logits."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )
class FlaxLogitsProcessorList(list):
    """
    A list of logits processors/warpers that can be applied in one `__call__`.
    Processors whose `__call__` takes extra parameters receive them through `kwargs`.
    """

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids, scores, cur_len, **kwargs):
        for processor in self:
            function_args = inspect.signature(processor.__call__).parameters
            if len(function_args) > 3:
                # Processor needs kwargs beyond (input_ids, scores, cur_len): all must be provided.
                if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
                    raise ValueError(
                        f"Make sure that all the required parameters: {list(function_args.keys())} for "
                        f"{processor.__class__} are passed to the logits processor."
                    )
                scores = processor(input_ids, scores, cur_len, **kwargs)
            else:
                scores = processor(input_ids, scores, cur_len)
        return scores
class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):
    """Logits warper that rescales the score distribution by ``1 / temperature``."""

    def __init__(self, temperature: float):
        if not isinstance(temperature, float) or not (temperature > 0):
            raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")
        self.temperature = temperature

    def __call__(self, input_ids, scores, cur_len):
        scores = scores / self.temperature
        return scores
class FlaxTopPLogitsWarper(FlaxLogitsWarper):
    """
    Logits warper that keeps the smallest set of tokens whose cumulative probability
    exceeds ``top_p``; all other tokens are set to ``filter_value``.
    """

    def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
        if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
            raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}")
        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids, scores, cur_len):
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])

        mask_scores = jnp.full_like(scores, self.filter_value)
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
        score_mask = cumulative_probs < self.top_p

        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask, 1)
        score_mask |= score_mask.at[:, 0].set(True)

        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)

        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
        # scatter the kept/filtered scores back into vocabulary order
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]

        return next_scores
class FlaxTopKLogitsWarper(FlaxLogitsWarper):
    """Logits warper that keeps only the ``top_k`` highest-probability tokens."""

    def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")
        self.top_k = max(top_k, min_tokens_to_keep)
        self.filter_value = filter_value

    def __call__(self, input_ids, scores, cur_len):
        batch_size, vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)

        topk = min(self.top_k, scores.shape[-1])  # Safety check
        topk_scores, topk_indices = lax.top_k(scores, topk)
        # offset per-row indices into a flat (batch * vocab) index space
        shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift

        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)
        next_scores = next_scores_flat.reshape(batch_size, vocab_size)
        return next_scores
class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor):
    """Logits processor that forces ``bos_token_id`` as the first generated token."""

    def __init__(self, bos_token_id: int):
        self.bos_token_id = bos_token_id

    def __call__(self, input_ids, scores, cur_len):
        new_scores = jnp.full(scores.shape, -float("inf"))

        # apply only at the very first generation step (cur_len == 1)
        apply_penalty = 1 - jnp.bool_(cur_len - 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores)

        return scores
class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor):
    """Logits processor that forces ``eos_token_id`` when ``max_length`` is reached."""

    def __init__(self, max_length: int, eos_token_id: int):
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids, scores, cur_len):
        new_scores = jnp.full(scores.shape, -float("inf"))

        # apply only at the last position before max_length
        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)

        return scores
class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor):
    """Logits processor that suppresses EOS until ``min_length`` tokens were generated."""

    def __init__(self, min_length: int, eos_token_id: int):
        if not isinstance(min_length, int) or min_length < 0:
            raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}")
        if not isinstance(eos_token_id, int) or eos_token_id < 0:
            raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}")
        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids, scores, cur_len):
        # create boolean flag to decide if min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)
        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float("inf")), scores)

        return scores
class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor):
    """Logits processor that suppresses a list of tokens at ``begin_index`` (start of generation)."""

    def __init__(self, begin_suppress_tokens, begin_index):
        self.begin_suppress_tokens = list(begin_suppress_tokens)
        self.begin_index = begin_index

    def __call__(self, input_ids, scores, cur_len):
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)
        scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float("inf")), scores)

        return scores
class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor):
    """Logits processor that suppresses a fixed list of tokens at every generation step."""

    def __init__(self, suppress_tokens):
        self.suppress_tokens = list(suppress_tokens)

    def __call__(self, input_ids, scores, cur_len):
        scores = scores.at[..., self.suppress_tokens].set(-float("inf"))

        return scores
class FlaxForceTokensLogitsProcessor(FlaxLogitsProcessor):
    """Logits processor that forces specific tokens at specific generation indices."""

    def __init__(self, force_token_map):
        force_token_map = dict(force_token_map)
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token)
        self.force_token_array = jnp.int32(force_token_array)

    def __call__(self, input_ids, scores, cur_len):
        def _force_token(generation_idx):
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]

            new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float("inf")
            updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
            # write a 0 logit at the forced token position, leaving everything else at -inf
            new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))
            return new_scores

        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0],
            # If the current length is geq than the length of force_token_array, the processor does nothing.
            lambda: scores,
            # Otherwise, it may force a certain token.
            lambda: lax.cond(
                self.force_token_array[cur_len] >= 0,
                # Only valid (positive) tokens are forced
                lambda: _force_token(cur_len),
                # Otherwise, the processor does nothing.
                lambda: scores,
            ),
        )
        return scores
class FlaxWhisperTimeStampLogitsProcessor(FlaxLogitsProcessor):
    """
    Whisper-specific processor that enforces valid timestamp-token patterns:
    timestamps come in pairs, and a timestamp is sampled whenever its total
    probability exceeds that of any single text token.
    """

    def __init__(self, generate_config, model_config, decoder_input_length):
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1
        self.begin_index = decoder_input_length + 1

        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config, "max_initial_timestamp_index"):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size

    def __call__(self, input_ids, scores, cur_len):
        # suppress <|notimestamps|> which is handled by without_timestamps
        scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf"))

        def handle_pairs(input_ids_k, scores_k):
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin,
                True and last_was_timestamp,
                False,
            )

            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin,
                True,
                penultimate_was_timestamp,
            )

            return jnp.where(
                last_was_timestamp,
                jnp.where(
                    penultimate_was_timestamp > 0,
                    # two consecutive timestamps: the next token must be text
                    scores_k.at[self.timestamp_begin :].set(-float("inf")),
                    # single timestamp so far: the next token must close the pair
                    scores_k.at[: self.eos_token_id].set(-float("inf")),
                ),
                scores_k,
            )

        scores = jax.vmap(handle_pairs)(input_ids, scores)

        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None,
            True and apply_max_initial_timestamp,
            False,
        )

        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index
        scores = jnp.where(
            apply_max_initial_timestamp,
            scores.at[:, last_allowed + 1 :].set(-float("inf")),
            scores,
        )

        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores, axis=-1)

        def handle_cumulative_probs(logprobs_k, scores_k):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
            return jnp.where(
                timestamp_logprob > max_text_token_logprob,
                scores_k.at[: self.timestamp_begin].set(-float("inf")),
                scores_k,
            )

        scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)

        return scores
"""simple docstring"""
_a = [0, 2, 4, 6, 8]
_a = [1, 3, 5, 7, 9]
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case, __snake_case ) -> int:
"""simple docstring"""
if remaining_length == 0:
if digits[0] == 0 or digits[-1] == 0:
return 0
for i in range(length // 2 - 1, -1, -1 ):
remainder += digits[i] + digits[length - i - 1]
if remainder % 2 == 0:
return 0
remainder //= 10
return 1
if remaining_length == 1:
if remainder % 2 == 0:
return 0
_UpperCamelCase = 0
for digit in range(10 ):
_UpperCamelCase = digit
result += reversible_numbers(
0, (remainder + 2 * digit) // 10, __snake_case, __snake_case )
return result
_UpperCamelCase = 0
for digita in range(10 ):
_UpperCamelCase = digita
if (remainder + digita) % 2 == 0:
_UpperCamelCase = ODD_DIGITS
else:
_UpperCamelCase = EVEN_DIGITS
for digita in other_parity_digits:
_UpperCamelCase = digita
result += reversible_numbers(
remaining_length - 2, (remainder + digita + digita) // 10, __snake_case, __snake_case, )
return result
def lowerCamelCase__ ( __snake_case = 9 ) -> int:
"""simple docstring"""
_UpperCamelCase = 0
for length in range(1, max_power + 1 ):
result += reversible_numbers(__snake_case, 0, [0] * length, __snake_case )
return result
if __name__ == "__main__":
print(F"""{solution() = }""")
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
# Lazy-import structure for the DeiT model: maps submodule name -> public names.
_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_deit"] = [
        "DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DeiTForImageClassification",
        "DeiTForImageClassificationWithTeacher",
        "DeiTForMaskedImageModeling",
        "DeiTModel",
        "DeiTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_deit"] = [
        "TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDeiTForImageClassification",
        "TFDeiTForImageClassificationWithTeacher",
        "TFDeiTForMaskedImageModeling",
        "TFDeiTModel",
        "TFDeiTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_deit import DeiTFeatureExtractor
        from .image_processing_deit import DeiTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deit import (
            DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
            DeiTModel,
            DeiTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deit import (
            TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
            TFDeiTModel,
            TFDeiTPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
@require_tokenizers
class ReformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for the Reformer sentencepiece tokenizer (slow and fast)."""

    tokenizer_class = ReformerTokenizer
    rust_tokenizer_class = ReformerTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_padding(self, max_length=15):
        """Padding to max_length must fail for tokenizers without a pad token."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    def test_padding_different_model_input_name(self):
        # Reformer has no pad token, so this common test does not apply.
        pass

    def test_full_tokenizer(self):
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    @cached_property
    def big_tokenizer(self):
        return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [126, 32, 262, 152, 38, 72, 287]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
            108, 265, 24, 111, 4, 258, 156, 35, 28, 275, 3, 259, 297, 260, 84, 4,
            35, 110, 44, 8, 259, 91, 268, 21, 11, 209, 274, 109, 266, 277, 117, 86,
            93, 315, 258, 278, 258, 277, 258, 0, 258, 288, 258, 319, 258, 0, 258, 0,
            258, 0, 258, 0, 258, 287, 258, 315, 258, 289, 258, 278, 99, 269, 266, 262,
            8, 259, 241, 4, 217, 230, 268, 266, 55, 168, 106, 75, 193, 266, 223, 27,
            49, 26, 282, 25, 264, 299, 19, 26, 0, 258, 277, 117, 86, 93, 176, 183,
            270, 11, 262, 42, 61, 265,
        ]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import ReformerConfig, ReformerModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt")
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus([sequence, sequence], return_tensors="pt")

        config = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        config.axial_pos_shape = encoded_sequence["input_ids"].shape
        model = ReformerModel(config)

        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # This tokenizer does not know some characters like ")".
        # That is the reason why we use very simple texts here.
        # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        sequences = [
            "This is a very simple sentence.",
            "The quick brown fox jumps over the lazy dog.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/reformer-crime-and-punishment",
            revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a",
            padding=False,
            sequences=sequences,
        )
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

# Map of pretrained GPT-Neo checkpoints to their hosted config files.
GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
    # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig(PretrainedConfig):
    """
    Configuration class for a GPT-Neo model. Stores the hyper-parameters used to
    instantiate the model and maps the common `num_attention_heads` /
    `num_hidden_layers` names onto GPT-Neo's `num_heads` / `num_layers`.
    """

    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
def __init__( self : List[str] , UpperCamelCase__ : List[str]=5_0_2_5_7 , UpperCamelCase__ : Dict=2_0_4_8 , UpperCamelCase__ : str=2_0_4_8 , UpperCamelCase__ : List[str]=2_4 , UpperCamelCase__ : Any=[[["global", "local"], 1_2]] , UpperCamelCase__ : Union[str, Any]=1_6 , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Union[str, Any]=2_5_6 , UpperCamelCase__ : Union[str, Any]="gelu_new" , UpperCamelCase__ : Union[str, Any]=0.0 , UpperCamelCase__ : Optional[Any]=0.0 , UpperCamelCase__ : str=0.0 , UpperCamelCase__ : Any=0.1 , UpperCamelCase__ : Optional[Any]=1E-5 , UpperCamelCase__ : Optional[Any]=0.0_2 , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : Optional[int]=5_0_2_5_6 , UpperCamelCase__ : Tuple=5_0_2_5_6 , **UpperCamelCase__ : Optional[Any] , ):
"""simple docstring"""
UpperCamelCase = vocab_size
UpperCamelCase = max_position_embeddings
UpperCamelCase = hidden_size
UpperCamelCase = num_layers
UpperCamelCase = num_heads
UpperCamelCase = intermediate_size
UpperCamelCase = window_size
UpperCamelCase = activation_function
UpperCamelCase = resid_dropout
UpperCamelCase = embed_dropout
UpperCamelCase = attention_dropout
UpperCamelCase = classifier_dropout
UpperCamelCase = layer_norm_epsilon
UpperCamelCase = initializer_range
UpperCamelCase = use_cache
UpperCamelCase = bos_token_id
UpperCamelCase = eos_token_id
UpperCamelCase = attention_types
UpperCamelCase = self.expand_attention_types_params(UpperCamelCase__ )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.attention_layers)` == `config.num_layers` '
f"""but is `len(config.attention_layers) = {len(self.attention_layers )}`, """
f"""`config.num_layers = {self.num_layers}`. """
'`config.attention_layers` is prepared using `config.attention_types`. '
'Please verify the value of `config.attention_types` argument.' )
super().__init__(bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ )
@staticmethod
def A ( UpperCamelCase__ : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
def __lowerCamelCase ( A__ , A__ , A__ , A__ ) -> Any:
"""simple docstring"""
import torch
UpperCamelCase = input.size()
UpperCamelCase = len(A__ )
UpperCamelCase = shape[dimension]
UpperCamelCase = torch.arange(0 , A__ , A__ )
UpperCamelCase = torch.div(sizedim - size , A__ , rounding_mode='floor' ) + 1
UpperCamelCase = torch.arange(A__ ) + low_indices[:min_length][:, None]
UpperCamelCase = [slice(A__ )] * rank
UpperCamelCase = indices
UpperCamelCase = input[s]
UpperCamelCase = list(range(0 , rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(A__ )
def __lowerCamelCase ( A__ , A__ ) -> Any:
"""simple docstring"""
import torch
UpperCamelCase = torch.arange(1 , A__ )
UpperCamelCase = torch.remainder(A__ , A__ )
UpperCamelCase = remainders == 0
UpperCamelCase = candidates[divisor_indices]
UpperCamelCase = torch.max(A__ )
return largest_divisor, torch.div(A__ , A__ , rounding_mode='floor' )
class GPTNeoOnnxConfig(OnnxConfigWithPast):
    """
    ONNX export configuration for GPT-Neo.

    Restored from the scrambled original: four members shared the name `A`
    (each shadowing the previous), and `generate_dummy_inputs` declared all
    parameters under one duplicated name (a SyntaxError). Names follow the
    `OnnxConfigWithPast` API.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Dynamic axes of the exported graph; with past the mask covers past + current tokens.
        common_inputs = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='inputs')
            common_inputs['attention_mask'] = {0: 'batch', 1: 'past_sequence + sequence'}
        else:
            common_inputs['attention_mask'] = {0: 'batch', 1: 'sequence'}
        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Build ordered dummy inputs (optionally with zeroed past_key_values) for export tracing."""
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the inputs the way they appear in forward().
        ordered_inputs = OrderedDict({'input_ids': common_inputs['input_ids']})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch

                batch, seqlen = common_inputs['input_ids'].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs['past_key_values'] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs['attention_mask'] = common_inputs['attention_mask']
        if self.use_past:
            # Extend the mask so it also covers the injected past positions.
            mask_dtype = ordered_inputs['attention_mask'].dtype
            ordered_inputs['attention_mask'] = torch.cat(
                [ordered_inputs['attention_mask'], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
| 324 | 1 |
"""simple docstring"""
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    """
    Load an OmegaConf YAML config from `config_path`; optionally print it.

    Restored: the scrambled original declared both parameters under one
    duplicated name (a SyntaxError). The name `load_config` is grounded by
    the call site in the sibling loader below.
    """
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config
def load_vqgan(device, conf_path=None, ckpt_path=None):
    """
    Build a VQModel from a YAML config and load its checkpoint onto `device`.

    Restored: the scrambled original declared the three parameters under one
    duplicated name (a SyntaxError); the body's `conf_path`/`ckpt_path`
    references fix the intended names, and the first argument is the
    `map_location`/`.to()` target, i.e. the device.
    """
    if conf_path is None:
        conf_path = './model_checkpoints/vqgan_only.yaml'
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = './model_checkpoints/vqgan_only.pt'
    sd = torch.load(ckpt_path, map_location=device)
    # Lightning checkpoints nest the weights under "state_dict".
    if ".ckpt" in ckpt_path:
        sd = sd['state_dict']
    model.load_state_dict(sd, strict=False)
    model.to(device)
    del sd  # free the raw state dict early
    return model
def reconstruct_with_vqgan(x, model):
    """
    Encode `x` with the VQGAN and decode it back; return the reconstruction.

    Restored: the scrambled original declared both parameters under one
    duplicated name (a SyntaxError). NOTE(review): upstream VQModel.encode
    returns a tuple; here the whole return value is passed to decode, as in
    the scrambled body — confirm against the VQModel API.
    """
    z = model.encode(x)
    print(f'''VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}''')
    xrec = model.decode(z)
    return xrec
def get_obj_from_str(string, reload=False):
    """
    Resolve a dotted path such as "pkg.mod.Class" to the named object.

    Restored: the scrambled original declared both parameters under one
    duplicated name (a SyntaxError); `string` is grounded by the body and
    `get_obj_from_str` by the call site in `instantiate_from_config`.
    """
    module, cls = string.rsplit(".", 1)
    if reload:
        # Re-import so a freshly edited module definition is picked up.
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)
def instantiate_from_config(config):
    """
    Instantiate the class named by config["target"] with config["params"].

    Restored: the scrambled original's parameter name did not match the
    `config` references in the body (a NameError on every call).
    """
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))
def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    """
    Instantiate a model from `config`, optionally load state dict `sd`,
    move to GPU and/or switch to eval mode. Returns {"model": model}.

    Restored: the scrambled original declared the four parameters under one
    duplicated name (a SyntaxError); `sd`, `gpu` and `eval_mode` are
    grounded by the references in the body.
    """
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}
def load_model(config, ckpt, gpu, eval_mode):
    """
    Load a checkpoint file `ckpt` (or none) and build the model described by
    `config`. Returns (model, global_step).

    Restored: the scrambled original declared the four parameters under one
    duplicated name (a SyntaxError); `ckpt` and `config.model` references in
    the body ground the intended names.
    """
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd['global_step']
        print(f'''loaded model from global step {global_step}.''')
    else:
        # No checkpoint: build with random weights.
        pl_sd = {'state_dict': None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)['model']
    return model, global_step
| 530 |
def UpperCAmelCase_(separator: str, separated: list) -> str:
    """
    Join the strings in `separated` with `separator`, stripping any leading or
    trailing separator characters from the result.

    Restored: the scrambled original declared both parameters under one
    duplicated name (a SyntaxError) and type-checked the wrong value.

    Raises:
        Exception: if any element of `separated` is not a string.
    """
    joined: str = ''''''
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception('''join() accepts only strings to be joined''')
        joined += word_or_phrase + separator
    # NOTE: str.strip removes *characters*, so words beginning/ending with
    # separator characters are also trimmed — preserved upstream behavior.
    return joined.strip(separator)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 509 | 0 |
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
# Restored constant names: the main guard raises `usage_doc` and
# `random.shuffle(choice)` below references `choice`, but the scrambled
# original bound both values to one mangled name, shadowing the first.
usage_doc = """Usage of script: script_name <size_of_canvas:int>"""

# Weighted 0/1 pool (~9% ones), shuffled once at import time.
choice = [0] * 100 + [1] * 10
random.shuffle(choice)
def create_canvas(size: int) -> list[list[bool]]:
    """
    Return a `size` x `size` grid of dead (False) cells.

    Renamed from the mangled original: both `run()` and the main guard call
    this function as `create_canvas`.
    """
    canvas = [[False for i in range(size)] for j in range(size)]
    return canvas
def seed(canvas: list[list[bool]]) -> None:
    """
    Randomize every cell of `canvas` in place.

    Fixes two defects in the scrambled original: the inner loop enumerated
    the whole canvas instead of the current row, and the random bit was
    assigned to a throwaway local instead of `canvas[i][j]` (the function
    previously had no effect). Renamed to `seed`, the name the main guard
    calls.
    """
    for i, row in enumerate(canvas):
        for j, _ in enumerate(row):
            canvas[i][j] = bool(random.getrandbits(1))
def run(canvas: list[list[bool]]) -> list[list[bool]]:
    """
    Advance the Game of Life by one generation and return the new canvas.

    Fixes the scrambled original: the inner loop enumerated the canvas
    instead of the current row, and the judged cell value was dropped into a
    local instead of being written into the next-generation grid. Renamed to
    `run`, the name the main guard calls.
    """
    current_canvas = np.array(canvas)
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0]))
    for r, row in enumerate(current_canvas):
        for c, pt in enumerate(row):
            # 3x3 neighbourhood; numpy slicing clips at the borders.
            next_gen_canvas[r][c] = __judge_point(
                pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2]
            )

    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas: list[list[bool]] = current_canvas.tolist()
    return return_canvas
def __judge_point(pt: bool, neighbours: list[list[bool]]) -> bool:
    """
    Apply Conway's rules to the focus cell `pt` given its 3x3 neighbourhood
    (which includes the focus cell itself) and return the next state.

    Restored: the scrambled original declared both parameters under one
    duplicated name (a SyntaxError); renamed to `__judge_point`, the name
    `run()` calls.
    """
    dead = 0
    alive = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1

    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1

    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False  # underpopulation
        elif alive == 2 or alive == 3:
            state = True  # survival
        elif alive > 3:
            state = False  # overpopulation
    else:
        if alive == 3:
            state = True  # reproduction
    return state
if __name__ == "__main__":
    # CLI entry point: expects exactly one argument, the canvas size.
    # Restored local names: the scrambled original read `canvas_size` and `c`
    # without ever binding them (it assigned to a mangled throwaway name).
    if len(sys.argv) != 2:
        raise Exception(usage_doc)

    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig, ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(["w", "k"])
    try:
        while True:
            c = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
    except KeyboardInterrupt:
        # do nothing.
        pass
| 648 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Shared module logger.
# NOTE(review): both module constants below share the mangled name
# `lowerCAmelCase__`, so the second assignment shadows the logger — the
# original file gave them distinct names; verify before use.
lowerCAmelCase__ = logging.get_logger(__name__)
# Map of released RWKV checkpoints to their hosted config.json files.
lowerCAmelCase__ = {
    """RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""",
    """RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""",
    """RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""",
    """RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""",
    """RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""",
    """RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""",
    """RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""",
    """RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""",
    """RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""",
    """RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""",
}
class lowercase(PretrainedConfig):
    """
    Configuration class for RWKV models.

    Restored from the scrambled original: `__init__` declared every
    parameter under one duplicated name (a SyntaxError), the two class
    attributes shadowed each other, and the base class name was undefined
    (`PretrainedConfig` is imported at the top of this file).
    """

    model_type = "rwkv"
    # PretrainedConfig's generic name maps onto RWKV's `context_length`.
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50277,
        context_length=1024,
        hidden_size=4096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        """Store hyper-parameters; unspecified sizes default relative to `hidden_size`."""
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 648 | 1 |
'''simple docstring'''
from __future__ import annotations
def max_sum_in_array(array: list, k: int) -> int:
    """
    Return the maximum sum of any `k` consecutive elements of `array`
    (classic O(n) sliding-window algorithm).

    Restored: the scrambled original declared both parameters under one
    duplicated name (a SyntaxError) and never initialized the running
    maximum. Renamed to `max_sum_in_array`, the name the main guard calls.

    Raises:
        ValueError: if `k` is negative or larger than the array.
    """
    if len(array) < k or k < 0:
        raise ValueError("""Invalid Input""")
    # Seed both the window sum and the best-so-far with the first window.
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        # Slide the window one step: drop array[i], add array[i + k].
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum
if __name__ == "__main__":
    from doctest import testmod
    from random import randint

    testmod()
    # Restored local names: the scrambled original printed `array` and `k`
    # without ever binding them (both were assigned to one mangled name).
    array = [randint(-1000, 1000) for i in range(100)]
    k = randint(0, 110)
    print(f"""The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}""")
| 26 | '''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Shared module logger.
# NOTE(review): both module constants below share the mangled name
# `__UpperCAmelCase`, so the second assignment shadows the logger — the
# original file gave them distinct names; verify before use.
__UpperCAmelCase =logging.get_logger(__name__)
# Map of released LeViT checkpoints to their hosted config.json files.
__UpperCAmelCase ={
    "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
    # See all LeViT models at https://huggingface.co/models?filter=levit
}
class LevitConfig(PretrainedConfig):
    """
    Configuration class for LeViT models.

    Restored from the scrambled original: `__init__` declared every
    parameter under the single name `a` (a SyntaxError), and two classes in
    this file shared the name `a__`. Parameter names follow the attribute
    assignments in the body.
    """

    model_type = "levit"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],  # noqa: B006 — upstream-compatible default
        num_attention_heads=[4, 8, 12],  # noqa: B006
        depths=[4, 4, 4],  # noqa: B006
        key_dim=[16, 16, 16],  # noqa: B006
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],  # noqa: B006
        attention_ratio=[2, 2, 2],  # noqa: B006
        initializer_range=0.02,
        **kwargs,
    ):
        """Store hyper-parameters and derive the two down-sampling ("Subsample") stage specs."""
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
class LevitOnnxConfig(OnnxConfig):
    """
    ONNX export configuration for LeViT.

    Restored from the scrambled original: both properties shared one mangled
    name (the second shadowed the first); names follow the `OnnxConfig` API.
    """

    # Minimum torch version known to export this architecture correctly.
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Dynamic axes of the single pixel_values input.
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance when comparing ONNX and PyTorch outputs.
        return 1e-4
| 546 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextModelTester:
    """
    Helper that builds small ConvNext configs/inputs and runs shape checks.

    Restored from the scrambled original: `__init__` and the check methods
    declared their parameters under one duplicated name (a SyntaxError), and
    all methods shared one mangled name. The restored names are grounded by
    the internal calls (`self.get_config()`, `self.prepare_config_and_inputs()`)
    and by the test class below, which instantiates `ConvNextModelTester`.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],  # noqa: B006 — upstream-compatible default
        depths=[2, 2, 3, 2],  # noqa: B006
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],  # noqa: B006
        out_indices=[2, 3, 4],  # noqa: B006
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) with random tensors of the tester's shapes."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps: only the last stage is returned by default
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) as expected by the common ModelTesterMixin checks."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class ConvNextModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Unit-test suite for ConvNext model classes.

    Restored from the scrambled original: every test method shared one
    mangled name (so only the last definition survived), tuple unpacks were
    collapsed into single assignments, and the mixin base names were
    undefined. Names follow the imports at the top of this file and the
    mixin hook conventions.
    """

    all_model_classes = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"""feature-extraction""": ConvNextModel, """image-classification""": ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        # ConvNext has no extra config properties to check here.
        return

    @unittest.skip(reason="""ConvNext does not use inputs_embeds""")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="""ConvNext does not support input and output embeddings""")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="""ConvNext does not use feedforward chunking""")
    def test_feed_forward_chunking(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["""output_hidden_states"""] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """
    Load the standard COCO test fixture image used by the integration test.

    Renamed from the mangled original: the integration test below calls this
    function as `prepare_img`.
    """
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
    return image
@require_torch
@require_vision
class ConvNextModelIntegrationTest(unittest.TestCase):
    """
    Slow end-to-end check against the released facebook/convnext-tiny-224
    checkpoint. Restored from the scrambled original, which referenced an
    undefined mangled name where `torch_device` and the processed inputs
    belonged.
    """

    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("""facebook/convnext-tiny-224""") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextForImageClassification.from_pretrained("""facebook/convnext-tiny-224""").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="""pt""").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0260, -0.4739, 0.1911]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
# NOTE(review): this class is cut off mid-method by dataset corruption (the
# trailing "| 704 | ..." on the last line is not valid code); identifiers are
# mangled. It appears to mirror transformers' ConvNext backbone test
# (BackboneTesterMixin) — confirm against the upstream file before editing.
@require_torch
class lowerCamelCase__ ( unittest.TestCase , __snake_case ):
    # Backbone classes under test (empty when torch is unavailable).
    __UpperCAmelCase = (ConvNextBackbone,) if is_torch_available() else ()
    # Config class used by the backbone common tests.
    __UpperCAmelCase = ConvNextConfig
    __UpperCAmelCase = False
    def _UpperCamelCase ( self ) -> Optional[int]:
        """simple docstring"""
        _UpperCamelCase :str =ConvNextModelTester(self ) | 704 | '''simple docstring'''
from graphs.minimum_spanning_tree_kruskal import kruskal
def _lowerCAmelCase() -> None:
    """
    Check that kruskal() recovers the known minimum spanning tree of a fixed
    9-node weighted graph (edges are [u, v, weight] triples).

    Restored: the scrambled original passed and compared undefined names
    (`__a`) and carried trailing dataset junk that made the final assert a
    SyntaxError.
    """
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    result = kruskal(num_nodes, edges)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    # Edge order within an MST is implementation-defined; compare as sets.
    assert sorted(expected) == sorted(result)
"""simple docstring"""
import inspect
import unittest
class UpperCAmelCase_(unittest.TestCase):
    """
    Checks that diffusers imports cleanly and that every backend declared by
    a dummy object exists in the dependency version table.

    Restored from the scrambled original: both methods shared one mangled
    name (the second shadowed the first), and `inspect.getmembers` was
    called on an undefined name instead of the `diffusers` module.
    """

    def test_diffusers_import(self):
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False

    def test_backend_registration(self):
        import diffusers
        from diffusers.dependency_versions_table import deps

        all_classes = inspect.getmembers(diffusers, inspect.isclass)

        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    # Normalize backend names that differ from their PyPI names.
                    if backend == "k_diffusion":
                        backend = "k-diffusion"
                    elif backend == "invisible_watermark":
                        backend = "invisible-watermark"
                    assert backend in deps, f'{backend} is not in the deps table!'
| 289 |
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a(TokenizerTesterMixin, unittest.TestCase):
    """
    Tokenization tests for MgpstrTokenizer.

    Restored from the scrambled original: all eight methods shared the name
    `UpperCAmelCase_`, so only the last definition survived and the
    TokenizerTesterMixin hooks (`setUp`, `get_tokenizer`, ...) were never
    reachable. Hook names are grounded by the mixin API; the `test_*`
    names follow the upstream transformers test file — confirm there.
    """

    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        """Write a tiny character vocabulary into a temp dir for from_pretrained."""
        super().setUp()

        # fmt: off
        vocab = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''])
        with open(self.vocab_file, '''w''', encoding='''utf-8''') as fp:
            fp.write(json.dumps(vocab_tokens) + '''\n''')

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        # This tokenizer only knows lower-case letters and digits.
        input_text = '''tester'''
        output_text = '''tester'''
        return input_text, output_text

    @unittest.skip('''MGP-STR always lower cases letters.''')
    def test_added_tokens_do_lower_case(self):
        pass

    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}'):
                special_token = '''[SPECIAL_TOKEN]'''

                tokenizer.add_special_tokens({'''cls_token''': special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}'):
                input_text, output_text = self.get_input_output_texts(tokenizer)

                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_a = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_a)

                tokens_a = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_a), 0)

                text_a = tokenizer.decode(ids)
                self.assertIsInstance(text_a, str)

                self.assertEqual(text_a.replace(''' ''', ''''''), output_text)

    @unittest.skip('''MGP-STR tokenizer only handles one sequence.''')
    def test_pretokenized_inputs(self):
        pass

    @unittest.skip('''inputs cannot be pretokenized in MgpstrTokenizer''')
    def test_maximum_encoding_length_pair_input(self):
        pass
| 332 | 0 |
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class lowerCAmelCase_ ( lowerCAmelCase__ ):
    """Bark-style processor pairing an `AutoTokenizer` with optional speaker-embedding
    voice presets stored as `.npy` files plus an index JSON.

    NOTE(review): identifiers in this class are machine-mangled — assignment targets
    (`SCREAMING_SNAKE_CASE_`), argument placeholders (`_SCREAMING_SNAKE_CASE`) and
    several names read below are undefined, and some `def` lines repeat a parameter
    name (a SyntaxError). Restore from the upstream processor before use; the
    comments describe the apparent intent only.
    """

    # The three class attributes below were all mangled to `_lowercase`; in Python
    # the last assignment wins, so only the preset-shape dict survives.
    _lowercase = "AutoTokenizer"
    _lowercase = ["tokenizer"]
    # Expected ndarray rank for each voice-preset component.
    _lowercase = {
        "semantic_prompt": 1,
        "coarse_prompt": 2,
        "fine_prompt": 2,
    }

    def __init__( self , __UpperCAmelCase , __UpperCAmelCase=None ):
        # NOTE(review): duplicate parameter name — will not parse as written.
        super().__init__(_SCREAMING_SNAKE_CASE )
        # Cache the (optional) speaker-embeddings dict on the instance.
        SCREAMING_SNAKE_CASE_ : List[Any] =speaker_embeddings

    @classmethod
    def __lowerCamelCase ( cls , __UpperCAmelCase , __UpperCAmelCase="speaker_embeddings_path.json" , **__UpperCAmelCase ):
        # Apparent intent: load the processor and, if present, the speaker-embeddings
        # index JSON from a hub repo or local directory.
        if speaker_embeddings_dict_path is not None:
            # Resolve the embeddings JSON inside the repo, forwarding hub kwargs.
            SCREAMING_SNAKE_CASE_ : Union[str, Any] =get_file_from_repo(
                _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , subfolder=kwargs.pop('subfolder' , _SCREAMING_SNAKE_CASE ) , cache_dir=kwargs.pop('cache_dir' , _SCREAMING_SNAKE_CASE ) , force_download=kwargs.pop('force_download' , _SCREAMING_SNAKE_CASE ) , proxies=kwargs.pop('proxies' , _SCREAMING_SNAKE_CASE ) , resume_download=kwargs.pop('resume_download' , _SCREAMING_SNAKE_CASE ) , local_files_only=kwargs.pop('local_files_only' , _SCREAMING_SNAKE_CASE ) , use_auth_token=kwargs.pop('use_auth_token' , _SCREAMING_SNAKE_CASE ) , revision=kwargs.pop('revision' , _SCREAMING_SNAKE_CASE ) , )
            if speaker_embeddings_path is None:
                # Missing index file: warn and continue without preloaded embeddings.
                logger.warning(
                    F"""`{os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.""" )
                SCREAMING_SNAKE_CASE_ : int =None
            else:
                with open(_SCREAMING_SNAKE_CASE ) as speaker_embeddings_json:
                    SCREAMING_SNAKE_CASE_ : List[str] =json.load(_SCREAMING_SNAKE_CASE )
        else:
            SCREAMING_SNAKE_CASE_ : Optional[Any] =None
        SCREAMING_SNAKE_CASE_ : Dict =AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
        return cls(tokenizer=_SCREAMING_SNAKE_CASE , speaker_embeddings=_SCREAMING_SNAKE_CASE )

    def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase="speaker_embeddings_path.json" , __UpperCAmelCase="speaker_embeddings" , __UpperCAmelCase = False , **__UpperCAmelCase , ):
        # Apparent intent: persist per-prompt voice-preset arrays as .npy files plus an
        # index JSON, then defer to the base class for the tokenizer files.
        # NOTE(review): duplicate parameter names — will not parse as written.
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 'v2' ) , exist_ok=_SCREAMING_SNAKE_CASE )
            SCREAMING_SNAKE_CASE_ : Optional[Any] ={}
            SCREAMING_SNAKE_CASE_ : Any =save_directory
            for prompt_key in self.speaker_embeddings:
                if prompt_key != "repo_or_path":
                    SCREAMING_SNAKE_CASE_ : Dict =self._load_voice_preset(_SCREAMING_SNAKE_CASE )
                    SCREAMING_SNAKE_CASE_ : List[str] ={}
                    for key in self.speaker_embeddings[prompt_key]:
                        # Save each component array; record its relative path in the
                        # per-prompt dict that goes into the index JSON.
                        np.save(
                            os.path.join(
                                embeddings_dict['repo_or_path'] , _SCREAMING_SNAKE_CASE , F"""{prompt_key}_{key}""" ) , voice_preset[key] , allow_pickle=_SCREAMING_SNAKE_CASE , )
                        SCREAMING_SNAKE_CASE_ : List[str] =os.path.join(_SCREAMING_SNAKE_CASE , F"""{prompt_key}_{key}.npy""" )
                    SCREAMING_SNAKE_CASE_ : List[str] =tmp_dict
            with open(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , 'w' ) as fp:
                json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        super().save_pretrained(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )

    def __lowerCamelCase ( self , __UpperCAmelCase = None , **__UpperCAmelCase ):
        # Apparent intent: fetch and np.load the three .npy component files of one
        # named voice preset, validating that all expected keys are present.
        SCREAMING_SNAKE_CASE_ : str =self.speaker_embeddings[voice_preset]
        SCREAMING_SNAKE_CASE_ : str ={}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    F"""Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].""" )
            SCREAMING_SNAKE_CASE_ : int =get_file_from_repo(
                self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] , subfolder=kwargs.pop('subfolder' , _SCREAMING_SNAKE_CASE ) , cache_dir=kwargs.pop('cache_dir' , _SCREAMING_SNAKE_CASE ) , force_download=kwargs.pop('force_download' , _SCREAMING_SNAKE_CASE ) , proxies=kwargs.pop('proxies' , _SCREAMING_SNAKE_CASE ) , resume_download=kwargs.pop('resume_download' , _SCREAMING_SNAKE_CASE ) , local_files_only=kwargs.pop('local_files_only' , _SCREAMING_SNAKE_CASE ) , use_auth_token=kwargs.pop('use_auth_token' , _SCREAMING_SNAKE_CASE ) , revision=kwargs.pop('revision' , _SCREAMING_SNAKE_CASE ) , )
            if path is None:
                raise ValueError(
                    F"""`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings.""" )
            SCREAMING_SNAKE_CASE_ : int =np.load(_SCREAMING_SNAKE_CASE )
        return voice_preset_dict

    def __lowerCamelCase ( self , __UpperCAmelCase = None ):
        # Validate that a preset dict has all three components, each an ndarray of
        # the rank recorded in the preset-shape class attribute.
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(F"""Voice preset unrecognized, missing {key} as a key.""" )
            if not isinstance(voice_preset[key] , np.ndarray ):
                raise ValueError(F"""{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.""" )
            if len(voice_preset[key].shape ) != self.preset_shape[key]:
                raise ValueError(F"""{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.""" )

    def __call__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="pt" , __UpperCAmelCase=256 , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=False , **__UpperCAmelCase , ):
        # Apparent intent: resolve the voice preset (by name, .npz path, or dict),
        # validate it, tokenize the text, and attach the preset to the output.
        # NOTE(review): duplicate parameter names — will not parse as written.
        if voice_preset is not None and not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
            if (
                isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                SCREAMING_SNAKE_CASE_ : List[Any] =self._load_voice_preset(_SCREAMING_SNAKE_CASE )
            else:
                if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and not voice_preset.endswith('.npz' ):
                    SCREAMING_SNAKE_CASE_ : str =voice_preset + '.npz'
                SCREAMING_SNAKE_CASE_ : List[str] =np.load(_SCREAMING_SNAKE_CASE )
        if voice_preset is not None:
            self._validate_voice_preset_dict(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
            SCREAMING_SNAKE_CASE_ : int =BatchFeature(data=_SCREAMING_SNAKE_CASE , tensor_type=_SCREAMING_SNAKE_CASE )
        SCREAMING_SNAKE_CASE_ : Tuple =self.tokenizer(
            _SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , padding='max_length' , max_length=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , return_token_type_ids=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
        if voice_preset is not None:
            SCREAMING_SNAKE_CASE_ : Any =voice_preset
        return encoded_text
| 709 |
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable


# Lazy-import structure for the DPT model package.  Fix over the previous
# revision: the dict was bound to a throwaway name, optional submodules were
# never registered in it, and the `_LazyModule` was never installed into
# `sys.modules`, so `_import_structure` raised NameError at import time.
_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}

# Vision-dependent submodules are only exposed when vision deps are installed.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]

# Torch-dependent modeling code.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_dpt"] = [
        "DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DPTForDepthEstimation",
        "DPTForSemanticSegmentation",
        "DPTModel",
        "DPTPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only.
    from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_dpt import DPTFeatureExtractor
        from .image_processing_dpt import DPTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_dpt import (
            DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DPTForDepthEstimation,
            DPTForSemanticSegmentation,
            DPTModel,
            DPTPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 153 | 0 |
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
lowerCAmelCase__ : Dict =logging.get_logger(__name__)
# Fix: the three constants below were all assigned to the same mangled name,
# leaving `VOCAB_FILES_NAMES`, `PRETRAINED_VOCAB_FILES_MAP` and
# `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES` (referenced by the tokenizer class)
# undefined.  Restore the conventional names.

# File names the tokenizer expects inside a checkpoint directory.
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

# Download URLs for the published checkpoint's tokenizer files.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

# Maximum input length (in tokens) per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/blenderbot_small-90M": 512,
}
class __lowercase (__lowerCAmelCase ):
    """Fast (tokenizers-backed) BlenderbotSmall tokenizer built on byte-level BPE.

    NOTE(review): identifiers are machine-mangled — the `_lowerCamelCase`
    argument placeholders and several names read below are undefined, the
    `__init__` signature repeats a parameter name (a SyntaxError), and the two
    public methods share a name so the second clobbers the first.  Restore from
    the upstream tokenizer before use; comments describe apparent intent only.
    """

    # All four class attributes were mangled to `_UpperCAmelCase`; only the last
    # assignment (the slow-tokenizer class) survives at runtime.
    _UpperCAmelCase = VOCAB_FILES_NAMES
    _UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
    _UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _UpperCAmelCase = BlenderbotSmallTokenizer

    def __init__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__="<|endoftext|>" , lowerCAmelCase__="<|endoftext|>" , lowerCAmelCase__="<|endoftext|>" , lowerCAmelCase__=False , lowerCAmelCase__=True , **lowerCAmelCase__ , ):
        """Apparent intent: wrap a `ByteLevelBPETokenizer` backend and remember
        whether a prefix space is added."""
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=_lowerCamelCase , merges=_lowerCamelCase , add_prefix_space=_lowerCamelCase , trim_offsets=_lowerCamelCase , ) , bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , **_lowerCamelCase , )
        SCREAMING_SNAKE_CASE_ : List[str] = add_prefix_space

    def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__=None ):
        """Apparent intent: wrap one sequence in BOS/EOS and append the optional
        second sequence terminated by EOS."""
        SCREAMING_SNAKE_CASE_ : Any = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a is None:
            return output
        return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]

    def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ):
        """Apparent intent: return the all-zeros token-type-id mask for one or two
        sequences (this model does not use segment ids)."""
        SCREAMING_SNAKE_CASE_ : int = [self.sep_token_id]
        SCREAMING_SNAKE_CASE_ : List[str] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 101 | """simple docstring"""
from __future__ import annotations
import math
def default_matrix_multiplication(a: list, b: list) -> list:
    """Multiply two 2x2 matrices directly (base case of Strassen's recursion).

    Fixes over the previous revision: the def repeated a parameter name (a
    SyntaxError) and was named so that the call site's
    `default_matrix_multiplication` was undefined.

    Raises:
        Exception: if either input is not exactly 2x2.
    """
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix
def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    """Element-wise sum of two equal-shaped matrices.

    Fixes: duplicate parameter names (SyntaxError), a name that left the call
    site's `matrix_addition` undefined, and a wrong `-> Dict` annotation.
    """
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]
def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    """Element-wise difference (matrix_a - matrix_b) of two equal-shaped matrices.

    Fixes: duplicate parameter names (SyntaxError) and a name that left the
    call site's `matrix_subtraction` undefined.
    """
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]
def split_matrix(a: list) -> tuple[list, list, list, list]:
    """Split an even-sized square matrix into its four quadrants.

    Returns (top_left, top_right, bot_left, bot_right).

    Raises:
        Exception: if the row or column count is odd.
    """
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")
    matrix_length = len(a)
    mid = matrix_length // 2
    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)]
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]
    return top_left, top_right, bot_left, bot_right
def matrix_dimensions(matrix: list) -> tuple[int, int]:
    """Return (rows, columns) of *matrix*.

    Fixes: the mangled def name left the call sites' `matrix_dimensions`
    undefined, and the body mixed the parameter with an undefined name.
    """
    return len(matrix), len(matrix[0])
def print_matrix(matrix: list) -> None:
    """Print the matrix to stdout, one row per line.

    Fixes: the generator iterated an undefined name instead of binding each
    row, and the mangled def name left `print_matrix` undefined.
    """
    print("\n".join(str(line) for line in matrix))
def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    """Recursively multiply two equal power-of-two square matrices with
    Strassen's seven-product scheme.

    Fixes: duplicate parameter names (SyntaxError), a mangled def name that
    left the recursive call sites' `actual_strassen` undefined, and tuple
    unpacking into a single repeated target.
    """
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    # The seven Strassen products.
    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    # Combine the products into the four result quadrants.
    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # Stitch the quadrants back into a single matrix.
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix
def strassen(matrix1: list, matrix2: list) -> list:
    """Multiply two matrices of compatible shape with Strassen's algorithm.

    Both operands are zero-padded up to the next power-of-two square size,
    multiplied recursively, and the padding is stripped from the result.

    Fixes over the previous revision: duplicate parameter names (SyntaxError),
    undefined helper names, `math.loga` (nonexistent) instead of `math.log2`,
    a degenerate early return that handed the operand pair back instead of a
    product, and in-place padding that mutated the caller's matrices.

    Raises:
        Exception: if the inner dimensions do not match.
    """
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrix1}\n"
            f"Matrix B: {matrix2}"
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)

    # Pad both operands to the same power-of-two square size so the recursion
    # always splits evenly.
    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    # Copy the rows so padding does not mutate the caller's matrices.
    new_matrix1 = [row[:] for row in matrix1]
    new_matrix2 = [row[:] for row in matrix2]
    for i in range(maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)

    final_matrix = actual_strassen(new_matrix1, new_matrix2)

    # Strip the padding: keep dimension1[0] rows of dimension2[1] columns.
    for i in range(maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
if __name__ == "__main__":
    # Demo: multiply a 10x4 matrix by a 4x4 matrix.  The previous revision
    # assigned both operands to the same mangled name and then multiplied the
    # second matrix by itself.
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
| 530 | 0 |
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    """Reset the deprecation-warning dedup set so every test sees the warning.

    Fix: the parameter was renamed away from `monkeypatch`, so pytest could not
    inject the fixture and the body read an undefined name.
    """
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())
@pytest.fixture
def mock_hfh(monkeypatch):
    """Replace `datasets.inspect.huggingface_hub` with a stub listing fake metrics.

    Fixes: the fixture shared a name with the previous fixture (clobbering it),
    its parameter did not match the injected `monkeypatch`, and the list
    comprehension read an undefined placeholder instead of its loop variable.
    """

    class MetricMock:
        def __init__(self, metric_id):
            # NOTE(review): attribute name assumed from upstream — confirm.
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())
@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))]
)
def test_inspect_metric(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    """Every metric entry point should emit the evaluate-migration deprecation warning.

    Fixes: parameters renamed so pytest can inject the fixtures/`tmp_path`, and
    `pytest.warns` given a warning class (it was passed the `func` parameter).
    NOTE(review): `FutureWarning` assumed from the upstream test — confirm.
    """
    if "tmp_path" in args:
        # Substitute the real tmp_path fixture for its string placeholder.
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
| 705 |
# Error message for IDs that are not 8 digits followed by a letter.
NUMBERS_PLUS_LETTER = "Input must be a string of 8 numbers plus letter"
# Checksum letters, indexed by (id_number % 23).
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"
# The previous revision assigned both strings to the same mangled name (`__A`),
# losing the error message; the surviving value is preserved for any external
# reference.
__A = LOOKUP_LETTERS


def lowerCamelCase_(UpperCamelCase__: str) -> bool:
    """Validate a Spanish national ID (DNI): 8 digits plus a checksum letter.

    Hyphens are ignored and the letter is case-insensitive.

    Fixes: `isinstance` was called with the value as the type, and both error
    branches raised with undefined/mangled constants.

    Raises:
        TypeError: if the input is not a string.
        ValueError: if it is not 8 digits followed by one non-digit letter.
    """
    spanish_id = UpperCamelCase__  # keep the original parameter name for callers
    if not isinstance(spanish_id, str):
        raise TypeError(f"Expected string as input, found {type(spanish_id).__name__}")
    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER)
    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER) from ex
    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER)
    return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 167 | 0 |
from math import factorial
class Dual:
    """A dual number for forward-mode automatic differentiation.

    ``real`` holds the value part; ``duals[k]`` holds the coefficient of
    ``E**(k+1)`` where ``E`` is nilpotent.  Fixes over the previous revision:
    the class name did not match the ``Dual(...)`` constructor calls in its own
    methods (NameError), several defs repeated a parameter name (SyntaxError),
    and the reflected-operator aliases were mangled away.
    """

    def __init__(self, real, rank):
        # `rank`: either an int (number of dual coefficients, each set to 1)
        # or an explicit coefficient list.
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual) + 'E' + str(n + 1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        """Return a copy with trailing zero coefficients removed."""
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    # Backward-compatible alias for the mangled method name in this file.
    UpperCamelCase_ = reduce

    def __add__(self, other):
        if not isinstance(other, Dual):
            # Scalar addition only shifts the real part.
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        # NOTE(review): upstream pads the shorter list with 1s (not 0s) —
        # preserved as-is for behavioural fidelity.
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = [s + o for s, o in zip(s_dual, o_dual)]
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            # Scalar multiplication scales every coefficient.
            return Dual(self.real * other, [i * other for i in self.duals])
        # Polynomial-style convolution of the coefficient lists.
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real / other, [i / other for i in self.duals])
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real // other, [i // other for i in self.duals])
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x


# Backward-compatible alias for the original (mangled) class name.
UpperCAmelCase = Dual
def UpperCAmelCase(func, position, order):
    """Return the `order`-th derivative of `func` evaluated at `position`,
    computed with dual numbers (forward-mode autodiff).

    Fixes: the def repeated one parameter name three times (SyntaxError) and
    validated/used the wrong placeholders.

    Raises:
        ValueError: if `func` is not callable, `position` is not numeric, or
            `order` is not an int.
    """
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    # The k-th dual coefficient stores f^(k)(x)/k!, so scale by k!.
    return result.duals[order - 1] * factorial(order)
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # The previous revision named this helper identically to the
    # differentiation function above (clobbering it) and then printed an
    # undefined name.
    def test_function(y):
        # Example: y**6, written as y**2 * y**4 so dual multiplication is used.
        return y**2 * y**4

    print(UpperCAmelCase(test_function, 9, 2))
| 55 |
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
SCREAMING_SNAKE_CASE :Union[str, Any] = object()
# For specifying empty leaf dict `{}`
SCREAMING_SNAKE_CASE :List[str] = object()
def UpperCAmelCase ( a_ , a_ ) -> Tuple:
"""simple docstring"""
__A = tuple((re.compile(x + "$" ) for x in qs) )
for i in range(len(a_ ) - len(a_ ) + 1 ):
__A = [x.match(a_ ) for x, y in zip(a_ , ks[i:] )]
if matches and all(a_ ):
return True
return False
def _replacement_rules(rules):
    """Build a `replace(key, val)` function returning the first matching rule's
    replacement, or `val` unchanged when no rule matches.

    Fixes: the inner def repeated a parameter name (SyntaxError) and the outer
    name left the call site's `_replacement_rules` undefined.
    """

    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace
def _get_partition_rules():
    """Partition rules mapping GPT-style parameter name patterns to
    `PartitionSpec`s for model-parallel ("mp") sharding.

    Fixes: the mangled def name left the call site's `_get_partition_rules`
    undefined, and the unsharded axes were an undefined placeholder instead
    of `None`.
    """
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
def set_partitions(in_dict):
    """Map every flattened parameter key of `in_dict` to its `PartitionSpec`
    and return the result as a frozen nested dict.

    Fixes: the mangled name shadowed the helpers above, and the helper calls
    referenced undefined names.
    """
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    # Start every leaf at the sentinel; a rule must replace each one.
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
| 55 | 1 |
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
    """Turn a flat `[--key, value, ...]` list (as returned by
    `parse_known_args`) into a `{key: value}` dict.

    Fixes: the parameter was mangled so the body read an undefined name, and
    the def name left the call site's `parse_unknown_args` undefined.
    """
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}
def main():
    """Entry point for the `datasets-cli` command-line tool.

    Fixes over the previous revision: `parse_known_args()` returns a tuple and
    was stored unpacked into one variable, `allow_abbrev`/`hasattr` were given
    mangled placeholders, `args.func` was called with undefined names, and the
    function's name did not match the `main()` call in the entry guard.
    """
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)

    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, "func"):
        # No subcommand given: show usage and exit with an error code.
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)

    # Run
    service = args.func(args, **kwargs)
    service.run()


if __name__ == "__main__":
    main()
| 706 |
from scipy.stats import spearmanr
import datasets
A__ = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
A__ = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
A__ = r'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class a(datasets.Metric):
    """Spearman rank-order correlation metric backed by `scipy.stats.spearmanr`.

    Fixes: both methods shared one mangled name (the second clobbered the
    first) instead of the `_info`/`_compute` hooks the `datasets.Metric` base
    class dispatches to, and `_compute` repeated a parameter name (a
    SyntaxError).
    """

    def _info(self):
        # Declare the metric's input schema and reference docs.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(predictions, references)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        return {"spearmanr": results[0]}
| 219 | 0 |
from statistics import mean
import numpy as np
def calculate_turn_around_time(
    process_name: list, arrival_time: list, burst_time: list, no_of_process: int
) -> list:
    """Schedule by Highest Response Ratio Next (non-preemptive) and return the
    per-process turnaround times, in arrival-time order.

    Fixes: the def repeated one parameter name four times (SyntaxError), was
    named so that the driver's `calculate_turn_around_time` was undefined,
    and sorted the caller's `arrival_time` list in place.
    """
    current_time = 0
    finished_process_count = 0
    # 1 marks a process that has already run to completion.
    finished_process = [0] * no_of_process
    turn_around_time = [0] * no_of_process

    # Reorder all three lists by arrival time (argsort computed once; a copy
    # of arrival_time is sorted so the caller's list is not mutated).
    order = np.argsort(arrival_time)
    burst_time = [burst_time[i] for i in order]
    process_name = [process_name[i] for i in order]  # kept aligned for callers
    arrival_time = sorted(arrival_time)

    while no_of_process > finished_process_count:
        # Find the first unfinished process; if the CPU is idle before it
        # arrives, jump the clock to its arrival time.
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        # Pick the ready process with the highest response ratio.
        response_ratio = 0
        loc = 0
        for i in range(no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                # Response ratio = (waiting time + burst time) / burst time.
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[i]
                if response_ratio < temp:
                    response_ratio = temp
                    loc = i

        # Run the chosen process to completion.
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        finished_process[loc] = 1
        finished_process_count += 1
    return turn_around_time
def calculate_waiting_time(
    process_name: list, turn_around_time: list, burst_time: list, no_of_process: int
) -> list:
    """Waiting time = turnaround time - burst time, per process.

    Fixes: duplicate parameter names (SyntaxError) and a mangled def name that
    left the driver's `calculate_waiting_time` undefined.  `process_name` is
    unused but kept for signature compatibility with the driver.
    """
    return [turn_around_time[i] - burst_time[i] for i in range(no_of_process)]
if __name__ == "__main__":
    # Demo: five processes with staggered arrivals.  The previous revision
    # assigned every value to the same mangled name (`a_`) and then passed
    # undefined names to the scheduler.
    no_of_process = 5
    process_name = ["A", "B", "C", "D", "E"]
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]
    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )
    print("Process name \tArrival time \tBurst time \tTurn around time \tWaiting time")
    for i in range(no_of_process):
        print(
            f"{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t"
            f"{turn_around_time[i]}\t\t\t{waiting_time[i]}"
        )
    print(f"average waiting time : {mean(waiting_time):.5f}")
    print(f"average turn around time : {mean(turn_around_time):.5f}")
| 194 |
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    """Return True if *number* is prime, using 6k±1 trial division.

    Fix: the mangled def name left the generator's `is_prime` call undefined.
    """
    if 1 < number < 4:
        # 2 and 3 are prime.
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, even numbers and multiples of 3 are not prime.
        return False
    # All remaining primes are of the form 6k ± 1.
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def prime_generator():
    """Yield the primes in increasing order, indefinitely.

    Fix: the mangled def name clobbered `is_prime` and left the call site's
    `prime_generator` undefined.
    """
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1
def solution(n: int = 2_000_000) -> int:
    """Project Euler 10: sum of all primes strictly below *n*.

    Fix: restored the `solution` name used by the entry guard.
    """
    return sum(takewhile(lambda x: x < n, prime_generator()))


if __name__ == "__main__":
    print(f"{solution() = }")
| 194 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
# BUG FIX: all four constants were bound to the single name `a` (each
# assignment shadowing the previous one) while the tokenizer class below
# reads them under the conventional names restored here.
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

# Map from checkpoint name to the download URL of each vocabulary file.
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'unc-nlp/lxmert-base-uncased': 'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt',
    },
    'tokenizer_file': {
        'unc-nlp/lxmert-base-uncased': (
            'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json'
        ),
    },
}

# Maximum sequence length supported by each pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'unc-nlp/lxmert-base-uncased': 512,
}

# Per-checkpoint tokenizer construction defaults.
PRETRAINED_INIT_CONFIGURATION = {
    'unc-nlp/lxmert-base-uncased': {'do_lower_case': True},
}
class a_(snake_case):
    """Fast (Rust-backed) LXMERT tokenizer, based on WordPiece.

    BUG FIX: in the scrambled original all five class attributes shared one
    name (only the last survived), every method parameter was duplicated
    (a SyntaxError), and the normalizer lookup used the wrong object.
    Restored the attribute/hook names that `PreTrainedTokenizerFast` expects.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # Rebuild the backend normalizer when its saved options disagree with
        # the arguments we were given (e.g. checkpoint saved with other casing).
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build `[CLS] A [SEP]` or `[CLS] A [SEP] B [SEP]` from one/two sequences."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return segment ids: 0 for the first sequence (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Persist the backend tokenizer model files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 347 |
'''simple docstring'''
import numpy as np
def a_(f, ya, xa, h, x_end):
    """Integrate y' = f(x, y) with the classical 4th-order Runge-Kutta method.

    BUG FIX: the original signature repeated one parameter name five times
    (a SyntaxError) and collapsed the four stage slopes into a single
    variable, destroying the RK4 update.

    f      -- right-hand side f(x, y)
    ya     -- initial value y(xa)
    xa     -- initial abscissa
    h      -- step size
    x_end  -- final abscissa
    Returns the numpy array of y-values at xa, xa+h, ..., x_end.
    """
    n = int(np.ceil((x_end - xa) / h))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa
    for k in range(n):
        # Four stage slopes of the classical RK4 scheme.
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h
    return y
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 347 | 1 |
import tensorflow as tf
from ...tf_utils import shape_list
class lowerCamelCase(tf.keras.layers.Layer):
    """Adaptive softmax output layer (Transfo-XL style).

    The vocabulary is split into a short-list "head" plus `len(cutoffs)` tail
    clusters; with `div_val > 1` rarer clusters use smaller embeddings.

    BUG FIX: in the scrambled original every method was named `A`, so Keras
    never saw the `build`/`call` hooks; several signatures duplicated a
    parameter name (a SyntaxError) and tuple unpacks like
    `l_idx, r_idx = ...` were collapsed onto a single variable.
    """

    def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj
        # Append the full vocab size so every (l_idx, r_idx) pair below is valid.
        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order
        self.out_layers = []
        self.out_projs = []

    def build(self, input_shape):
        """Create cluster, projection and output weights (Keras hook)."""
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed), initializer='zeros', trainable=True, name='cluster_weight'
            )
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,), initializer='zeros', trainable=True, name='cluster_bias'
            )
        if self.div_val == 1:
            for i in range(len(self.cutoffs)):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(
                        shape=(self.d_embed, self.d_proj),
                        initializer='zeros',
                        trainable=True,
                        name=f"out_projs_._{i}",
                    )
                    self.out_projs.append(weight)
                else:
                    # No projection needed when dimensions already match.
                    self.out_projs.append(None)
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed),
                    initializer='zeros',
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(self.vocab_size,),
                    initializer='zeros',
                    trainable=True,
                    name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                # Embedding width shrinks by div_val for each later cluster.
                d_emb_i = self.d_embed // (self.div_val**i)
                self.out_projs.append(
                    self.add_weight(
                        shape=(d_emb_i, self.d_proj), initializer='zeros', trainable=True, name=f"out_projs_._{i}"
                    )
                )
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i),
                    initializer='zeros',
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(r_idx - l_idx,),
                    initializer='zeros',
                    trainable=True,
                    name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        super().build(input_shape)

    @staticmethod
    def _logit(x, W, b, proj=None):
        # Optionally project the hidden states before the output matmul.
        y = x
        if proj is not None:
            y = tf.einsum('ibd,ed->ibe', y, proj)
        return tf.einsum('ibd,nd->ibn', y, W) + b

    @staticmethod
    def _gather_logprob(logprob, target):
        # Select logprob[i, target[i]] for every row i.
        lp_size = shape_list(logprob)
        r = tf.range(lp_size[0], dtype=target.dtype)
        idx = tf.stack([r, target], 1)
        return tf.gather_nd(logprob, idx)

    def call(self, hidden, target, return_mean=True, training=False):
        """Return log-probabilities over the vocab; registers the NLL loss when `target` is given."""
        head_logprob = 0
        if self.n_clusters == 0:
            # Degenerate case: no clusters -> plain softmax over the vocab.
            output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
            out = tf.nn.log_softmax(output, axis=-1)
        else:
            hidden_sizes = shape_list(hidden)
            out = []
            loss = tf.zeros(hidden_sizes[:2])
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    # Rows whose target token falls into this cluster.
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask)
                    cur_target = tf.boolean_mask(target, mask) - l_idx
                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]
                if i == 0:
                    # Head: short-list tokens plus one logit per tail cluster.
                    cur_W = tf.concat([cur_W, self.cluster_weight], 0)
                    cur_b = tf.concat([cur_b, self.cluster_bias], 0)
                    head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
                    head_logprob = tf.nn.log_softmax(head_logit)
                    out.append(head_logprob[..., : self.cutoffs[0]])
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
                else:
                    tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
                    tail_logprob = tf.nn.log_softmax(tail_logit)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i)
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    # Scatter each cluster's NLL back to its rows.
                    loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss))
            out = tf.concat(out, axis=-1)
        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss)
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss)
            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference).
            self.add_metric(loss, name=self.name, aggregation='mean' if return_mean else '')
        return out
| 462 |
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
# BUG FIX: both constants were bound to the single name `__a`, while the
# test class below references them as SPIECE_UNDERLINE and SAMPLE_VOCAB.
SPIECE_UNDERLINE = '▁'
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for BigBird (slow sentencepiece + fast Rust tokenizer).

    BUG FIX: in the scrambled original every method was named `__a` (so
    unittest discovered none of them), the four class attributes shared one
    name, the mixin base was obfuscated, and several values were replaced
    with the undefined name `lowerCamelCase`.  Conventional names restored.
    """

    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # Seed the temp dir with a tokenizer built from the fixture vocab.
        tokenizer = self.tokenizer_class(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Round-trip a token through id conversion."""
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "[MASK]")
        self.assertEqual(len(vocab_keys), 1004)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382],
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    @cached_property
    def big_tokenizer(self):
        # Heavyweight download; cached for the @slow integration tests below.
        return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [65, 18536, 2260, 101, 66]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66]  # noqa: E231
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch
        from transformers import BigBirdConfig, BigBirdModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )
        config = BigBirdConfig(attention_type="original_full")
        model = BigBirdModel(config)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)

    @slow
    def test_special_tokens(self):
        tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
        decoded_text = tokenizer.decode(tokenizer("Paris is the [MASK].").input_ids)
        self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]")

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/bigbird-roberta-base",
            revision="215c99f1600e06f83acce68422f2035b2b5c3510",
        )
from PIL import Image
def SCREAMING_SNAKE_CASE(img: Image, level: int):
    """Return a copy of *img* with its contrast changed by *level*.

    BUG FIX: the original declared both parameters with the same name
    (a SyntaxError).  Uses the standard linear contrast formula; each channel
    value c is mapped to 128 + factor * (c - 128).
    """
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        """Fundamental Transformation/Operation that'll be performed on every pixel value."""
        return int(128 + factor * (c - 128))

    return img.point(contrast)
if __name__ == "__main__":
    # Load image
    with Image.open("""image_data/lena.jpg""") as img:
        # Change contrast to 170
        __lowerCamelCase : Dict = change_contrast(img, 170)
    # NOTE(review): the result is bound to the obfuscated name above, yet the
    # save call reads `cont_img`, and `change_contrast` is not defined under
    # that name in the visible region — confirm/restore names upstream.
    cont_img.save("""image_data/lena_high_contrast.png""", format="""png""")
| 25 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
# BUG FIX: both module-level bindings shared the obfuscated name
# `__lowerCamelCase`, while the formatter class below reads `logger` and
# `DEVICE_MAPPING` (the latter via `global`).
logger = get_logger()

# Lazily-populated map from device string identifier to jax Device; kept at
# module level because `jaxlib.xla_extension.Device` is not picklable.
DEVICE_MAPPING: Optional[dict] = None
class SCREAMING_SNAKE_CASE__(TensorFormatter[Mapping, "jax.Array", Mapping]):
    """Formatter that converts Arrow rows/columns/batches into jax arrays.

    BUG FIX: in the scrambled original every method was named `_lowercase`
    (so calls to `recursive_tensorize`, `_tensorize`, `_consolidate` and the
    `format_*` hooks resolved to nothing), the comprehension passed the whole
    structure instead of each substruct, and `map_list` was set to a bogus
    value.  Restored the conventional TensorFormatter method names.
    """

    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs

    @staticmethod
    def _map_devices_to_str():
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        """Stack a homogeneous list of jax arrays into one array; pass through otherwise."""
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
        return column

    def _tensorize(self, value):
        """Convert one leaf value into a jax array on the configured device."""
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})

    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
| 25 | 1 |
"""simple docstring"""
# BUG FIX: all three constants were bound to the single name `_a`, yet the
# notebook-cell list must reference the install snippet by name.
INSTALL_CONTENT = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'

# First cell injected into generated notebooks: the install snippet above.
notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]

# Doc-template placeholders that the formatter must leave untouched.
black_avoid_patterns = {
    '{processor_class}': 'FakeProcessorClass',
    '{model_class}': 'FakeModelClass',
    '{object_class}': 'FakeObjectClass',
}
| 213 |
'''simple docstring'''
def print_pascal_triangle(num_rows: int) -> None:
    """Pretty-print Pascal's triangle with *num_rows* rows.

    BUG FIX: all functions in this module were obfuscated to `lowerCAmelCase_`
    while calling each other by their real names; restored `print_pascal_triangle`
    and the call to `generate_pascal_triangle`.
    """
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces so the triangle is centred.
        for _ in range(num_rows - row_idx - 1):
            print(end=' ')
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=' ')
            else:
                print(triangle[row_idx][col_idx], end='')
        print()
def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    """Return Pascal's triangle as a list of rows.

    Raises TypeError for non-int input and ValueError for negative input.
    BUG FIX: restored the real function name (the module's other functions
    call it as `generate_pascal_triangle`) and the result variable names.
    """
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle
def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    """Build row *current_row_idx* of the triangle from the rows above it.

    BUG FIX: the original declared both parameters with the same name `a`
    (a SyntaxError) and collapsed the boundary assignments onto one variable.
    """
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx)
    return current_row
def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    """Fill one interior cell: the sum of the two elements above it.

    BUG FIX: the original declared all four parameters with the same name
    `a` (a SyntaxError).
    """
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    """Return Pascal's triangle, computing only half of each row and mirroring it.

    Raises TypeError for non-int input and ValueError for negative input.
    BUG FIX: restored the real name (the benchmark below calls
    `generate_pascal_triangle_optimized`) and the intermediate variable names
    that the scrambled original collapsed onto `a__`.
    """
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")
    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        # Mirror the first half (dropping the middle element for odd rows).
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        result.append(row_first_half + row_second_half)
    return result
def benchmark() -> None:
    """Time both triangle generators for row counts 0..14 and print the results.

    BUG FIX: restored the name `benchmark` (the `__main__` guard calls it)
    and the inner helper/variable names the scrambled original collapsed.
    """
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        # timeit resolves the call through __main__, so this only works when
        # the module is run as a script.
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
    # Run doctests first, then the timing comparison of both generators.
    import doctest
    doctest.testmod()
    benchmark()
| 394 | 0 |
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def lowerCAmelCase_(model):
    """Return a representative linear layer from *model* for quantization checks.

    BUG FIX: the parameter was named `__lowerCamelCase` while the body used
    the undefined name `model`, and bloom's MLP attribute `dense_4h_to_h`
    had been scrambled to `dense_ah_to_h`.
    """
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
# Import torch lazily so this test module can be collected on machines
# without a torch installation.
if is_torch_available():
    import torch
    import torch.nn as nn
class a(nn.Module):
    """Wraps a linear module with a rank-`rank` LoRA-style adapter.

    The adapter's second projection is zero-initialised, so at construction
    time the wrapper is an exact identity over the wrapped module.

    BUG FIX: the original `__init__` declared both non-self parameters with
    the same name (a SyntaxError) and the forward hook was misnamed so
    `nn.Module` never dispatched to it.
    """

    def __init__(self, module: "nn.Module", rank: int):
        super().__init__()
        self.module = module
        # Down-projection then up-projection; no biases, as in LoRA.
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features, rank, bias=False),
            nn.Linear(rank, module.out_features, bias=False),
        )
        small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
        nn.init.normal_(self.adapter[0].weight, std=small_std)
        nn.init.zeros_(self.adapter[1].weight)
        self.adapter.to(module.weight.device)

    def forward(self, input, *args, **kwargs):
        return self.module(input, *args, **kwargs) + self.adapter(input)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class a(unittest.TestCase):
    """Base fixture for the 4-bit quantization tests below.

    BUG FIX: in the scrambled original every class attribute shared the name
    `__UpperCAmelCase` (so only the last survived) while subclasses read
    `model_name`, `EXPECTED_RELATIVE_DIFFERENCE`, `input_text`,
    `EXPECTED_OUTPUTS` and `MAX_NEW_TOKENS`; the setUp method was also
    misnamed, so unittest never ran it.
    """

    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574

    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10

    def setUp(self):
        # Models and tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
class a (_lowerCAmelCase ):
"""simple docstring"""
def __snake_case ( self : List[str] ) -> Optional[Any]:
super().setUp()
# Models and tokenizer
__snake_case : str = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map="auto" )
__snake_case : Optional[int] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCamelCase , device_map="auto" )
def __snake_case ( self : Optional[Any] ) -> Dict:
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def __snake_case ( self : Optional[Any] ) -> Any:
__snake_case : List[Any] = self.model_abit.config
self.assertTrue(hasattr(lowerCamelCase , "quantization_config" ) )
__snake_case : List[str] = config.to_dict()
__snake_case : int = config.to_diff_dict()
__snake_case : Any = config.to_json_string()
def __snake_case ( self : int ) -> Tuple:
from bitsandbytes.nn import Paramsabit
__snake_case : Tuple = self.model_fpaa.get_memory_footprint()
__snake_case : Optional[int] = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
__snake_case : int = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def __snake_case ( self : str ) -> Optional[Any]:
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(lowerCamelCase , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def __snake_case ( self : int ) -> int:
__snake_case : str = self.tokenizer(self.input_text , return_tensors="pt" )
__snake_case : str = self.model_abit.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCamelCase ) , self.EXPECTED_OUTPUTS )
def __snake_case ( self : List[str] ) -> Tuple:
__snake_case : str = BitsAndBytesConfig()
__snake_case : Dict = True
__snake_case : int = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=lowerCamelCase , device_map="auto" )
__snake_case : Tuple = self.tokenizer(self.input_text , return_tensors="pt" )
__snake_case : List[str] = model_abit_from_config.generate(
input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCamelCase ) , self.EXPECTED_OUTPUTS )
def __snake_case(self: Tuple) -> Tuple:
    """Saving a 4-bit quantized model must raise (serialization unsupported)."""
    # BUG FIX: both the expected exception and the target directory were the
    # undefined name `lowerCamelCase`; save_pretrained on a 4-bit model raises
    # NotImplementedError, and the tempdir handle must be passed to it.
    with self.assertRaises(NotImplementedError), tempfile.TemporaryDirectory() as tmpdirname:
        self.model_abit.save_pretrained(tmpdirname)
def __snake_case(self: str) -> Union[str, Any]:
    """Passing both a quantization_config and per-kwarg 4-bit options must raise.

    The conflicting arguments are mutually exclusive, so from_pretrained is
    expected to raise a ValueError.
    """
    # BUG FIX: the config object was bound to `__snake_case` while the
    # undefined `lowerCamelCase` was passed on, and the expected exception
    # type was also undefined.
    quantization_config = BitsAndBytesConfig()
    with self.assertRaises(ValueError):
        _ = AutoModelForCausalLM.from_pretrained(
            self.model_name,
            quantization_config=quantization_config,
            load_in_abit=True,
            device_map="auto",
            bnb_abit_quant_type="nf4",
        )
def __snake_case(self: int) -> List[str]:
    """Casting a quantized model must fail; the fp16 model must still cast freely.

    Every ``.to(...)`` / ``.half()`` / ``.float()`` on the 4-bit model should
    raise, while the same operations on the non-quantized model must keep
    working and generation must remain functional afterwards.
    """
    # BUG FIX: every assertRaises referenced the undefined `lowerCamelCase`;
    # casting a bitsandbytes-quantized model raises ValueError.
    with self.assertRaises(ValueError):
        # Tries with a `str` device
        self.model_abit.to("cpu")
    with self.assertRaises(ValueError):
        # Tries with a `dtype`
        self.model_abit.to(torch.floataa)
    with self.assertRaises(ValueError):
        # Tries with a `device`
        self.model_abit.to(torch.device("cuda:0"))
    with self.assertRaises(ValueError):
        # Tries with a dtype-cast helper
        self.model_abit.float()
    with self.assertRaises(ValueError):
        # Tries with a dtype-cast helper
        self.model_abit.half()
    # Test that we did not break anything on the non-quantized model
    # BUG FIX: results were bound to `__snake_case`, so `encoded_input` was
    # undefined when generate() read it.
    encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
    self.model_fpaa.to(torch.floataa)
    _ = self.model_fpaa.generate(
        input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10
    )
    # Check these do not throw an error
    self.model_fpaa.to("cpu")
    self.model_fpaa.half()
    self.model_fpaa.float()
def __snake_case(self: Dict) -> Optional[int]:
    """Modules listed in keep-in-fp32 must stay in full precision after 4-bit load."""
    # BUG FIX: the model was bound to `__snake_case` but read back as `model`,
    # and `load_in_abit` was passed the undefined name `lowerCamelCase`.
    model = AutoModelForSeqaSeqLM.from_pretrained(
        "t5-small", load_in_abit=True, device_map="auto"
    )
    # T5's `wo` projection is in the keep-in-fp32 list, so it must not be quantized.
    self.assertTrue(
        model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa
    )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
# Tests 4-bit loading of T5-family models, with and without the
# `_keep_in_fpaa_modules` full-precision exemption list.
class a (unittest.TestCase ):
"""simple docstring"""
@classmethod
# Class-level fixture: model names, tokenizer and prompt shared by all tests.
# NOTE(review): these assignments bind to `__snake_case` instead of
# `cls.model_name` / `cls.dense_act_model_name` / `cls.tokenizer` /
# `cls.input_text`, which the tests below read — broken bindings to confirm.
def __snake_case ( cls : int ) -> Any:
__snake_case : Optional[Any] = "t5-small"
__snake_case : List[Any] = "google/flan-t5-small" # flan-t5 uses dense-act instead of dense-relu-dense
__snake_case : int = AutoTokenizer.from_pretrained(cls.model_name )
__snake_case : Tuple = "Translate in German: Hello, my dog is cute"
# Per-test teardown: reclaim Python objects and free cached GPU memory.
def __snake_case ( self : List[Any] ) -> Dict:
gc.collect()
torch.cuda.empty_cache()
# Loads both T5 variants in 4-bit with the keep-in-fp32 list temporarily
# cleared (set to None), runs generation, then restores the saved list.
# NOTE(review): results are bound to `__snake_case` but read back as
# `model` / `modules` / via `lowerCamelCase` — undefined names at runtime.
def __snake_case ( self : Any ) -> List[str]:
from transformers import TaForConditionalGeneration
__snake_case : int = TaForConditionalGeneration._keep_in_fpaa_modules
__snake_case : Any = None
# test with `t5-small`
__snake_case : str = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCamelCase , device_map="auto" )
__snake_case : int = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
__snake_case : Optional[int] = model.generate(**lowerCamelCase )
# test with `flan-t5-small`
__snake_case : Tuple = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=lowerCamelCase , device_map="auto" )
__snake_case : Dict = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
__snake_case : Any = model.generate(**lowerCamelCase )
__snake_case : List[Any] = modules
# Same two-model generation check with the keep-in-fp32 list active; also
# asserts that a decoder attention projection was quantized to a
# bitsandbytes 4-bit Linear (regression test for a decoder-quantization bug).
def __snake_case ( self : Optional[Any] ) -> str:
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
__snake_case : List[str] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCamelCase , device_map="auto" )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
__snake_case : List[Any] = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
__snake_case : Optional[int] = model.generate(**lowerCamelCase )
# test with `flan-t5-small`
__snake_case : Optional[int] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=lowerCamelCase , device_map="auto" )
__snake_case : int = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
__snake_case : str = model.generate(**lowerCamelCase )
# Exercises 4-bit loading across several Auto* model classes (base,
# sequence-classification, causal-LM, seq2seq) and checks which heads
# get quantized versus kept as plain nn.Parameter.
class a (_lowerCAmelCase ):
"""simple docstring"""
# Fixture: load one model per Auto* class in 4-bit mode.
# NOTE(review): results bind to `__snake_case` rather than the
# `self.base_model` / `self.sequence_model` / `self.model_abit` /
# `self.seq_to_seq_model` attributes read by the tests — confirm intent.
def __snake_case ( self : Union[str, Any] ) -> Dict:
super().setUp()
# model_name
__snake_case : Any = "bigscience/bloom-560m"
__snake_case : int = "t5-small"
# Different types of model
__snake_case : Optional[Any] = AutoModel.from_pretrained(self.model_name , load_in_abit=lowerCamelCase , device_map="auto" )
# Sequence classification model
__snake_case : List[str] = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=lowerCamelCase , device_map="auto" )
# CausalLM model
__snake_case : Any = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCamelCase , device_map="auto" )
# Seq2seq model
__snake_case : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=lowerCamelCase , device_map="auto" )
# Teardown: drop all model references and release cached GPU memory.
def __snake_case ( self : Union[str, Any] ) -> Optional[int]:
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
# The base model's MLP weight must be a 4-bit parameter, while task heads
# (lm_head / score) must remain ordinary torch.nn.Parameter.
def __snake_case ( self : int ) -> int:
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
# Checks that the high-level `pipeline` API works with a 4-bit quantized
# text-generation model.
class a (_lowerCAmelCase ):
"""simple docstring"""
def __snake_case ( self : int ) -> str:
super().setUp()
# Teardown: drop the pipeline and release cached GPU memory.
def __snake_case ( self : int ) -> Optional[int]:
del self.pipe
gc.collect()
torch.cuda.empty_cache()
# Builds a text-generation pipeline with load_in_4bit and asserts the
# generated text matches one of the expected outputs.
# NOTE(review): the pipeline is bound to `__snake_case`, but the call below
# reads `self.pipe` — the fixture attribute is never assigned here.
def __snake_case ( self : Tuple ) -> List[str]:
__snake_case : Optional[Any] = pipeline(
"text-generation" , model=self.model_name , model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
__snake_case : Optional[Any] = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]["generated_text"] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
# Verifies 4-bit loading with a "balanced" device map across two GPUs.
class a (_lowerCAmelCase ):
"""simple docstring"""
def __snake_case ( self : List[str] ) -> str:
super().setUp()
# Loads the model split over GPUs 0 and 1, checks the device map, and runs
# a short generation whose decoded output must match the expected texts.
# NOTE(review): the model and tensors bind to `__snake_case` but are read
# back as `model_parallel` / `encoded_input` / `output_parallel`.
def __snake_case ( self : Optional[int] ) -> int:
__snake_case : Union[str, Any] = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=lowerCamelCase , device_map="balanced" )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
__snake_case : Any = self.tokenizer(self.input_text , return_tensors="pt" )
# Second real batch
__snake_case : Any = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=lowerCamelCase ) , self.EXPECTED_OUTPUTS )
# Smoke-tests 4-bit QLoRA-style training: freeze the base model, attach
# LoRA adapters to the attention projections, and check that gradients
# reach the adapter weights after a backward pass.
class a (_lowerCAmelCase ):
"""simple docstring"""
def __snake_case ( self : Dict ) -> Any:
# NOTE(review): binds to `__snake_case` instead of `self.model_name`,
# which the test method reads — confirm intent.
__snake_case : Dict = "facebook/opt-350m"
super().setUp()
def __snake_case ( self : List[str] ) -> Tuple:
# Adapter training on 4-bit weights requires bitsandbytes >= 0.37.0;
# skip silently on older versions.
if version.parse(importlib.metadata.version("bitsandbytes" ) ) < version.parse("0.37.0" ):
return
# Step 1: freeze all parameters
__snake_case : Any = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCamelCase )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
# NOTE(review): `param.requires_grad = False` was presumably intended;
# the assignment target was mangled to `__snake_case`.
__snake_case : Tuple = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
__snake_case : List[Any] = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(lowerCamelCase ) ):
__snake_case : Optional[int] = LoRALayer(module.q_proj , rank=16 )
__snake_case : Any = LoRALayer(module.k_proj , rank=16 )
__snake_case : Optional[int] = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
__snake_case : Dict = self.tokenizer("Test batch " , return_tensors="pt" ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
__snake_case : Optional[Any] = model.forward(**lowerCamelCase )
out.logits.norm().backward()
# Adapter weights must have received gradients; frozen embeddings must not.
for module in model.modules():
if isinstance(lowerCamelCase , lowerCamelCase ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(lowerCamelCase , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class a (_lowerCAmelCase ):
"""simple docstring"""
__UpperCAmelCase : List[str] = "gpt2-xl"
__UpperCAmelCase : Any = 3.3191854854152187
| 203 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
# Fast (tiny-model) test suite for the Stable Diffusion XL img2img pipeline:
# builds miniature UNet/VAE/CLIP components and checks output shapes,
# reference slices, and prompt-embedding equivalence.
class a (_lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = StableDiffusionXLImgaImgPipeline
__UpperCAmelCase : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
__UpperCAmelCase : Dict = PipelineTesterMixin.required_optional_params - {"latents"}
__UpperCAmelCase : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__UpperCAmelCase : Tuple = IMAGE_TO_IMAGE_IMAGE_PARAMS
__UpperCAmelCase : str = IMAGE_TO_IMAGE_IMAGE_PARAMS
# Build a dict of tiny, deterministically-seeded pipeline components
# (UNet, scheduler, VAE, two CLIP text encoders and tokenizers).
def __snake_case ( self : Optional[Any] ) -> Tuple:
torch.manual_seed(0 )
__snake_case : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , attention_head_dim=(2, 4) , use_linear_projection=lowerCamelCase , addition_embed_type="text_time" , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
__snake_case : Tuple = EulerDiscreteScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , steps_offset=1 , beta_schedule="scaled_linear" , timestep_spacing="leading" , )
torch.manual_seed(0 )
__snake_case : int = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
__snake_case : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=32 , )
__snake_case : List[str] = CLIPTextModel(lowerCamelCase )
__snake_case : Any = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" , local_files_only=lowerCamelCase )
__snake_case : List[str] = CLIPTextModelWithProjection(lowerCamelCase )
__snake_case : List[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" , local_files_only=lowerCamelCase )
__snake_case : Optional[int] = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_encoder_2": text_encoder_a,
"tokenizer_2": tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
# Produce a deterministic call-kwargs dict (random 32x32 image rescaled to
# [0.25, 0.75], seeded generator, short 2-step run) for the given device.
def __snake_case ( self : Optional[Any] , lowerCamelCase : List[Any] , lowerCamelCase : List[Any]=0 ) -> Union[str, Any]:
__snake_case : int = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase )
__snake_case : Any = image / 2 + 0.5
# MPS does not support device-bound generators, so fall back to the default one.
if str(lowerCamelCase ).startswith("mps" ):
__snake_case : Dict = torch.manual_seed(lowerCamelCase )
else:
__snake_case : int = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
__snake_case : Optional[Any] = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 5.0,
"output_type": "numpy",
"strength": 0.75,
}
return inputs
# End-to-end CPU run: output shape must be (1, 32, 32, 3) and the last
# 3x3x3 slice must match the hard-coded reference values within 1e-2.
def __snake_case ( self : Dict ) -> Any:
__snake_case : Dict = "cpu" # ensure determinism for the device-dependent torch.Generator
__snake_case : Any = self.get_dummy_components()
__snake_case : int = StableDiffusionXLImgaImgPipeline(**lowerCamelCase )
__snake_case : List[str] = sd_pipe.to(lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
__snake_case : Tuple = self.get_dummy_inputs(lowerCamelCase )
__snake_case : Dict = sd_pipe(**lowerCamelCase ).images
__snake_case : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__snake_case : str = np.array([0.46_56, 0.48_40, 0.44_39, 0.66_98, 0.55_74, 0.45_24, 0.57_99, 0.59_43, 0.51_65] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
# Mixin overrides: loosen tolerances for attention-slicing / batch tests.
def __snake_case ( self : str ) -> Optional[Any]:
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def __snake_case ( self : Any ) -> Dict:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
# Intentionally disabled mixin test (no-op override).
def __snake_case ( self : str ) -> Optional[int]:
pass
# Running with raw prompts and with precomputed prompt embeddings must
# produce (near-)identical images.
def __snake_case ( self : Tuple ) -> Union[str, Any]:
__snake_case : str = self.get_dummy_components()
__snake_case : List[Any] = StableDiffusionXLImgaImgPipeline(**lowerCamelCase )
__snake_case : Optional[Any] = sd_pipe.to(lowerCamelCase )
__snake_case : int = sd_pipe.to(lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
# forward without prompt embeds
__snake_case : List[str] = self.get_dummy_inputs(lowerCamelCase )
__snake_case : str = 3 * ["this is a negative prompt"]
__snake_case : Any = negative_prompt
__snake_case : Optional[Any] = 3 * [inputs["prompt"]]
__snake_case : int = sd_pipe(**lowerCamelCase )
__snake_case : List[Any] = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
__snake_case : List[Any] = self.get_dummy_inputs(lowerCamelCase )
__snake_case : Optional[Any] = 3 * ["this is a negative prompt"]
__snake_case : int = 3 * [inputs.pop("prompt" )]
(
(
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) ,
) : Dict = sd_pipe.encode_prompt(lowerCamelCase , negative_prompt=lowerCamelCase )
__snake_case : Tuple = sd_pipe(
**lowerCamelCase , prompt_embeds=lowerCamelCase , negative_prompt_embeds=lowerCamelCase , pooled_prompt_embeds=lowerCamelCase , negative_pooled_prompt_embeds=lowerCamelCase , )
__snake_case : List[str] = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
@slow
@require_torch_gpu
# Slow integration test: runs the real stable-diffusion-2-base checkpoint
# and compares a 512x512 output slice against reference values.
class a (unittest.TestCase ):
"""simple docstring"""
# Teardown: release Python and CUDA memory after each test.
def __snake_case ( self : Optional[int] ) -> Dict:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
# Build deterministic call kwargs: fixed-seed latents of shape (1, 4, 64, 64)
# plus a seeded generator, a 3-step schedule, and numpy output.
def __snake_case ( self : Optional[Any] , lowerCamelCase : Any , lowerCamelCase : Optional[Any]="cpu" , lowerCamelCase : str=torch.floataa , lowerCamelCase : int=0 ) -> Dict:
__snake_case : int = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
__snake_case : Optional[Any] = np.random.RandomState(lowerCamelCase ).standard_normal((1, 4, 64, 64) )
__snake_case : Optional[Any] = torch.from_numpy(lowerCamelCase ).to(device=lowerCamelCase , dtype=lowerCamelCase )
__snake_case : List[str] = {
"prompt": "a photograph of an astronaut riding a horse",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
# Full pipeline run: output must be (1, 512, 512, 3) and the final 3x3
# slice must match the stored reference within 7e-3.
def __snake_case ( self : str ) -> Any:
__snake_case : List[str] = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base" )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
__snake_case : int = self.get_inputs(lowerCamelCase )
__snake_case : Optional[Any] = pipe(**lowerCamelCase ).images
__snake_case : Any = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
__snake_case : Optional[int] = np.array([0.4_94_93, 0.4_78_96, 0.4_07_98, 0.5_42_14, 0.5_32_12, 0.4_82_02, 0.4_76_56, 0.4_63_29, 0.4_85_06] )
assert np.abs(image_slice - expected_slice ).max() < 7E-3
| 203 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
'huggingface/informer-tourism-monthly': (
'https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
# Configuration class for the Informer time-series transformer. Stores both
# the time-series-specific settings (context/prediction lengths, lags,
# static/dynamic features) and the transformer architecture hyperparameters.
class UpperCamelCase (__snake_case ):
_SCREAMING_SNAKE_CASE : Tuple = """informer"""
# Maps the generic config attribute names onto Informer's own names.
_SCREAMING_SNAKE_CASE : Tuple = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
"""num_hidden_layers""": """encoder_layers""",
}
def __init__( self :List[str] , __magic_name__ :Optional[int] = None , __magic_name__ :Optional[int] = None , __magic_name__ :str = "student_t" , __magic_name__ :str = "nll" , __magic_name__ :int = 1 , __magic_name__ :List[int] = None , __magic_name__ :Optional[Union[str, bool]] = "mean" , __magic_name__ :int = 0 , __magic_name__ :int = 0 , __magic_name__ :int = 0 , __magic_name__ :int = 0 , __magic_name__ :Optional[List[int]] = None , __magic_name__ :Optional[List[int]] = None , __magic_name__ :int = 64 , __magic_name__ :int = 32 , __magic_name__ :int = 32 , __magic_name__ :int = 2 , __magic_name__ :int = 2 , __magic_name__ :int = 2 , __magic_name__ :int = 2 , __magic_name__ :bool = True , __magic_name__ :str = "gelu" , __magic_name__ :float = 0.05 , __magic_name__ :float = 0.1 , __magic_name__ :float = 0.1 , __magic_name__ :float = 0.1 , __magic_name__ :float = 0.1 , __magic_name__ :int = 100 , __magic_name__ :float = 0.02 , __magic_name__ :List[Any]=True , __magic_name__ :str = "prob" , __magic_name__ :int = 5 , __magic_name__ :bool = True , **__magic_name__ :List[Any] , ) ->Dict:
# time series specific configuration
lowercase : Tuple = prediction_length
# context window defaults to the prediction length when not given
lowercase : int = context_length or prediction_length
lowercase : List[Any] = distribution_output
lowercase : List[str] = loss
lowercase : List[Any] = input_size
lowercase : Union[str, Any] = num_time_features
# default lag indices cover the previous 7 steps
lowercase : Optional[Any] = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
lowercase : Optional[Any] = scaling
lowercase : List[str] = num_dynamic_real_features
lowercase : int = num_static_real_features
lowercase : Union[str, Any] = num_static_categorical_features
# set cardinality — must align one-to-one with the categorical features
if cardinality and num_static_categorical_features > 0:
if len(__magic_name__ ) != num_static_categorical_features:
raise ValueError(
"""The cardinality should be a list of the same length as `num_static_categorical_features`""" )
lowercase : int = cardinality
else:
lowercase : Union[str, Any] = [0]
# set embedding_dimension — defaults to min(50, (cardinality + 1) // 2)
if embedding_dimension and num_static_categorical_features > 0:
if len(__magic_name__ ) != num_static_categorical_features:
raise ValueError(
"""The embedding dimension should be a list of the same length as `num_static_categorical_features`""" )
lowercase : str = embedding_dimension
else:
lowercase : Dict = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
lowercase : List[str] = num_parallel_samples
# Transformer architecture configuration
# encoder/decoder input width: one slot per lag plus the extra features
lowercase : Any = input_size * len(self.lags_sequence ) + self._number_of_features
lowercase : int = d_model
lowercase : Dict = encoder_attention_heads
lowercase : Dict = decoder_attention_heads
lowercase : Optional[Any] = encoder_ffn_dim
lowercase : Tuple = decoder_ffn_dim
lowercase : Union[str, Any] = encoder_layers
lowercase : str = decoder_layers
lowercase : int = dropout
lowercase : List[Any] = attention_dropout
lowercase : Optional[Any] = activation_dropout
lowercase : List[str] = encoder_layerdrop
lowercase : Any = decoder_layerdrop
lowercase : List[str] = activation_function
lowercase : Tuple = init_std
lowercase : List[Any] = use_cache
# Informer-specific: ProbSparse attention type, sampling factor, distilling
lowercase : Tuple = attention_type
lowercase : List[str] = sampling_factor
lowercase : List[str] = distil
super().__init__(is_encoder_decoder=__magic_name__ , **__magic_name__ )
@property
# Total number of extra per-timestep features fed alongside the lagged values.
def __snake_case ( self :Any ) ->int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 264 |
"""simple docstring"""
def UpperCamelCase ( _A ) -> int:
    """Return the sum of the decimal digits of the non-negative integer ``_A``."""
    # BUG FIX: the accumulator and loop variable were bound to `lowercase`
    # but read back as `digit_sum` / `num` (NameError), and the parameter
    # `_A` was never consumed at all.
    digit_sum = 0
    num = _A
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum
def UpperCamelCase ( _A = 100 ) -> int:
    """Project Euler 65: digit sum of the numerator of the ``_A``-th convergent
    of the continued fraction of e.

    The continued fraction of e is [2; 1, 2, 1, 1, 4, 1, 1, 6, ...], so the
    partial quotient at position i is 2*i/3 when i is divisible by 3 and 1
    otherwise; numerators follow h_i = a_i * h_{i-1} + h_{i-2}.
    """
    # BUG FIX: every intermediate was bound to `lowercase` but read back as
    # `pre_numerator` / `cur_numerator` / `temp` / `e_cont` (NameError), and
    # the final call targeted the undefined name `sum_digits`.
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, _A + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    # Digit sum computed inline: the helper above shares this function's name
    # and is shadowed by this definition, so it cannot be called from here.
    return sum(int(digit) for digit in str(cur_numerator))


if __name__ == "__main__":
    # BUG FIX: the guard called the undefined name `solution`.
    print(f"{UpperCamelCase() = }")
| 264 | 1 |
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
# Module-level preprocessing transform: resize to 256x256, convert to a
# tensor, then normalize each channel to roughly [-1, 1].
_lowerCAmelCase : List[str] = transforms.Compose(
[
transforms.Resize((2_5_6, 2_5_6)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def UpperCAmelCase_ ( snake_case__ ) -> str:
    """Normalize the input into a batched image tensor.

    A ``torch.Tensor`` is returned unchanged; a single PIL image is wrapped
    in a list; each PIL image is converted to RGB, passed through the
    module-level resize/normalize transform, and the results are stacked
    along a new batch dimension.
    """
    if isinstance(snake_case__ , torch.Tensor ):
        # BUG FIX: returned the undefined name `image` instead of the argument.
        return snake_case__
    elif isinstance(snake_case__ , PIL.Image.Image ):
        snake_case__ = [snake_case__]
    # BUG FIX: the original iterated the undefined name `image` and applied
    # the undefined `trans`; the transform lives in module-level `_lowerCAmelCase`.
    images = [_lowerCAmelCase(img.convert('RGB' ) ) for img in snake_case__]
    return torch.stack(images )
# DDIM-based image-to-image diffusion pipeline: noises an input image to an
# intermediate timestep determined by `strength`, then denoises it back.
class __snake_case ( SCREAMING_SNAKE_CASE ):
def __init__( self ,a_ ,a_ ):
"""simple docstring"""
super().__init__()
# make sure scheduler can always be converted to DDIM
__snake_case = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=a_ ,scheduler=a_ )
# Validate that `strength` lies in [0, 1].
def SCREAMING_SNAKE_CASE_ ( self ,a_ ):
"""simple docstring"""
if strength < 0 or strength > 1:
raise ValueError(f'The value of strength should in [0.0, 1.0] but is {strength}' )
# Given the full schedule and `strength`, return the truncated timestep
# list to denoise over and its effective length.
def SCREAMING_SNAKE_CASE_ ( self ,a_ ,a_ ,a_ ):
"""simple docstring"""
__snake_case = min(int(num_inference_steps * strength ) ,a_ )
__snake_case = max(num_inference_steps - init_timestep ,0 )
__snake_case = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
# Encode the input image into initial latents and add scheduler noise at
# the given starting timestep.
def SCREAMING_SNAKE_CASE_ ( self ,a_ ,a_ ,a_ ,a_ ,a_ ,a_=None ):
"""simple docstring"""
if not isinstance(a_ ,(torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f'`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(a_ )}' )
__snake_case = image.to(device=a_ ,dtype=a_ )
if isinstance(a_ ,a_ ) and len(a_ ) != batch_size:
raise ValueError(
f'You have passed a list of generators of length {len(a_ )}, but requested an effective batch'
f' size of {batch_size}. Make sure the batch size matches the length of the generators.' )
__snake_case = init_latents.shape
__snake_case = randn_tensor(a_ ,generator=a_ ,device=a_ ,dtype=a_ )
# get latents
print('add noise to latents at timestep' ,a_ )
__snake_case = self.scheduler.add_noise(a_ ,a_ ,a_ )
__snake_case = init_latents
return latents
@torch.no_grad()
# Full img2img generation: preprocess, truncate the schedule by `strength`,
# noise the latents, then run the DDIM denoising loop and post-process.
def __call__( self ,a_ = None ,a_ = 0.8 ,a_ = 1 ,a_ = None ,a_ = 0.0 ,a_ = 50 ,a_ = None ,a_ = "pil" ,a_ = True ,):
"""simple docstring"""
self.check_inputs(a_ )
# 2. Preprocess image
__snake_case = preprocess(a_ )
# 3. set timesteps
self.scheduler.set_timesteps(a_ ,device=self.device )
__snake_case , __snake_case = self.get_timesteps(a_ ,a_ ,self.device )
__snake_case = timesteps[:1].repeat(a_ )
# 4. Prepare latent variables
__snake_case = self.prepare_latents(a_ ,a_ ,a_ ,self.unet.dtype ,self.device ,a_ )
__snake_case = latents
# 5. Denoising loop
for t in self.progress_bar(a_ ):
# 1. predict noise model_output
__snake_case = self.unet(a_ ,a_ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
__snake_case = self.scheduler.step(
a_ ,a_ ,a_ ,eta=a_ ,use_clipped_model_output=a_ ,generator=a_ ,).prev_sample
# map latents from [-1, 1] back to [0, 1] image range
__snake_case = (image / 2 + 0.5).clamp(0 ,1 )
__snake_case = image.cpu().permute(0 ,2 ,3 ,1 ).numpy()
if output_type == "pil":
__snake_case = self.numpy_to_pil(a_ )
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=a_ )
| 714 |
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
# Output container for the temporal transformer; holds the `sample` tensor.
class __snake_case ( SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE__ = 42
# Temporal transformer model: applies a stack of basic transformer blocks
# along the frame axis of a (batch*frames, channels, height, width) input
# and adds the result back residually.
class __snake_case ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
@register_to_config
def __init__( self ,a_ = 16 ,a_ = 88 ,a_ = None ,a_ = None ,a_ = 1 ,a_ = 0.0 ,a_ = 32 ,a_ = None ,a_ = False ,a_ = None ,a_ = "geglu" ,a_ = True ,a_ = True ,):
"""simple docstring"""
super().__init__()
__snake_case = num_attention_heads
__snake_case = attention_head_dim
# total inner width of the attention blocks
__snake_case = num_attention_heads * attention_head_dim
__snake_case = in_channels
__snake_case = torch.nn.GroupNorm(num_groups=a_ ,num_channels=a_ ,eps=1e-6 ,affine=a_ )
# project channels into the transformer width
__snake_case = nn.Linear(a_ ,a_ )
# 3. Define transformers blocks
__snake_case = nn.ModuleList(
[
BasicTransformerBlock(
a_ ,a_ ,a_ ,dropout=a_ ,cross_attention_dim=a_ ,activation_fn=a_ ,attention_bias=a_ ,double_self_attention=a_ ,norm_elementwise_affine=a_ ,)
for d in range(a_ )
] )
# project back to the original channel count
__snake_case = nn.Linear(a_ ,a_ )
# Forward pass: reshape (batch*frames, C, H, W) so that attention runs over
# the frame dimension per spatial location, apply the blocks, then restore
# the original layout and add the residual.
def SCREAMING_SNAKE_CASE_ ( self ,a_ ,a_=None ,a_=None ,a_=None ,a_=1 ,a_=None ,a_ = True ,):
"""simple docstring"""
__snake_case , __snake_case , __snake_case , __snake_case = hidden_states.shape
__snake_case = batch_frames // num_frames
__snake_case = hidden_states
# (batch, C, frames, H, W) view for normalization
__snake_case = hidden_states[None, :].reshape(a_ ,a_ ,a_ ,a_ ,a_ )
__snake_case = hidden_states.permute(0 ,2 ,1 ,3 ,4 )
__snake_case = self.norm(a_ )
# flatten spatial positions into the batch so sequence length == frames
__snake_case = hidden_states.permute(0 ,3 ,4 ,2 ,1 ).reshape(batch_size * height * width ,a_ ,a_ )
__snake_case = self.proj_in(a_ )
# 2. Blocks
for block in self.transformer_blocks:
__snake_case = block(
a_ ,encoder_hidden_states=a_ ,timestep=a_ ,cross_attention_kwargs=a_ ,class_labels=a_ ,)
# 3. Output
__snake_case = self.proj_out(a_ )
__snake_case = (
hidden_states[None, None, :]
.reshape(a_ ,a_ ,a_ ,a_ ,a_ )
.permute(0 ,3 ,4 ,1 ,2 )
.contiguous()
)
__snake_case = hidden_states.reshape(a_ ,a_ ,a_ ,a_ )
# residual connection with the unmodified input
__snake_case = hidden_states + residual
if not return_dict:
return (output,)
return TransformerTemporalModelOutput(sample=a_ )
| 604 | 0 |
"""simple docstring"""
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
_UpperCAmelCase = get_tests_dir("""fixtures""")
# Offline/remote-loading behaviour of image processors: cached models under
# connection errors, loading from a direct URL, and subfolder handling.
class a ( unittest.TestCase ):
# A cached image processor must still load when the Hub returns HTTP 500.
def lowerCamelCase__ ( self : Any ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] =mock.Mock()
SCREAMING_SNAKE_CASE_: Tuple =500
SCREAMING_SNAKE_CASE_: Union[str, Any] ={}
SCREAMING_SNAKE_CASE_: str =HTTPError
SCREAMING_SNAKE_CASE_: Any ={}
# Download this model to make sure it's in the cache.
SCREAMING_SNAKE_CASE_: Union[str, Any] =ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("""requests.Session.request""" , return_value=lowerCAmelCase ) as mock_head:
SCREAMING_SNAKE_CASE_: List[Any] =ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" )
# This check we did call the fake head request
mock_head.assert_called()
# Loading directly from a full config-file URL must work.
def lowerCamelCase__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =ViTImageProcessor.from_pretrained(
"""https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json""" )
# Loading a config stored in a subfolder must fail without `subfolder=`
# and succeed with it.
def lowerCamelCase__ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
with self.assertRaises(lowerCAmelCase ):
# config is in subfolder, the following should not work without specifying the subfolder
SCREAMING_SNAKE_CASE_: int =AutoImageProcessor.from_pretrained("""hf-internal-testing/stable-diffusion-all-variants""" )
SCREAMING_SNAKE_CASE_: Optional[int] =AutoImageProcessor.from_pretrained(
"""hf-internal-testing/stable-diffusion-all-variants""" , subfolder="""feature_extractor""" )
self.assertIsNotNone(lowerCAmelCase )
@is_staging_test
class a ( unittest.TestCase ):
@classmethod
def lowerCamelCase__ ( cls : List[str] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] =TOKEN
HfFolder.save_token(lowerCAmelCase )
@classmethod
def lowerCamelCase__ ( cls : Dict ) -> Optional[Any]:
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id="""test-image-processor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-image-processor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-image-processor""" )
except HTTPError:
pass
def lowerCamelCase__ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =ViTImageProcessor.from_pretrained(lowerCAmelCase )
image_processor.push_to_hub("""test-image-processor""" , use_auth_token=self._token )
SCREAMING_SNAKE_CASE_: List[Any] =ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(lowerCAmelCase , getattr(lowerCAmelCase , lowerCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-image-processor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
lowerCAmelCase , repo_id="""test-image-processor""" , push_to_hub=lowerCAmelCase , use_auth_token=self._token )
SCREAMING_SNAKE_CASE_: List[Any] =ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(lowerCAmelCase , getattr(lowerCAmelCase , lowerCAmelCase ) )
def lowerCamelCase__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] =ViTImageProcessor.from_pretrained(lowerCAmelCase )
image_processor.push_to_hub("""valid_org/test-image-processor""" , use_auth_token=self._token )
SCREAMING_SNAKE_CASE_: Optional[int] =ViTImageProcessor.from_pretrained("""valid_org/test-image-processor""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(lowerCAmelCase , getattr(lowerCAmelCase , lowerCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-image-processor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
lowerCAmelCase , repo_id="""valid_org/test-image-processor-org""" , push_to_hub=lowerCAmelCase , use_auth_token=self._token )
SCREAMING_SNAKE_CASE_: Dict =ViTImageProcessor.from_pretrained("""valid_org/test-image-processor-org""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(lowerCAmelCase , getattr(lowerCAmelCase , lowerCAmelCase ) )
    def lowerCamelCase__ ( self : Union[str, Any] ) -> Tuple:
        """Push a *dynamic* (custom-code) image processor to the Hub and reload it
        via ``AutoImageProcessor`` with ``trust_remote_code``.

        NOTE(review): `lowerCAmelCase` is unbound in this scope — see the note in
        the sibling test; the original argument was mangled.
        """
        CustomImageProcessor.register_for_auto_class()
        SCREAMING_SNAKE_CASE_: Any =CustomImageProcessor.from_pretrained(lowerCAmelCase )
        image_processor.push_to_hub("""test-dynamic-image-processor""" , use_auth_token=self._token )
        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map , {"""AutoImageProcessor""": """custom_image_processing.CustomImageProcessor"""} , )
        SCREAMING_SNAKE_CASE_: List[str] =AutoImageProcessor.from_pretrained(
            f'''{USER}/test-dynamic-image-processor''' , trust_remote_code=lowerCAmelCase )
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__ , """CustomImageProcessor""" )
| 409 |
"""simple docstring"""
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
_UpperCAmelCase = get_tests_dir("""fixtures""")
class a ( unittest.TestCase ):
    """Offline/robustness tests for image-processor loading (no Hub writes)."""

    def lowerCamelCase__ ( self : Any ) -> Any:
        """A cached processor must still load when the Hub returns HTTP 500."""
        # Build a fake `requests` response that always reports a server error.
        SCREAMING_SNAKE_CASE_: Optional[int] =mock.Mock()
        SCREAMING_SNAKE_CASE_: Tuple =500
        SCREAMING_SNAKE_CASE_: Union[str, Any] ={}
        SCREAMING_SNAKE_CASE_: str =HTTPError
        SCREAMING_SNAKE_CASE_: Any ={}
        # NOTE(review): the five assignments above all bind the same throwaway
        # name — originally they configured status_code / headers /
        # raise_for_status / json on the mock response; the variable names were
        # mangled. Confirm against the upstream test suite.
        # Download this model to make sure it's in the cache.
        SCREAMING_SNAKE_CASE_: Union[str, Any] =ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" )
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("""requests.Session.request""" , return_value=lowerCAmelCase ) as mock_head:
            SCREAMING_SNAKE_CASE_: List[Any] =ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" )
            # This check we did call the fake head request
            mock_head.assert_called()

    def lowerCamelCase__ ( self : List[Any] ) -> List[Any]:
        """Loading directly from a resolved config URL (legacy path) must work."""
        SCREAMING_SNAKE_CASE_: int =ViTImageProcessor.from_pretrained(
            """https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json""" )

    def lowerCamelCase__ ( self : List[Any] ) -> Tuple:
        """A config stored in a subfolder requires the explicit `subfolder` kwarg."""
        with self.assertRaises(lowerCAmelCase ):
            # config is in subfolder, the following should not work without specifying the subfolder
            SCREAMING_SNAKE_CASE_: int =AutoImageProcessor.from_pretrained("""hf-internal-testing/stable-diffusion-all-variants""" )
        SCREAMING_SNAKE_CASE_: Optional[int] =AutoImageProcessor.from_pretrained(
            """hf-internal-testing/stable-diffusion-all-variants""" , subfolder="""feature_extractor""" )
        self.assertIsNotNone(lowerCAmelCase )
@is_staging_test
class a ( unittest.TestCase ):
    """Staging-Hub integration tests: push image processors and reload them.

    Uses the shared staging token; repos created here are deleted again in the
    class teardown so the suite is re-runnable.

    NOTE(review): `lowerCAmelCase` is unbound throughout these methods — the
    identifier mangling clobbered the original fixture path / boolean
    arguments; confirm against the upstream test suite.
    """

    @classmethod
    def lowerCamelCase__ ( cls : List[str] ) -> List[str]:
        """Store the staging token so every Hub call below is authenticated."""
        SCREAMING_SNAKE_CASE_: Optional[int] =TOKEN
        HfFolder.save_token(lowerCAmelCase )

    @classmethod
    def lowerCamelCase__ ( cls : Dict ) -> Optional[Any]:
        """Best-effort cleanup of every repo a test may have created."""
        try:
            delete_repo(token=cls._token , repo_id="""test-image-processor""" )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id="""valid_org/test-image-processor-org""" )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id="""test-dynamic-image-processor""" )
        except HTTPError:
            pass

    def lowerCamelCase__ ( self : List[str] ) -> Optional[int]:
        """Round-trip a processor through a user repo via both push code paths."""
        SCREAMING_SNAKE_CASE_: Dict =ViTImageProcessor.from_pretrained(lowerCAmelCase )
        image_processor.push_to_hub("""test-image-processor""" , use_auth_token=self._token )
        SCREAMING_SNAKE_CASE_: List[Any] =ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' )
        # Every attribute must survive the Hub round trip unchanged.
        for k, v in image_processor.__dict__.items():
            self.assertEqual(lowerCAmelCase , getattr(lowerCAmelCase , lowerCAmelCase ) )
        # Reset repo
        delete_repo(token=self._token , repo_id="""test-image-processor""" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                lowerCAmelCase , repo_id="""test-image-processor""" , push_to_hub=lowerCAmelCase , use_auth_token=self._token )
        SCREAMING_SNAKE_CASE_: List[Any] =ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(lowerCAmelCase , getattr(lowerCAmelCase , lowerCAmelCase ) )

    def lowerCamelCase__ ( self : Tuple ) -> Optional[int]:
        """Same round trip as above, but through an organization repo."""
        SCREAMING_SNAKE_CASE_: Optional[int] =ViTImageProcessor.from_pretrained(lowerCAmelCase )
        image_processor.push_to_hub("""valid_org/test-image-processor""" , use_auth_token=self._token )
        SCREAMING_SNAKE_CASE_: Optional[int] =ViTImageProcessor.from_pretrained("""valid_org/test-image-processor""" )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(lowerCAmelCase , getattr(lowerCAmelCase , lowerCAmelCase ) )
        # Reset repo
        delete_repo(token=self._token , repo_id="""valid_org/test-image-processor""" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                lowerCAmelCase , repo_id="""valid_org/test-image-processor-org""" , push_to_hub=lowerCAmelCase , use_auth_token=self._token )
        SCREAMING_SNAKE_CASE_: Dict =ViTImageProcessor.from_pretrained("""valid_org/test-image-processor-org""" )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(lowerCAmelCase , getattr(lowerCAmelCase , lowerCAmelCase ) )

    def lowerCamelCase__ ( self : Union[str, Any] ) -> Tuple:
        """Push a custom-code processor and reload it through AutoImageProcessor."""
        CustomImageProcessor.register_for_auto_class()
        SCREAMING_SNAKE_CASE_: Any =CustomImageProcessor.from_pretrained(lowerCAmelCase )
        image_processor.push_to_hub("""test-dynamic-image-processor""" , use_auth_token=self._token )
        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map , {"""AutoImageProcessor""": """custom_image_processing.CustomImageProcessor"""} , )
        SCREAMING_SNAKE_CASE_: List[str] =AutoImageProcessor.from_pretrained(
            f'''{USER}/test-dynamic-image-processor''' , trust_remote_code=lowerCAmelCase )
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__ , """CustomImageProcessor""" )
| 409 | 1 |
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowercase = logging.get_logger(__name__)
class UpperCamelCase_ ( snake_case_ ):
    """Image processor that resizes images down to the nearest multiple of
    ``size_divisor`` and optionally rescales pixel values to [0, 1].

    Fixes over the previous revision: the constructor now actually stores its
    arguments on ``self`` (they were bound to a throwaway local), the three
    processing methods have distinct names (they all shadowed one another and
    the bodies already called ``self.resize`` / ``self.rescale``), and the
    duplicate parameter names — a SyntaxError — are resolved.
    """

    # Name of the tensor the model consumes.
    model_input_names = ['pixel_values']

    def __init__( self , do_resize = True , size_divisor = 32 , resample=PILImageResampling.BILINEAR , do_rescale = True , **kwargs , ) -> None:
        """Persist the preprocessing defaults so `preprocess` can fall back to them."""
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs )

    def resize( self , image , size_divisor , resample , data_format = None , **kwargs ) -> np.ndarray:
        """Resize so height and width are the largest multiples of `size_divisor`."""
        height, width = get_image_size(image )
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        # `resize` here is the module-level transform, not this method.
        image = resize(image , (new_h, new_w) , resample=resample , data_format=data_format , **kwargs )
        return image

    def rescale( self , image , scale , data_format = None , **kwargs ) -> np.ndarray:
        """Multiply pixel values by `scale` (typically 1/255)."""
        return rescale(image=image , scale=scale , data_format=data_format , **kwargs )

    def preprocess( self , images , do_resize = None , do_rescale = None , size_divisor=None , resample = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ) -> BatchFeature:
        """Prepare one image or a batch for the model.

        Raises ValueError when resizing is requested without a `size_divisor`,
        or when the input is not a valid image / batch of images.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample
        if do_resize and size_divisor is None:
            raise ValueError('size_divisor is required for resizing' )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError('Invalid image(s)' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(img ) for img in images]
        if do_resize:
            images = [self.resize(image , size_divisor=size_divisor , resample=resample ) for image in images]
        if do_rescale:
            images = [self.rescale(image , scale=1 / 255 ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
| 706 |
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
lowercase = [
"cross_validation.py",
"gradient_accumulation.py",
"local_sgd.py",
"multi_process_metrics.py",
"memory.py",
"automatic_gradient_accumulation.py",
"fsdp_with_peak_mem_tracking.py",
"deepspeed_with_config_support.py",
"megatron_lm_gpt_pretraining.py",
]
class UpperCamelCase_ ( unittest.TestCase ):
    """Checks that every `examples/by_feature/*.py` script stays in sync with
    the corresponding `complete_*_example.py` reference script.

    NOTE(review): the first def below declares the parameter name `a` several
    times — a SyntaxError — and the bodies read names (`item_path`,
    `parser_only`, `special_strings`, `diff`) that were mangled away from the
    parameters/locals. Confirm against the upstream accelerate test suite.
    """

    def _UpperCamelCase ( self , a , a , a = None , a = None ) -> int:
        """Diff one complete example against every non-excluded feature script."""
        snake_case_ = None
        snake_case_ = os.path.abspath(os.path.join('examples' , 'by_feature' ) )
        snake_case_ = os.path.abspath('examples' )
        for item in os.listdir(a ):
            if item not in EXCLUDE_EXAMPLES:
                snake_case_ = os.path.join(a , a )
                if os.path.isfile(a ) and ".py" in item_path:
                    with self.subTest(
                        tested_script=a , feature_script=a , tested_section='main()' if parser_only else 'training_function()' , ):
                        snake_case_ = compare_against_test(
                            os.path.join(a , a ) , a , a , a )
                        snake_case_ = '\n'.join(a )
                        # Whitelisted strings are removed before asserting an empty diff.
                        if special_strings is not None:
                            for string in special_strings:
                                snake_case_ = diff.replace(a , '' )
                        self.assertEqual(a , '' )

    def _UpperCamelCase ( self ) -> Optional[Any]:
        """NLP example: parser section and training section must both match."""
        self.one_complete_example('complete_nlp_example.py' , a )
        self.one_complete_example('complete_nlp_example.py' , a )

    def _UpperCamelCase ( self ) -> Union[str, Any]:
        """CV example: a few known-divergent (tracking-related) lines are whitelisted."""
        snake_case_ = os.path.abspath(os.path.join('examples' , 'cv_example.py' ) )
        snake_case_ = [
            ' ' * 16 + '{\n\n',
            ' ' * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
            ' ' * 20 + '"f1": eval_metric["f1"],\n\n',
            ' ' * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
            ' ' * 20 + '"epoch": epoch,\n\n',
            ' ' * 16 + '},\n\n',
            ' ' * 16 + 'step=epoch,\n',
            ' ' * 12,
            ' ' * 8 + 'for step, batch in enumerate(active_dataloader):\n',
        ]
        self.one_complete_example('complete_cv_example.py' , a , a , a )
        self.one_complete_example('complete_cv_example.py' , a , a , a )
@mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''1'''} )
class UpperCamelCase_ ( snake_case_ ):
    """End-to-end launcher tests for the `examples/by_feature` scripts.

    Each test shells out through `accelerate launch` with mocked dataloaders so
    runs are fast; checkpointing tests verify resume behaviour on disk.

    NOTE(review): assignments below bind a throwaway name while the reads use
    the original identifiers (cls._tmpdir, cls.configPath, cls._launch_args,
    testargs, num_processes, results, ...) — the names were mangled. Confirm
    against the upstream accelerate test suite.
    """

    # Presumably disables per-test tmp-dir isolation in TempDirTestCase — confirm.
    lowerCAmelCase = False

    @classmethod
    def _UpperCamelCase ( cls ) -> Optional[int]:
        """Write a default accelerate config once and build the launch prefix."""
        super().setUpClass()
        snake_case_ = tempfile.mkdtemp()
        snake_case_ = os.path.join(cls._tmpdir , 'default_config.yml' )
        write_basic_config(save_location=cls.configPath )
        snake_case_ = ['accelerate', 'launch', '--config_file', cls.configPath]

    @classmethod
    def _UpperCamelCase ( cls ) -> Optional[Any]:
        """Remove the temporary config directory created in setUpClass."""
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir )

    def _UpperCamelCase ( self ) -> List[str]:
        """Epoch-based checkpointing writes an `epoch_0` directory."""
        snake_case_ = F'''
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        '''.split()
        run_command(self._launch_args + testargs )
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir , 'epoch_0' ) ) )

    def _UpperCamelCase ( self ) -> List[Any]:
        """Step-based checkpointing writes a `step_2` directory."""
        snake_case_ = F'''
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        '''.split()
        snake_case_ = run_command(self._launch_args + testargs )
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir , 'step_2' ) ) )

    def _UpperCamelCase ( self ) -> Optional[Any]:
        """Resuming from an epoch checkpoint must skip the completed epoch 0."""
        snake_case_ = F'''
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )}
        '''.split()
        snake_case_ = run_command(self._launch_args + testargs , return_stdout=a )
        self.assertNotIn('epoch 0:' , a )
        self.assertIn('epoch 1:' , a )

    def _UpperCamelCase ( self ) -> List[str]:
        """Resuming from a step checkpoint: expected epochs depend on world size."""
        snake_case_ = F'''
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )}
        '''.split()
        snake_case_ = run_command(self._launch_args + testargs , return_stdout=a )
        if torch.cuda.is_available():
            snake_case_ = torch.cuda.device_count()
        else:
            snake_case_ = 1
        # With >1 process step_2 already covers epoch 0; single-process resumes mid-epoch.
        if num_processes > 1:
            self.assertNotIn('epoch 0:' , a )
            self.assertIn('epoch 1:' , a )
        else:
            self.assertIn('epoch 0:' , a )
            self.assertIn('epoch 1:' , a )

    @slow
    def _UpperCamelCase ( self ) -> int:
        """Cross-validation example must report >= 0.75 accuracy on real data."""
        snake_case_ = '\n    examples/by_feature/cross_validation.py\n    --num_folds 2\n    '.split()
        with mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '0'} ):
            snake_case_ = run_command(self._launch_args + testargs , return_stdout=a )
            # Pull the last printed metrics dict out of stdout.
            snake_case_ = re.findall('({.+})' , a )
            snake_case_ = [r for r in results if 'accuracy' in r][-1]
            snake_case_ = ast.literal_eval(a )
            self.assertGreaterEqual(results['accuracy'] , 0.75 )

    def _UpperCamelCase ( self ) -> Optional[int]:
        """Smoke test: multi-process metrics example launches cleanly."""
        snake_case_ = ['examples/by_feature/multi_process_metrics.py']
        run_command(self._launch_args + testargs )

    @require_trackers
    @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
    def _UpperCamelCase ( self ) -> List[Any]:
        """Tracking example creates a `tracking` directory under the project dir."""
        with tempfile.TemporaryDirectory() as tmpdir:
            snake_case_ = F'''
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            '''.split()
            run_command(self._launch_args + testargs )
            self.assertTrue(os.path.exists(os.path.join(a , 'tracking' ) ) )

    def _UpperCamelCase ( self ) -> List[str]:
        """Smoke test: gradient accumulation example launches cleanly."""
        snake_case_ = ['examples/by_feature/gradient_accumulation.py']
        run_command(self._launch_args + testargs )

    def _UpperCamelCase ( self ) -> List[str]:
        """Smoke test: local SGD example launches cleanly."""
        snake_case_ = ['examples/by_feature/local_sgd.py']
        run_command(self._launch_args + testargs )
| 607 | 0 |
import re
from filelock import FileLock
try:
import nltk
SCREAMING_SNAKE_CASE__ : Tuple = True
except (ImportError, ModuleNotFoundError):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = False
if NLTK_AVAILABLE:
with FileLock(""".lock""") as lock:
nltk.download("""punkt""", quiet=True)
def __lowercase ( snake_case ):
    """Strip the pegasus newline token and re-join the text sentence-by-sentence.

    Splits `snake_case` into sentences with NLTK's punkt tokenizer and joins
    them with "\\n" — needed so rougeLsum matches published rougeL scores.
    Raises AssertionError when nltk is not installed.
    """
    # Bug fix: the result of re.sub was previously discarded, so the pegasus
    # newline char "<n>" was never actually removed from the text.
    snake_case = re.sub('''<n>''', '''''', snake_case )  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(snake_case ) )
| 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'vocab_file': 'vocab.txt'}
__magic_name__ = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
__magic_name__ = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
__magic_name__ = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
    """'Fast' ConvBERT tokenizer backed by the HuggingFace *tokenizers* library.

    Fixes over the previous revision: every class attribute was named `a_` and
    every method `_a` (so later definitions shadowed earlier ones), and several
    defs declared the same parameter name more than once — a SyntaxError.
    Canonical attribute/method/parameter names required by the
    PreTrainedTokenizerFast machinery are restored.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        """Load the backend tokenizer and re-sync its normalizer options with
        the constructor arguments (lowercasing, accent stripping, CJK handling)."""
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        # Rebuild the normalizer only when the saved state disagrees with the kwargs.
        if (
            normalizer_state.get("""lowercase""" , do_lower_case ) != do_lower_case
            or normalizer_state.get("""strip_accents""" , strip_accents ) != strip_accents
            or normalizer_state.get("""handle_chinese_chars""" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("""type""" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self , token_ids_0 , token_ids_1=None ):
        """`[CLS] A [SEP]` for one sequence, `[CLS] A [SEP] B [SEP]` for a pair."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        """Token-type ids: 0 for the first sequence (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]

    def save_vocabulary(self , save_directory: str , filename_prefix: Optional[str] = None ):
        """Persist the backend vocabulary files; returns the written file paths."""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 665 | 0 |
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class a ( __lowercase ):
    """RagTokenizer tests: builds tiny on-disk DPR/BART tokenizers, checks a
    save/load round trip, and (slow) smoke-tests batch encoding.

    NOTE(review): assignments throughout bind a single throwaway name while
    later reads use the original identifiers (self.tmpdirname, vocab_tokens,
    self.vocab_file, rag_config, tokenizer, ...) — the variable names were
    mangled. Confirm against the upstream transformers test suite.
    """

    def snake_case_ ( self ):
        """Create tiny DPR (WordPiece) and BART (BPE) tokenizer files on disk."""
        __SCREAMING_SNAKE_CASE: Tuple = tempfile.mkdtemp()
        __SCREAMING_SNAKE_CASE: List[Any] = 8
        # DPR tok
        __SCREAMING_SNAKE_CASE: int = [
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''[PAD]''',
            '''[MASK]''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        __SCREAMING_SNAKE_CASE: Any = os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
        os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
        __SCREAMING_SNAKE_CASE: Union[str, Any] = os.path.join(_lowerCAmelCase , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        # BART tok
        __SCREAMING_SNAKE_CASE: Optional[Any] = [
            '''l''',
            '''o''',
            '''w''',
            '''e''',
            '''r''',
            '''s''',
            '''t''',
            '''i''',
            '''d''',
            '''n''',
            '''\u0120''',
            '''\u0120l''',
            '''\u0120n''',
            '''\u0120lo''',
            '''\u0120low''',
            '''er''',
            '''\u0120lowest''',
            '''\u0120newer''',
            '''\u0120wider''',
            '''<unk>''',
        ]
        __SCREAMING_SNAKE_CASE: Optional[int] = dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) )
        __SCREAMING_SNAKE_CASE: Tuple = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        __SCREAMING_SNAKE_CASE: List[Any] = {'''unk_token''': '''<unk>'''}
        __SCREAMING_SNAKE_CASE: Dict = os.path.join(self.tmpdirname , '''bart_tokenizer''' )
        os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
        __SCREAMING_SNAKE_CASE: Optional[Any] = os.path.join(_lowerCAmelCase , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
        __SCREAMING_SNAKE_CASE: Union[str, Any] = os.path.join(_lowerCAmelCase , BART_VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(_lowerCAmelCase ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(_lowerCAmelCase ) )

    def snake_case_ ( self ):
        """Load the tiny DPR question-encoder tokenizer written in setUp."""
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )

    def snake_case_ ( self ):
        """Load the tiny BART generator tokenizer written in setUp."""
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )

    def snake_case_ ( self ):
        """Remove the temporary directory created in setUp."""
        shutil.rmtree(self.tmpdirname )

    @require_tokenizers
    def snake_case_ ( self ):
        """save_pretrained / from_pretrained must round-trip both sub-tokenizers."""
        __SCREAMING_SNAKE_CASE: Optional[int] = os.path.join(self.tmpdirname , '''rag_tokenizer''' )
        __SCREAMING_SNAKE_CASE: int = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() )
        __SCREAMING_SNAKE_CASE: Tuple = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() )
        rag_config.save_pretrained(_lowerCAmelCase )
        rag_tokenizer.save_pretrained(_lowerCAmelCase )
        __SCREAMING_SNAKE_CASE: Optional[Any] = RagTokenizer.from_pretrained(_lowerCAmelCase , config=_lowerCAmelCase )
        self.assertIsInstance(new_rag_tokenizer.question_encoder , _lowerCAmelCase )
        self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() )
        self.assertIsInstance(new_rag_tokenizer.generator , _lowerCAmelCase )
        self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() )

    @slow
    def snake_case_ ( self ):
        """Batch-encode natural questions with the token-level RAG tokenizer."""
        __SCREAMING_SNAKE_CASE: Dict = RagTokenizer.from_pretrained('''facebook/rag-token-nq''' )
        __SCREAMING_SNAKE_CASE: int = [
            '''who got the first nobel prize in physics''',
            '''when is the next deadpool movie being released''',
            '''which mode is used for short wave broadcast service''',
            '''who is the owner of reading football club''',
            '''when is the next scandal episode coming out''',
            '''when is the last time the philadelphia won the superbowl''',
            '''what is the most current adobe flash player version''',
            '''how many episodes are there in dragon ball z''',
            '''what is the first step in the evolution of the eye''',
            '''where is gall bladder situated in human body''',
            '''what is the main mineral in lithium batteries''',
            '''who is the president of usa right now''',
            '''where do the greasers live in the outsiders''',
            '''panda is a national animal of which country''',
            '''what is the name of manchester united stadium''',
        ]
        __SCREAMING_SNAKE_CASE: Union[str, Any] = tokenizer(_lowerCAmelCase )
        self.assertIsNotNone(_lowerCAmelCase )

    @slow
    def snake_case_ ( self ):
        """Batch-encode natural questions with the sequence-level RAG tokenizer."""
        __SCREAMING_SNAKE_CASE: Optional[Any] = RagTokenizer.from_pretrained('''facebook/rag-sequence-nq''' )
        __SCREAMING_SNAKE_CASE: Any = [
            '''who got the first nobel prize in physics''',
            '''when is the next deadpool movie being released''',
            '''which mode is used for short wave broadcast service''',
            '''who is the owner of reading football club''',
            '''when is the next scandal episode coming out''',
            '''when is the last time the philadelphia won the superbowl''',
            '''what is the most current adobe flash player version''',
            '''how many episodes are there in dragon ball z''',
            '''what is the first step in the evolution of the eye''',
            '''where is gall bladder situated in human body''',
            '''what is the main mineral in lithium batteries''',
            '''who is the president of usa right now''',
            '''where do the greasers live in the outsiders''',
            '''panda is a national animal of which country''',
            '''what is the name of manchester united stadium''',
        ]
        __SCREAMING_SNAKE_CASE: Tuple = tokenizer(_lowerCAmelCase )
        self.assertIsNotNone(_lowerCAmelCase )
| 146 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class a ( __lowercase ):
    """Unconditional image generation pipeline using the SDE-VE (score-based) sampler.

    Fixes over the previous revision: `__init__` and `__call__` declared the same
    parameter name repeatedly (a SyntaxError), and every intermediate result was
    bound to a throwaway name while the following statements read the canonical
    variables (`shape`, `model`, `sample`, `output`, ...), which were undefined.
    The predictor–corrector sampling loop itself is unchanged.
    """

    # Components registered by __init__.
    unet: UNetaDModel
    scheduler: ScoreSdeVeScheduler

    def __init__( self , unet , scheduler ):
        """Store the denoising UNet and the SDE-VE scheduler on the pipeline."""
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )

    @torch.no_grad()
    def __call__( self , batch_size = 1 , num_inference_steps = 2000 , generator = None , output_type = "pil" , return_dict = True , **kwargs , ):
        """Sample `batch_size` images with a predictor–corrector loop.

        Returns an ImagePipelineOutput, or a plain tuple when return_dict=False.
        """
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        # Start from pure noise scaled to the scheduler's initial sigma.
        sample = randn_tensor(shape , generator=generator ) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device )
        self.scheduler.set_timesteps(num_inference_steps )
        self.scheduler.set_sigmas(num_inference_steps )
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
            # correction step
            for _ in range(self.scheduler.config.correct_steps ):
                model_output = self.unet(sample , sigma_t ).sample
                sample = self.scheduler.step_correct(model_output , sample , generator=generator ).prev_sample
            # prediction step
            model_output = model(sample , sigma_t ).sample
            output = self.scheduler.step_pred(model_output , t , sample , generator=generator )
            sample, sample_mean = output.prev_sample, output.prev_sample_mean
        # Final sample is the predicted mean, mapped into [0, 1] HWC numpy images.
        sample = sample_mean.clamp(0 , 1 )
        sample = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample )
        if not return_dict:
            return (sample,)
        return ImagePipelineOutput(images=sample )
| 146 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
UpperCamelCase_ = logging.get_logger(__name__)
class _a ( SCREAMING_SNAKE_CASE ):
    """Deprecated alias kept for backward compatibility.

    DeiTFeatureExtractor was renamed to DeiTImageProcessor; this shim only
    emits a FutureWarning and defers everything to the parent class.
    """

    def __init__( self, *args, **kwargs ):
        # Fixes: the previous revision declared `*A, **A` (duplicate parameter
        # name — a SyntaxError) and passed an unbound name as the warning category.
        warnings.warn(
            'The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use DeiTImageProcessor instead.', FutureWarning, )
        super().__init__(*args, **kwargs )
| 28 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy import table for the I-BERT model: maps submodule name -> exported symbols.
# Fixes over the previous revision: the table and the torch-only symbol list were
# bound to throwaway names, and the _LazyModule proxy was never installed in
# sys.modules, so `_import_structure` was undefined and laziness never took effect.
_import_structure = {'''configuration_ibert''': ['''IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''IBertConfig''', '''IBertOnnxConfig''']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is present: expose the modeling symbols as well.
    _import_structure['''modeling_ibert'''] = [
        '''IBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''IBertForMaskedLM''',
        '''IBertForMultipleChoice''',
        '''IBertForQuestionAnswering''',
        '''IBertForSequenceClassification''',
        '''IBertForTokenClassification''',
        '''IBertModel''',
        '''IBertPreTrainedModel''',
    ]

if TYPE_CHECKING:
    # Static type checkers and IDEs see the real imports.
    from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ibert import (
            IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            IBertForMaskedLM,
            IBertForMultipleChoice,
            IBertForQuestionAnswering,
            IBertForSequenceClassification,
            IBertForTokenClassification,
            IBertModel,
            IBertPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 239 | 0 |
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def __lowerCAmelCase ( snake_case__ ):
    """Preprocess a PIL image for the super-resolution pipeline.

    Rounds width/height down to multiples of 32, converts to a float32 NCHW
    batch tensor, and maps pixel values from [0, 1] to [-1, 1].
    """
    # Fixes: unpack targets were bound to a throwaway name while `w`/`h` were
    # read below, and `np.floataa` is not a real numpy attribute (float32).
    w, h = snake_case__.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = snake_case__.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] )
    image = np.array(image ).astype(np.float32 ) / 255.0
    image = image[None].transpose(0 , 3 , 1 , 2 )
    image = torch.from_numpy(image )
    return 2.0 * image - 1.0
class A ( SCREAMING_SNAKE_CASE__ ):
    """LDM super-resolution pipeline: iteratively denoises latents conditioned
    on a low-resolution image, then decodes them with the VQ-VAE.

    NOTE(review): `__init__` below declares the same parameter name three times
    (a SyntaxError), and intermediates are bound to a throwaway name while later
    statements read the canonical names (`batch_size`, `height`, `latents`,
    `image`, ...). The identifiers were mangled; confirm against the upstream
    LDMSuperResolutionPipeline before relying on this code.
    """

    def __init__(self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ) -> int:
        # Registers vqvae / unet / scheduler as pipeline modules.
        super().__init__()
        self.register_modules(vqvae=_UpperCAmelCase , unet=_UpperCAmelCase , scheduler=_UpperCAmelCase )

    @torch.no_grad()
    def __call__(self , _UpperCAmelCase = None , _UpperCAmelCase = 1 , _UpperCAmelCase = 1_0_0 , _UpperCAmelCase = 0.0 , _UpperCAmelCase = None , _UpperCAmelCase = "pil" , _UpperCAmelCase = True , ) -> Union[Tuple, ImagePipelineOutput]:
        """Denoise [latents ‖ low-res image] with the scheduler loop and decode."""
        if isinstance(_UpperCAmelCase , PIL.Image.Image ):
            __UpperCamelCase : Any = 1
        elif isinstance(_UpperCAmelCase , torch.Tensor ):
            __UpperCamelCase : Optional[Any] = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(_UpperCAmelCase )}" )
        if isinstance(_UpperCAmelCase , PIL.Image.Image ):
            __UpperCamelCase : Tuple = preprocess(_UpperCAmelCase )
        __UpperCamelCase , __UpperCamelCase : Any = image.shape[-2:]
        # in_channels should be 6: 3 for latents, 3 for low resolution image
        __UpperCamelCase : int = (batch_size, self.unet.config.in_channels // 2, height, width)
        __UpperCamelCase : Optional[int] = next(self.unet.parameters() ).dtype
        __UpperCamelCase : int = randn_tensor(_UpperCAmelCase , generator=_UpperCAmelCase , device=self.device , dtype=_UpperCAmelCase )
        __UpperCamelCase : int = image.to(device=self.device , dtype=_UpperCAmelCase )
        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(_UpperCAmelCase , device=self.device )
        __UpperCamelCase : Dict = self.scheduler.timesteps
        # scale the initial noise by the standard deviation required by the scheduler
        __UpperCamelCase : Any = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        __UpperCamelCase : Any = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        __UpperCamelCase : Union[str, Any] = {}
        if accepts_eta:
            __UpperCamelCase : Dict = eta
        for t in self.progress_bar(_UpperCAmelCase ):
            # concat latents and low resolution image in the channel dimension.
            __UpperCamelCase : Tuple = torch.cat([latents, image] , dim=1 )
            __UpperCamelCase : Optional[Any] = self.scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase )
            # predict the noise residual
            __UpperCamelCase : Tuple = self.unet(_UpperCAmelCase , _UpperCAmelCase ).sample
            # compute the previous noisy sample x_t -> x_t-1
            __UpperCamelCase : Any = self.scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
        # decode the image latents with the VQVAE
        __UpperCamelCase : Union[str, Any] = self.vqvae.decode(_UpperCAmelCase ).sample
        __UpperCamelCase : List[str] = torch.clamp(_UpperCAmelCase , -1.0 , 1.0 )
        __UpperCamelCase : Optional[int] = image / 2 + 0.5
        __UpperCamelCase : int = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            __UpperCamelCase : int = self.numpy_to_pil(_UpperCAmelCase )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=_UpperCAmelCase )
| 399 |
"""Project Euler problem 25: index of the first Fibonacci number with n digits."""


def fibonacci(n: int) -> int:
    """Return the n-th Fibonacci number (sequence[0] == 0, sequence[1] == 1).

    Returns 0 for n == 1 or for non-integer input (original guard behavior kept).
    """
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        # Build the sequence iteratively up to index n.
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """Return the index of the first Fibonacci number with at least n digits."""
    digits = 0
    index = 2
    while digits < n:
        index += 1
        # NOTE: fibonacci() recomputes the sequence from scratch each call; kept
        # for fidelity with the original behavior.
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1_000) -> int:
    """Return the index of the first Fibonacci term to contain ``n`` digits."""
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
| 399 | 1 |
"""simple docstring"""
def snake_case ( A__ ):
return sum(i for i in range(1 ,number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print('''Program to check whether a number is a Perfect number or not...''')
lowerCamelCase_ = int(input('''Enter number: ''').strip())
print(f'{number} is {"" if perfect(number) else "not "}a Perfect Number.')
| 95 |
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
    )
    parser.add_argument(
        "--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
    )
    parser.add_argument(
        "--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
    )
    parser.add_argument("--vocab_size", default=30522, type=int)
    args = parser.parse_args()

    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    logger.info("Counting occurrences for MLM.")
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)

    # Dense per-token-id count vector; ids that never occur stay at 0.
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(f"Dump to {args.token_counts_dump}")
    with open(args.token_counts_dump, "wb") as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 121 | 0 |
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
# Sentinel used throughout the benchmark report for missing/failed metrics.
nan = float("nan")
class Tee:
    """
    A helper class to tee print's output into a file.

    Usage: sys.stdout = Tee(filename)
    """

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        # Delegate everything else (flush, isatty, ...) to the real stdout.
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes (carriage-return progress redraws) before logging
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))
def get_original_command(max_width=80, full_python_path=False):
    """
    Return the original command line string, wrapped for readability.

    Args:
        max_width (int): target wrap width for each shell line.
        full_python_path (bool): whether to keep the full interpreter path or
            just its last path segment (i.e. ``python``).
    """
    cmd = []

    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")

    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)

    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))

    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)
def get_base_command(args, output_dir):
    """Normalize ``args.base_cmd`` into an argv list, forcing our output-dir settings."""

    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)

    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"

    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"

    return [sys.executable] + shlex.split(args.base_cmd)
def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    """Run one benchmark variation and return the requested metrics.

    Metrics are read from ``output_dir/all_results.json``. On a non-zero exit
    code, returns ``{target_metric_key: nan}`` so the caller can still
    aggregate. Stdout/stderr of the run are saved under ``output_dir``.
    """

    # Enable this branch to debug everything but the run itself, with fake metric data.
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(
    id,
    cmd,
    variation_key,
    variation,
    longest_variation_len,
    target_metric_key,
    report_metric_keys,
    repeat_times,
    output_dir,
    verbose,
):
    """Run one variation ``repeat_times`` times and return the averaged metrics.

    Prints a one-line per-variation progress summary (✓ per good run, ✘ per
    failed run). Returns ``{variation_key: variation, target_metric_key: nan}``
    if every repeat failed.
    """
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    # \33[2K\r erases the tqdm leftovers on the current console line
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(results) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}
def get_versions():
    """Return a multi-line report of the software/hardware setup.

    NOTE(review): requires a CUDA device — ``torch.cuda.get_device_properties``
    raises without one.
    """
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
    return f"""
Datetime    : {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}

Software:
transformers: {transformers.__version__}
torch       : {torch.__version__}
cuda        : {torch.version.cuda}
python      : {platform.python_version()}

Hardware:
{torch.cuda.device_count()} GPUs      : {properties.name}, {properties.total_memory/2**30:0.2f}GB
"""
def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    """Build and print the benchmark report (github-markdown and console variants).

    The diff_% column is computed against ``base_variation``'s target metric if
    given and present, otherwise against the minimal target value.
    """
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))
def main():
    """Parse CLI args, run the full cartesian product of variations, and report."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--base-cmd", default=None, type=str, required=True, help="Base cmd")
    parser.add_argument(
        "--variations",
        default=None,
        type=str,
        nargs="+",
        required=True,
        help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'",
    )
    parser.add_argument(
        "--base-variation",
        default=None,
        type=str,
        help="Baseline variation to compare to. if None the minimal target value will be used to compare against",
    )
    parser.add_argument(
        "--target-metric-key",
        default=None,
        type=str,
        required=True,
        help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second",
    )
    parser.add_argument(
        "--report-metric-keys",
        default="",
        type=str,
        help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples",
    )
    parser.add_argument(
        "--repeat-times",
        default=1,
        type=int,
        help="How many times to re-run each variation - an average will be reported",
    )
    parser.add_argument(
        "--output_dir",
        default="output_benchmark",
        type=str,
        help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked",
    )
    parser.add_argument(
        "--verbose",
        default=False,
        action="store_true",
        help="Whether to show the outputs of each run or just the benchmark progress",
    )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")

    # from here on, everything printed also lands in the report file
    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1,
                cmd,
                variation_key,
                variation,
                longest_variation_len,
                args.target_metric_key,
                report_metric_keys,
                args.repeat_times,
                output_dir,
                args.verbose,
            )
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)
if __name__ == "__main__":
main()
| 102 |
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    """A univariate polynomial; ``coefficients[i]`` is the coefficient of ``x**i``."""

    def __init__(self, degree: int, coefficients) -> None:
        """
        Args:
            degree: degree of the polynomial.
            coefficients: sequence of ``degree + 1`` coefficients, lowest power first.

        Raises:
            ValueError: if the number of coefficients does not match the degree.
        """
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1.")

        self.coefficients = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_a):
        """Add two polynomials; the result has the larger of the two degrees."""
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1):
                coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_a.degree, coefficients)

    def __sub__(self, polynomial_a):
        # Subtraction is addition of the operand multiplied by the constant -1.
        return self + polynomial_a * Polynomial(0, [-1])

    def __neg__(self):
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_a):
        """Multiply two polynomials by convolving their coefficient vectors."""
        coefficients = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_a.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_a.degree, coefficients)

    def evaluate(self, substitution):
        """Evaluate the polynomial at ``substitution``."""
        result = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        """Render in descending powers, skipping zero coefficients."""
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "

            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)

        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self):
        """Return the derivative polynomial (degree reduced by one)."""
        coefficients = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant=0):
        """Return the antiderivative, with ``constant`` as the integration constant."""
        coefficients = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_a) -> bool:
        if not isinstance(polynomial_a, Polynomial):
            return False

        if self.degree != polynomial_a.degree:
            return False

        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False

        return True

    def __ne__(self, polynomial_a) -> bool:
        return not self.__eq__(polynomial_a)
| 102 | 1 |
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class _snake_case (__SCREAMING_SNAKE_CASE):
@staticmethod
@abstractmethod
def UpperCamelCase__ ( _snake_case ):
raise NotImplementedError()
@abstractmethod
def UpperCamelCase__ ( self ):
raise NotImplementedError()
| 71 |
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
A_ = {
"debug": logging.DEBUG,
"info": logging.INFO,
"warning": logging.WARNING,
"error": logging.ERROR,
"critical": logging.CRITICAL,
}
A_ = logging.WARNING
def _get_default_logging_level() -> int:
    """Return the default logging level, honoring the DATASETS_VERBOSITY env var.

    Falls back to ``_default_log_level`` when the variable is unset or invalid
    (logging a warning about the unknown option in the latter case).
    """
    env_level_str = os.getenv("DATASETS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option DATASETS_VERBOSITY={env_level_str}, "
                f"has to be one of: { ', '.join(log_levels.keys()) }"
            )
    return _default_log_level
def UpperCAmelCase ( )-> str:
'''simple docstring'''
return __name__.split('''.''' )[0]
def _get_library_root_logger() -> logging.Logger:
    """Return the root logger of this library's namespace."""
    return logging.getLogger(_get_library_name())
def _configure_library_root_logger() -> None:
    """Apply the default verbosity (env-var aware) to the library root logger."""
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level())
def _reset_library_root_logger() -> None:
    """Reset the library root logger back to NOTSET (inherit from parent)."""
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET)
def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the specified name, defaulting to the library root name."""
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name)
def get_verbosity() -> int:
    """Return the current effective level of the library root logger."""
    return _get_library_root_logger().getEffectiveLevel()
def set_verbosity(verbosity: int) -> None:
    """Set the level of the library root logger."""
    _get_library_root_logger().setLevel(verbosity)
def set_verbosity_info() -> None:
    """Set the library verbosity to the INFO level."""
    return set_verbosity(INFO)


def set_verbosity_warning() -> None:
    """Set the library verbosity to the WARNING level."""
    return set_verbosity(WARNING)


def set_verbosity_debug() -> None:
    """Set the library verbosity to the DEBUG level."""
    return set_verbosity(DEBUG)


def set_verbosity_error() -> None:
    """Set the library verbosity to the ERROR level."""
    return set_verbosity(ERROR)
def disable_propagation() -> None:
    """Stop library log records from propagating to ancestor loggers."""
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    """Let library log records propagate to ancestor loggers."""
    _get_library_root_logger().propagate = True
# Configure the library root logger at the module level (singleton-like),
# so importing this module immediately applies the default verbosity.
_configure_library_root_logger()
class EmptyTqdm:
    """Dummy tqdm which doesn't do anything (used when progress bars are disabled)."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        # Keep the iterable (first positional arg) so iteration still works.
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, attr):
        """Return an empty no-op function for any tqdm method (update, close, ...)."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return
A_ = True
class _tqdm_cls:
    """Callable proxy yielding a real tqdm bar or an EmptyTqdm based on ``_tqdm_active``."""

    def __call__(self, *args, disable=False, **kwargs):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
# Public tqdm-like entry point; dispatches to real tqdm or the no-op version.
tqdm = _tqdm_cls()
def is_progress_bar_enabled() -> bool:
    """Return a boolean indicating whether tqdm progress bars are enabled."""
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar() -> None:
    """Enable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = True


def disable_progress_bar() -> None:
    """Disable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = False
| 393 | 0 |
"""simple docstring"""
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class _A(unittest.TestCase):
    """Unit tests for the generation stopping criteria."""

    def _get_tensors(self, length):
        """Build dummy (input_ids, scores) tensors of the given sequence length."""
        batch_size = 3
        vocab_size = 250

        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )

        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

        # StoppingCriteriaList derives max_length from start_length + max_new_tokens
        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        # start the clock 0.2s in the past so the 0.1s budget is already exceeded
        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)

        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)

        self.assertEqual(len(stopping_criteria), 1)
| 711 | """simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
__lowerCAmelCase : int ={"""LayoutLMv2Config""", """LayoutLMv3Config"""}
@is_pipeline_test
class _A ( unittest.TestCase ):
snake_case__ : Optional[int] = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
snake_case__ : str = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
snake_case__ : Optional[Any] = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
snake_case__ : List[Any] = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
@require_torch
def A__ ( self ):
"""simple docstring"""
lowercase = pipeline(
task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""pt""" )
lowercase = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(__lowerCAmelCase ) , [{"""label""": """LABEL_0""", """score""": 0.5_0_4}] )
lowercase = text_classifier("""This is great !""" , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [{"""label""": """LABEL_0""", """score""": 0.5_0_4}, {"""label""": """LABEL_1""", """score""": 0.4_9_6}] )
lowercase = text_classifier(["""This is great !""", """This is bad"""] , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [
[{"""label""": """LABEL_0""", """score""": 0.5_0_4}, {"""label""": """LABEL_1""", """score""": 0.4_9_6}],
[{"""label""": """LABEL_0""", """score""": 0.5_0_4}, {"""label""": """LABEL_1""", """score""": 0.4_9_6}],
] , )
lowercase = text_classifier("""This is great !""" , top_k=1 )
self.assertEqual(nested_simplify(__lowerCAmelCase ) , [{"""label""": """LABEL_0""", """score""": 0.5_0_4}] )
# Legacy behavior
lowercase = text_classifier("""This is great !""" , return_all_scores=__lowerCAmelCase )
self.assertEqual(nested_simplify(__lowerCAmelCase ) , [{"""label""": """LABEL_0""", """score""": 0.5_0_4}] )
lowercase = text_classifier("""This is great !""" , return_all_scores=__lowerCAmelCase )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [[{"""label""": """LABEL_0""", """score""": 0.5_0_4}, {"""label""": """LABEL_1""", """score""": 0.4_9_6}]] )
lowercase = text_classifier(["""This is great !""", """Something else"""] , return_all_scores=__lowerCAmelCase )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [
[{"""label""": """LABEL_0""", """score""": 0.5_0_4}, {"""label""": """LABEL_1""", """score""": 0.4_9_6}],
[{"""label""": """LABEL_0""", """score""": 0.5_0_4}, {"""label""": """LABEL_1""", """score""": 0.4_9_6}],
] , )
lowercase = text_classifier(["""This is great !""", """Something else"""] , return_all_scores=__lowerCAmelCase )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [
{"""label""": """LABEL_0""", """score""": 0.5_0_4},
{"""label""": """LABEL_0""", """score""": 0.5_0_4},
] , )
@require_torch
def A__ ( self ):
"""simple docstring"""
import torch
lowercase = pipeline(
task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""pt""" , device=torch.device("""cpu""" ) , )
lowercase = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(__lowerCAmelCase ) , [{"""label""": """LABEL_0""", """score""": 0.5_0_4}] )
@require_tf
def A__ ( self ):
"""simple docstring"""
lowercase = pipeline(
task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""tf""" )
lowercase = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(__lowerCAmelCase ) , [{"""label""": """LABEL_0""", """score""": 0.5_0_4}] )
@slow
@require_torch
def A__ ( self ):
"""simple docstring"""
lowercase = pipeline("""text-classification""" )
lowercase = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(__lowerCAmelCase ) , [{"""label""": """POSITIVE""", """score""": 1.0}] )
lowercase = text_classifier("""This is bad !""" )
self.assertEqual(nested_simplify(__lowerCAmelCase ) , [{"""label""": """NEGATIVE""", """score""": 1.0}] )
lowercase = text_classifier("""Birds are a type of animal""" )
self.assertEqual(nested_simplify(__lowerCAmelCase ) , [{"""label""": """POSITIVE""", """score""": 0.9_8_8}] )
@slow
@require_tf
def A__ ( self ):
"""simple docstring"""
lowercase = pipeline("""text-classification""" , framework="""tf""" )
lowercase = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(__lowerCAmelCase ) , [{"""label""": """POSITIVE""", """score""": 1.0}] )
lowercase = text_classifier("""This is bad !""" )
self.assertEqual(nested_simplify(__lowerCAmelCase ) , [{"""label""": """NEGATIVE""", """score""": 1.0}] )
lowercase = text_classifier("""Birds are a type of animal""" )
self.assertEqual(nested_simplify(__lowerCAmelCase ) , [{"""label""": """POSITIVE""", """score""": 0.9_8_8}] )
def get_test_pipeline(self, model, tokenizer, processor):
    """Build the pipeline under test; `processor` is unused for text classification."""
    text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer)
    return text_classifier, ["HuggingFace is in", "This is another test"]
def run_pipeline_test(self, text_classifier, _):
    """Generic pipeline-harness checks: single input, batch, top_k=None, text pairs."""
    model = text_classifier.model
    # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
    valid_inputs = "HuggingFace is in"
    outputs = text_classifier(valid_inputs)

    self.assertEqual(nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}])
    self.assertTrue(outputs[0]["label"] in model.config.id2label.values())

    valid_inputs = ["HuggingFace is in ", "Paris is in France"]
    outputs = text_classifier(valid_inputs)
    self.assertEqual(
        nested_simplify(outputs),
        [{"label": ANY(str), "score": ANY(float)}, {"label": ANY(str), "score": ANY(float)}],
    )
    self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
    self.assertTrue(outputs[1]["label"] in model.config.id2label.values())

    # Forcing to get all results with `top_k=None`
    # This is NOT the legacy format
    outputs = text_classifier(valid_inputs, top_k=None)
    N = len(model.config.id2label.values())
    self.assertEqual(
        nested_simplify(outputs),
        [[{"label": ANY(str), "score": ANY(float)}] * N, [{"label": ANY(str), "score": ANY(float)}] * N],
    )

    valid_inputs = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
    outputs = text_classifier(valid_inputs)
    self.assertEqual(
        nested_simplify(outputs),
        {"label": ANY(str), "score": ANY(float)},
    )
    self.assertTrue(outputs["label"] in model.config.id2label.values())

    # This might be used a text pair, but tokenizer + pipe interaction
    # makes it hard to understand that it's not using the pair properly
    # https://github.com/huggingface/transformers/issues/17305
    # We disabled this usage instead as it was outputting wrong outputs.
    invalid_input = [["HuggingFace is in ", "Paris is in France"]]
    with self.assertRaises(ValueError):
        text_classifier(invalid_input)

    # This used to be valid for doing text pairs
    # We're keeping it working because of backward compatibility
    outputs = text_classifier([[["HuggingFace is in ", "Paris is in France"]]])
    self.assertEqual(
        nested_simplify(outputs),
        [{"label": ANY(str), "score": ANY(float)}],
    )
    self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
| 197 | 0 |
"""Lazy import structure for the ALBERT model family (config, tokenizers, PT/TF/Flax models)."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Maps submodule name -> public symbols; optional sections are appended below,
# each guarded by its dependency probe so missing backends degrade gracefully.
_import_structure = {
    "configuration_albert": ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig", "AlbertOnnxConfig"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_albert"] = ["AlbertTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_albert_fast"] = ["AlbertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_albert"] = [
        "ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AlbertForMaskedLM",
        "AlbertForMultipleChoice",
        "AlbertForPreTraining",
        "AlbertForQuestionAnswering",
        "AlbertForSequenceClassification",
        "AlbertForTokenClassification",
        "AlbertModel",
        "AlbertPreTrainedModel",
        "load_tf_weights_in_albert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_albert"] = [
        "TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFAlbertForMaskedLM",
        "TFAlbertForMultipleChoice",
        "TFAlbertForPreTraining",
        "TFAlbertForQuestionAnswering",
        "TFAlbertForSequenceClassification",
        "TFAlbertForTokenClassification",
        "TFAlbertMainLayer",
        "TFAlbertModel",
        "TFAlbertPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_albert"] = [
        "FlaxAlbertForMaskedLM",
        "FlaxAlbertForMultipleChoice",
        "FlaxAlbertForPreTraining",
        "FlaxAlbertForQuestionAnswering",
        "FlaxAlbertForSequenceClassification",
        "FlaxAlbertForTokenClassification",
        "FlaxAlbertModel",
        "FlaxAlbertPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers; mirrors _import_structure above.
    from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_albert import AlbertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_albert_fast import AlbertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_albert import (
            ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForPreTraining,
            AlbertForQuestionAnswering,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertModel,
            AlbertPreTrainedModel,
            load_tf_weights_in_albert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_albert import (
            TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFAlbertForMaskedLM,
            TFAlbertForMultipleChoice,
            TFAlbertForPreTraining,
            TFAlbertForQuestionAnswering,
            TFAlbertForSequenceClassification,
            TFAlbertForTokenClassification,
            TFAlbertMainLayer,
            TFAlbertModel,
            TFAlbertPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_albert import (
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForPreTraining,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
            FlaxAlbertModel,
            FlaxAlbertPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 390 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_rembert import RemBertTokenizer
else:
    # sentencepiece missing: no slow tokenizer to fall back to.
    RemBertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
    "tokenizer_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}

SPIECE_UNDERLINE = "▁"
class snake_case(PreTrainedTokenizerFast):
    """Fast RemBERT tokenizer backed by the HuggingFace *tokenizers* library.

    Wraps the sentencepiece (Unigram) vocabulary; sequences are formatted as
    ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``.
    """

    # Contract attributes read by the PreTrainedTokenizerFast machinery.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word: strips whitespace on its left only.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Return `[CLS] A [SEP]` (single sequence) or `[CLS] A [SEP] B [SEP]` (pair)."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Return a 0/1 mask marking special tokens in the (to-be-)formatted sequence."""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Segment ids: 0 for `[CLS] A [SEP]`, 1 for the optional `B [SEP]` part."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Copy the sentencepiece vocab file into `save_directory`; returns the path tuple."""
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 477 | 0 |
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    """Reader that builds a (streaming or map-style) Dataset from JSON files."""

    def __init__(
        self,
        path_or_paths,
        split=None,
        features=None,
        cache_dir=None,
        keep_in_memory=False,
        streaming=False,
        field=None,
        num_proc=None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.field = field
        # Normalize to a {split: paths} mapping expected by the builder.
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            field=field,
            **kwargs,
        )

    def lowerCamelCase(self):
        """Materialize the dataset: streaming yields an IterableDataset, otherwise download+prepare."""
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
class JsonDatasetWriter:
    """Writer that serializes a Dataset to JSON (optionally sharded across processes)."""

    def __init__(self, dataset, path_or_buf, batch_size=None, num_proc=None, **to_json_kwargs):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs

    def lowerCamelCase(self):
        """Write the dataset to `self.path_or_buf`; returns number of bytes written."""
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        # JSON-lines is the natural default for the "records" orient.
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    " was passed. Please provide a local path instead."
                )
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs
            )
        return written

    def _batch_json(self, args):
        """Serialize one batch (starting at `offset`) to encoded JSON bytes."""
        offset, orient, lines, index, to_json_kwargs = args

        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs
        )
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _write(self, file_obj, orient, lines, index, **to_json_kwargs):
        """Stream batches into `file_obj`, serially or via a process pool; returns bytes written."""
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating json from Arrow format",
            ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json,
                        [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating json from Arrow format",
                ):
                    written += file_obj.write(json_str)

        return written
| 301 |
'''simple docstring'''
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
# PyTorch deprecated `enable_onnx_checker`/`use_external_data_format` in 1.11; branch on this below.
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def onnx_export(
    model,
    model_args,
    output_path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    """Export `model` to ONNX at `output_path`, handling the pre/post-1.11 torch.onnx API split."""
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )
@torch.no_grad()
def convert_models(model_path, output_path, opset, fp16=False):
    """Convert a StableDiffusionPipeline checkpoint into per-component ONNX models.

    Exports text encoder, UNet, VAE encoder/decoder and (optionally) the safety
    checker, then reassembles and saves an OnnxStableDiffusionPipeline.
    """
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    pipeline = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=dtype).to(device)
    output_path = Path(output_path)

    # TEXT ENCODER
    num_tokens = pipeline.text_encoder.config.max_position_embeddings
    text_hidden_size = pipeline.text_encoder.config.hidden_size
    text_input = pipeline.tokenizer(
        "A sample prompt",
        padding="max_length",
        max_length=pipeline.tokenizer.model_max_length,
        truncation=True,
        return_tensors="pt",
    )
    onnx_export(
        pipeline.text_encoder,
        model_args=(text_input.input_ids.to(device=device, dtype=torch.int32)),
        output_path=output_path / "text_encoder" / "model.onnx",
        ordered_input_names=["input_ids"],
        output_names=["last_hidden_state", "pooler_output"],
        dynamic_axes={
            "input_ids": {0: "batch", 1: "sequence"},
        },
        opset=opset,
    )
    del pipeline.text_encoder

    # UNET
    unet_in_channels = pipeline.unet.config.in_channels
    unet_sample_size = pipeline.unet.config.sample_size
    unet_path = output_path / "unet" / "model.onnx"
    onnx_export(
        pipeline.unet,
        model_args=(
            torch.randn(2, unet_in_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
            torch.randn(2).to(device=device, dtype=dtype),
            torch.randn(2, num_tokens, text_hidden_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=unet_path,
        ordered_input_names=["sample", "timestep", "encoder_hidden_states", "return_dict"],
        output_names=["out_sample"],
        dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
            "timestep": {0: "batch"},
            "encoder_hidden_states": {0: "batch", 1: "sequence"},
        },
        opset=opset,
        use_external_data_format=True,  # UNet weights exceed the 2GB protobuf limit
    )
    unet_model_path = str(unet_path.absolute().as_posix())
    unet_dir = os.path.dirname(unet_model_path)
    unet = onnx.load(unet_model_path)
    # clean up existing tensor files
    shutil.rmtree(unet_dir)
    os.mkdir(unet_dir)
    # collate external tensor files into one
    onnx.save_model(
        unet,
        unet_model_path,
        save_as_external_data=True,
        all_tensors_to_one_file=True,
        location="weights.pb",
        convert_attribute=False,
    )
    del pipeline.unet

    # VAE ENCODER
    vae_encoder = pipeline.vae
    vae_in_channels = vae_encoder.config.in_channels
    vae_sample_size = vae_encoder.config.sample_size
    # need to get the raw tensor output (sample) from the encoder
    vae_encoder.forward = lambda sample, return_dict: vae_encoder.encode(sample, return_dict)[0].sample()
    onnx_export(
        vae_encoder,
        model_args=(
            torch.randn(1, vae_in_channels, vae_sample_size, vae_sample_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_encoder" / "model.onnx",
        ordered_input_names=["sample", "return_dict"],
        output_names=["latent_sample"],
        dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )

    # VAE DECODER
    vae_decoder = pipeline.vae
    vae_latent_channels = vae_decoder.config.latent_channels
    vae_out_channels = vae_decoder.config.out_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_encoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, vae_sample_size, vae_sample_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del pipeline.vae

    # SAFETY CHECKER
    if pipeline.safety_checker is not None:
        safety_checker = pipeline.safety_checker
        clip_num_channels = safety_checker.config.vision_config.num_channels
        clip_image_size = safety_checker.config.vision_config.image_size
        safety_checker.forward = safety_checker.forward_onnx
        onnx_export(
            pipeline.safety_checker,
            model_args=(
                torch.randn(1, clip_num_channels, clip_image_size, clip_image_size).to(device=device, dtype=dtype),
                torch.randn(1, clip_image_size, clip_image_size, clip_num_channels).to(device=device, dtype=dtype),
            ),
            output_path=output_path / "safety_checker" / "model.onnx",
            ordered_input_names=["clip_input", "images"],
            output_names=["out_images", "has_nsfw_concepts"],
            dynamic_axes={
                "clip_input": {0: "batch", 1: "channels", 2: "height", 3: "width"},
                "images": {0: "batch", 1: "height", 2: "width", 3: "channels"},
            },
            opset=opset,
        )
        del pipeline.safety_checker
        safety_checker = OnnxRuntimeModel.from_pretrained(output_path / "safety_checker")
        feature_extractor = pipeline.feature_extractor
    else:
        safety_checker = None
        feature_extractor = None

    onnx_pipeline = OnnxStableDiffusionPipeline(
        vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_encoder"),
        vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_decoder"),
        text_encoder=OnnxRuntimeModel.from_pretrained(output_path / "text_encoder"),
        tokenizer=pipeline.tokenizer,
        unet=OnnxRuntimeModel.from_pretrained(output_path / "unet"),
        scheduler=pipeline.scheduler,
        safety_checker=safety_checker,
        feature_extractor=feature_extractor,
        requires_safety_checker=safety_checker is not None,
    )

    onnx_pipeline.save_pretrained(output_path)
    print("ONNX pipeline saved to", output_path)

    del pipeline
    del onnx_pipeline
    # Sanity check: reload the exported pipeline.
    _ = OnnxStableDiffusionPipeline.from_pretrained(output_path, provider="CPUExecutionProvider")
    print("ONNX pipeline is loadable")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )
    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")

    args = parser.parse_args()

    # argparse exposes --fp16 as `args.fp16`.
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
| 301 | 1 |
import argparse
import torch
from transformers import GPTaLMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"])
    parser.add_argument("--model_name", default="roberta-large", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "roberta":
        model = RobertaForMaskedLM.from_pretrained(args.model_name)
        prefix = "roberta"
    elif args.model_type == "gpt2":
        model = GPTaLMHeadModel.from_pretrained(args.model_name)
        prefix = "transformer"

    state_dict = model.state_dict()
    compressed_sd = {}

    # Embeddings #
    if args.model_type == "gpt2":
        for param_name in ["wte.weight", "wpe.weight"]:
            compressed_sd[f"{prefix}.{param_name}"] = state_dict[f"{prefix}.{param_name}"]
    else:
        for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
            param_name = f"{prefix}.embeddings.{w}.weight"
            compressed_sd[param_name] = state_dict[param_name]
        for w in ["weight", "bias"]:
            param_name = f"{prefix}.embeddings.LayerNorm.{w}"
            compressed_sd[param_name] = state_dict[param_name]

    # Transformer Blocks #
    # Copy every second teacher layer into consecutive student slots.
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        if args.model_type == "gpt2":
            for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.h.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.h.{teacher_idx}.{layer}.{w}"
                    ]
            compressed_sd[f"{prefix}.h.{std_idx}.attn.bias"] = state_dict[f"{prefix}.h.{teacher_idx}.attn.bias"]
        else:
            for layer in [
                "attention.self.query",
                "attention.self.key",
                "attention.self.value",
                "attention.output.dense",
                "attention.output.LayerNorm",
                "intermediate.dense",
                "output.dense",
                "output.LayerNorm",
            ]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.encoder.layer.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"
                    ]
        std_idx += 1

    # Language Modeling Head ###s
    if args.model_type == "roberta":
        for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
            compressed_sd[layer] = state_dict[layer]
        if args.vocab_transform:
            for w in ["weight", "bias"]:
                compressed_sd[f"lm_head.dense.{w}"] = state_dict[f"lm_head.dense.{w}"]
                compressed_sd[f"lm_head.layer_norm.{w}"] = state_dict[f"lm_head.layer_norm.{w}"]
    elif args.model_type == "gpt2":
        for w in ["weight", "bias"]:
            compressed_sd[f"{prefix}.ln_f.{w}"] = state_dict[f"{prefix}.ln_f.{w}"]
        compressed_sd["lm_head.weight"] = state_dict["lm_head.weight"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
| 87 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def get_mobilevit_config(mobilevit_name):
    """Build a MobileViTConfig matching the named checkpoint variant."""
    config = MobileViTConfig()

    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0

    if mobilevit_name.startswith("deeplabv3_"):
        # Semantic segmentation head: PASCAL VOC labels.
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = "pascal-voc-id2label.json"
    else:
        # Image classification head: ImageNet-1k labels.
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name, base_model=False):
    """Translate one MLCVNets state-dict key into the HF MobileViT naming scheme."""
    for i in range(1, 6):
        if f"layer_{i}." in name:
            name = name.replace(f"layer_{i}.", f"encoder.layer.{i - 1}.")
    if "conv_1." in name:
        name = name.replace("conv_1.", "conv_stem.")
    if ".block." in name:
        name = name.replace(".block.", ".")
    if "exp_1x1" in name:
        name = name.replace("exp_1x1", "expand_1x1")
    if "red_1x1" in name:
        name = name.replace("red_1x1", "reduce_1x1")
    if ".local_rep.conv_3x3." in name:
        name = name.replace(".local_rep.conv_3x3.", ".conv_kxk.")
    if ".local_rep.conv_1x1." in name:
        name = name.replace(".local_rep.conv_1x1.", ".conv_1x1.")
    if ".norm." in name:
        name = name.replace(".norm.", ".normalization.")
    if ".conv." in name:
        name = name.replace(".conv.", ".convolution.")
    if ".conv_proj." in name:
        name = name.replace(".conv_proj.", ".conv_projection.")

    for i in range(0, 2):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.layer.{j}.")

    for i in range(2, 6):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.")
                if "expand_1x1" in name:
                    name = name.replace("expand_1x1", "downsampling_layer.expand_1x1")
                if "conv_3x3" in name:
                    name = name.replace("conv_3x3", "downsampling_layer.conv_3x3")
                if "reduce_1x1" in name:
                    name = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1")

    for i in range(2, 5):
        if f".global_rep.{i}.weight" in name:
            name = name.replace(f".global_rep.{i}.weight", ".layernorm.weight")
        if f".global_rep.{i}.bias" in name:
            name = name.replace(f".global_rep.{i}.bias", ".layernorm.bias")
    if ".global_rep." in name:
        name = name.replace(".global_rep.", ".transformer.")
    if ".pre_norm_mha.0." in name:
        name = name.replace(".pre_norm_mha.0.", ".layernorm_before.")
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense.")
    if ".pre_norm_ffn.0." in name:
        name = name.replace(".pre_norm_ffn.0.", ".layernorm_after.")
    if ".pre_norm_ffn.1." in name:
        name = name.replace(".pre_norm_ffn.1.", ".intermediate.dense.")
    if ".pre_norm_ffn.4." in name:
        name = name.replace(".pre_norm_ffn.4.", ".output.dense.")
    if ".transformer." in name:
        name = name.replace(".transformer.", ".transformer.layer.")

    if ".aspp_layer." in name:
        name = name.replace(".aspp_layer.", ".")
    if ".aspp_pool." in name:
        name = name.replace(".aspp_pool.", ".")
    if "seg_head." in name:
        name = name.replace("seg_head.", "segmentation_head.")
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier.")

    if "classifier.fc." in name:
        name = name.replace("classifier.fc.", "classifier.")
    elif (not base_model) and ("segmentation_head." not in name):
        # Backbone weights live under the "mobilevit." prefix in the HF model.
        name = "mobilevit." + name

    return name
def __a ( a, a, a=False ):
    """Rename original MobileViT checkpoint keys to the HF layout and split fused qkv tensors.

    NOTE(review): this block is name-garbled — the parameters are all `a`
    (a duplicate-argument SyntaxError) and every assignment target is `_a`,
    yet the body reads `orig_state_dict`, `model`, `base_model`, `key_split`,
    `val`, `dim`, etc. Presumably the original signature was
    (orig_state_dict, model, base_model=False); confirm against the upstream
    MobileViT conversion script before relying on this.
    """
    if base_model:
        # bare backbone: keys carry no "mobilevit." prefix
        _a = ""
    else:
        _a = "mobilevit."
    for key in orig_state_dict.copy().keys():
        _a = orig_state_dict.pop(a )
        if key[:8] == "encoder.":
            # strip the leading "encoder." from original keys
            _a = key[8:]
        if "qkv" in key:
            # fused qkv projection: locate the owning transformer layer, then
            # carve the tensor into query / key / value slices of size `dim`
            _a = key.split("." )
            _a = int(key_split[0][6:] ) - 1
            _a = int(key_split[3] )
            _a = model.get_submodule(F'{model_prefix}encoder.layer.{layer_num}' )
            _a = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            _a = (
                F'{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.'
            )
            if "weight" in key:
                # rows [0:dim) -> query, [dim:2*dim) -> key, last dim rows -> value
                _a = val[:dim, :]
                _a = val[dim : dim * 2, :]
                _a = val[-dim:, :]
            else:
                _a = val[:dim]
                _a = val[dim : dim * 2]
                _a = val[-dim:]
        else:
            _a = val
    return orig_state_dict
def __a ( ):
    """Download the standard COCO test image (two cats) used to sanity-check conversions.

    Fixes the original body, which passed an undefined name `a` both as the URL
    and as the `stream` argument.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # stream=True lets PIL read straight from the response's raw file object
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def __a ( mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False ):
    """
    Copy/paste/tweak an original MobileViT checkpoint into the 🤗 format.

    Fixes the original signature `def __a(a, a, a, a=False)` (duplicate-argument
    SyntaxError); the real parameter names are grounded in the body, which
    already references `mobilevit_name`, `pytorch_dump_folder_path` and
    `push_to_hub`.

    Args:
        mobilevit_name: variant name, e.g. "mobilevit_s" or "deeplabv3_mobilevit_xs";
            selects the config and the expected verification logits.
        checkpoint_path: path to the original state dict (.pt file).
        pytorch_dump_folder_path: output directory for the converted model.
        push_to_hub: when True, also upload model and image processor to the hub.
    """
    config = get_mobilevit_config(mobilevit_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # load 🤗 model: deeplabv3_* checkpoints are segmentation heads, the rest classifiers
    if mobilevit_name.startswith("deeplabv3_"):
        model = MobileViTForSemanticSegmentation(config).eval()
    else:
        model = MobileViTForImageClassification(config).eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    if mobilevit_name.startswith("deeplabv3_"):
        assert logits.shape == (1, 21, 32, 32)
        if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
                [
                    [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
                    [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
                    [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
                [
                    [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
                    [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
                    [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
                [
                    [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
                    [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
                    [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
                ]
            )
        else:
            raise ValueError(f'Unknown mobilevit_name: {mobilevit_name}')

        # compare a fixed corner of the logits against the reference values
        assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
    else:
        assert logits.shape == (1, 1000)
        if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9866, 0.2392, -1.1241])
        elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4761, -0.9399, -1.9587])
        elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653])
        else:
            raise ValueError(f'Unknown mobilevit_name: {mobilevit_name}')

        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'Saving model {mobilevit_name} to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "mobilevit_s": "mobilevit-small",
            "mobilevit_xs": "mobilevit-x-small",
            "mobilevit_xxs": "mobilevit-xx-small",
            "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
            "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
            "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
        }
        print("Pushing to the hub...")
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name, organization="apple")
        model.push_to_hub(model_name, organization="apple")
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the conversion.
    # Fixes the original, which bound the parser and parsed args to the garbled
    # name `__SCREAMING_SNAKE_CASE` while the code below read the undefined
    # names `parser` and `args`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--mobilevit_name",
        default="mobilevit_s",
        type=str,
        help=(
            "Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"
            " 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."
        ),
    )
    parser.add_argument(
        "--checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    convert_movilevit_checkpoint(
        args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
| 388 | 0 |
from collections import namedtuple
import requests
from lxml import html # type: ignore
# Lightweight record for the three headline counters scraped from the page.
a__ = namedtuple("""covid_data""", """cases deaths recovered""")
def _UpperCAmelCase ( a : str = "https://www.worldometers.info/coronavirus/" ):
    """Scrape worldometers and return a ``covid_data`` namedtuple (cases, deaths, recovered).

    Fixes the original: the parameter was annotated ``int`` for a URL string,
    and the body passed the undefined name ``lowerCAmelCase_`` instead of the
    URL and the XPath expression.
    """
    # The three big counters on the page share this div class.
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(a).content).xpath(xpath_str))
# Output template filled with the scraped numbers; printed at import time.
a__ = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
# NOTE(review): `fmt` and `covid_stats` are not defined under these names in
# this module (the template is bound to `a__`, the scraper to `_UpperCAmelCase`)
# — this call raises NameError as written; confirm the intended public names.
print(fmt.format(*covid_stats()))
| 707 |
from collections.abc import Callable
def _UpperCAmelCase ( a : Callable[[float], float] , a : float , a : float ):
snake_case__ = a
snake_case__ = b
if function(a ) == 0: # one of the a or b is a root for the function
return a
elif function(a ) == 0:
return b
elif (
function(a ) * function(a ) > 0
): # if none of these are root and they are both positive or negative,
# then this algorithm can't find the root
raise ValueError("""could not find root in given interval.""" )
else:
snake_case__ = start + (end - start) / 2.0
while abs(start - mid ) > 10**-7: # until precisely equals to 10^-7
if function(a ) == 0:
return mid
elif function(a ) * function(a ) < 0:
snake_case__ = mid
else:
snake_case__ = mid
snake_case__ = start + (end - start) / 2.0
return mid
def _UpperCAmelCase ( a : float ):
return x**3 - 2 * x - 5
if __name__ == "__main__":
    # NOTE(review): `bisection` and `f` are not defined under these names in
    # this module (both defs above are garbled to `_UpperCAmelCase`) — this
    # call raises NameError as written; confirm the intended public names.
    print(bisection(f, 1, 1_0_0_0))

    import doctest

    doctest.testmod()
| 99 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
_UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
_UpperCAmelCase : Dict = '''
Examples:
```py
>>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline
>>> import torch
>>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")
>>> pipe_prior.to("cuda")
>>> prompt = "red cat, 4k photo"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> negative_image_emb = out.negative_image_embeds
>>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")
>>> pipe.to("cuda")
>>> image = pipe(
... prompt,
... image_embeds=image_emb,
... negative_image_embeds=negative_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... ).images
>>> image[0].save("cat.png")
```
'''
def UpperCamelCase ( h, w, scale_factor=8 ):
    """Return (h, w) rescaled for the latent grid: ceil(x / scale_factor**2) * scale_factor.

    Fixes the original signature (three parameters all named ``lowercase_``, a
    duplicate-argument SyntaxError) and the undefined ``new_h``/``new_w``
    accumulators; the body already read ``h``, ``w`` and ``scale_factor``.

    Args:
        h: requested height in pixels.
        w: requested width in pixels.
        scale_factor: VQ model downscale factor (default 8).

    Returns:
        Tuple (new_h, new_w) such that each is scale_factor * ceil(dim / scale_factor**2).
    """
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        # round up when the dimension is not an exact multiple
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
class __magic_name__ ( __SCREAMING_SNAKE_CASE ):
    """Kandinsky text-to-image diffusion pipeline (text encoder + UNet + MoVQ decoder).

    NOTE(review): this class is heavily name-garbled — every assignment target
    is `lowercase` and every argument is `snake_case_`, and several `def`s have
    duplicate parameter names (a SyntaxError as written). All five `_A` methods
    share one name, so only the last binding survives. The code is kept
    byte-for-byte; comments below describe the apparent intent, to be confirmed
    against the upstream diffusers KandinskyPipeline.
    """

    # Register submodules (text_encoder, tokenizer, unet, scheduler, movq) and
    # derive the MoVQ spatial scale factor from its block widths.
    def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ):
        super().__init__()
        self.register_modules(
            text_encoder=snake_case_ , tokenizer=snake_case_ , unet=snake_case_ , scheduler=snake_case_ , movq=snake_case_ , )
        lowercase =2 ** (len(self.movq.config.block_out_channels ) - 1)

    # Prepare initial latents: sample noise (or validate user-provided latents)
    # and scale by the scheduler's initial noise sigma.
    def _A( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
        if latents is None:
            lowercase =randn_tensor(snake_case_ , generator=snake_case_ , device=snake_case_ , dtype=snake_case_ )
        else:
            if latents.shape != shape:
                raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
            lowercase =latents.to(snake_case_ )
        lowercase =latents * scheduler.init_noise_sigma
        return latents

    # Encode the prompt (and, for classifier-free guidance, the negative
    # prompt) with the multilingual CLIP text encoder; returns pooled
    # embeddings, per-token hidden states, and the attention mask.
    def _A( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_=None , ):
        lowercase =len(snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else 1
        # get prompt text embeddings
        lowercase =self.tokenizer(
            snake_case_ , padding='''max_length''' , truncation=snake_case_ , max_length=77 , return_attention_mask=snake_case_ , add_special_tokens=snake_case_ , return_tensors='''pt''' , )
        lowercase =text_inputs.input_ids
        lowercase =self.tokenizer(snake_case_ , padding='''longest''' , return_tensors='''pt''' ).input_ids
        # warn about the part of the prompt that exceeds the 77-token window
        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(snake_case_ , snake_case_ ):
            lowercase =self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
            logger.warning(
                '''The following part of your input was truncated because CLIP can only handle sequences up to'''
                f' {self.tokenizer.model_max_length} tokens: {removed_text}' )
        lowercase =text_input_ids.to(snake_case_ )
        lowercase =text_inputs.attention_mask.to(snake_case_ )
        lowercase , lowercase =self.text_encoder(
            input_ids=snake_case_ , attention_mask=snake_case_ )
        # replicate per num_images_per_prompt
        lowercase =prompt_embeds.repeat_interleave(snake_case_ , dim=0 )
        lowercase =text_encoder_hidden_states.repeat_interleave(snake_case_ , dim=0 )
        lowercase =text_mask.repeat_interleave(snake_case_ , dim=0 )
        if do_classifier_free_guidance:
            lowercase =42
            if negative_prompt is None:
                lowercase =[''''''] * batch_size
            elif type(snake_case_ ) is not type(snake_case_ ):
                raise TypeError(
                    f'`negative_prompt` should be the same type to `prompt`, but got {type(snake_case_ )} !='
                    f' {type(snake_case_ )}.' )
            elif isinstance(snake_case_ , snake_case_ ):
                lowercase =[negative_prompt]
            elif batch_size != len(snake_case_ ):
                raise ValueError(
                    f'`negative_prompt`: {negative_prompt} has batch size {len(snake_case_ )}, but `prompt`:'
                    f' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'
                    ''' the batch size of `prompt`.''' )
            else:
                lowercase =negative_prompt
            lowercase =self.tokenizer(
                snake_case_ , padding='''max_length''' , max_length=77 , truncation=snake_case_ , return_attention_mask=snake_case_ , add_special_tokens=snake_case_ , return_tensors='''pt''' , )
            lowercase =uncond_input.input_ids.to(snake_case_ )
            lowercase =uncond_input.attention_mask.to(snake_case_ )
            lowercase , lowercase =self.text_encoder(
                input_ids=snake_case_ , attention_mask=snake_case_ )
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            lowercase =negative_prompt_embeds.shape[1]
            lowercase =negative_prompt_embeds.repeat(1 , snake_case_ )
            lowercase =negative_prompt_embeds.view(batch_size * num_images_per_prompt , snake_case_ )
            lowercase =uncond_text_encoder_hidden_states.shape[1]
            lowercase =uncond_text_encoder_hidden_states.repeat(1 , snake_case_ , 1 )
            lowercase =uncond_text_encoder_hidden_states.view(
                batch_size * num_images_per_prompt , snake_case_ , -1 )
            lowercase =uncond_text_mask.repeat_interleave(snake_case_ , dim=0 )
            # done duplicates

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            lowercase =torch.cat([negative_prompt_embeds, prompt_embeds] )
            lowercase =torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
            lowercase =torch.cat([uncond_text_mask, text_mask] )
        return prompt_embeds, text_encoder_hidden_states, text_mask

    # Sequential CPU offload: move each submodule to GPU only when used.
    def _A( self , snake_case_=0 ):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('''Please install accelerate via `pip install accelerate`''' )
        lowercase =torch.device(f'cuda:{gpu_id}' )
        lowercase =[
            self.unet,
            self.text_encoder,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(snake_case_ , snake_case_ )

    # Model CPU offload with hooks (requires accelerate >= 0.17.0.dev0).
    def _A( self , snake_case_=0 ):
        if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
        lowercase =torch.device(f'cuda:{gpu_id}' )
        if self.device.type != "cpu":
            self.to('''cpu''' , silence_dtype_warnings=snake_case_ )
            torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
        lowercase =None
        for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
            lowercase , lowercase =cpu_offload_with_hook(snake_case_ , snake_case_ , prev_module_hook=snake_case_ )
        if self.safety_checker is not None:
            lowercase , lowercase =cpu_offload_with_hook(self.safety_checker , snake_case_ , prev_module_hook=snake_case_ )
        # We'll offload the last model manually.
        lowercase =hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _A( self ):
        # Resolve the device the UNet actually executes on (accounts for
        # accelerate hooks installed by the offload helpers above).
        if not hasattr(self.unet , '''_hf_hook''' ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(snake_case_ , '''_hf_hook''' )
                and hasattr(module._hf_hook , '''execution_device''' )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device

    # Denoising entry point: encode text, prepare latents, run the scheduler
    # loop with classifier-free guidance, then decode via MoVQ.
    @torch.no_grad()
    @replace_example_docstring(snake_case_ )
    def __call__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ = None , snake_case_ = 5_12 , snake_case_ = 5_12 , snake_case_ = 1_00 , snake_case_ = 4.0 , snake_case_ = 1 , snake_case_ = None , snake_case_ = None , snake_case_ = "pil" , snake_case_ = True , ):
        if isinstance(snake_case_ , snake_case_ ):
            lowercase =1
        elif isinstance(snake_case_ , snake_case_ ):
            lowercase =len(snake_case_ )
        else:
            raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(snake_case_ )}' )
        lowercase =self._execution_device
        lowercase =batch_size * num_images_per_prompt
        # guidance_scale > 1 enables classifier-free guidance
        lowercase =guidance_scale > 1.0
        lowercase , lowercase , lowercase =self._encode_prompt(
            snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
        if isinstance(snake_case_ , snake_case_ ):
            lowercase =torch.cat(snake_case_ , dim=0 )
        if isinstance(snake_case_ , snake_case_ ):
            lowercase =torch.cat(snake_case_ , dim=0 )
        if do_classifier_free_guidance:
            lowercase =image_embeds.repeat_interleave(snake_case_ , dim=0 )
            lowercase =negative_image_embeds.repeat_interleave(snake_case_ , dim=0 )
            lowercase =torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(
                dtype=prompt_embeds.dtype , device=snake_case_ )
        self.scheduler.set_timesteps(snake_case_ , device=snake_case_ )
        lowercase =self.scheduler.timesteps
        lowercase =self.unet.config.in_channels
        lowercase , lowercase =get_new_h_w(snake_case_ , snake_case_ , self.movq_scale_factor )
        # create initial latent
        lowercase =self.prepare_latents(
            (batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , snake_case_ , snake_case_ , snake_case_ , self.scheduler , )
        for i, t in enumerate(self.progress_bar(snake_case_ ) ):
            # expand the latents if we are doing classifier free guidance
            lowercase =torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            lowercase ={'''text_embeds''': prompt_embeds, '''image_embeds''': image_embeds}
            lowercase =self.unet(
                sample=snake_case_ , timestep=snake_case_ , encoder_hidden_states=snake_case_ , added_cond_kwargs=snake_case_ , return_dict=snake_case_ , )[0]
            if do_classifier_free_guidance:
                # split prediction/variance, mix cond and uncond noise by guidance scale
                lowercase , lowercase =noise_pred.split(latents.shape[1] , dim=1 )
                lowercase , lowercase =noise_pred.chunk(2 )
                lowercase , lowercase =variance_pred.chunk(2 )
                lowercase =noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                lowercase =torch.cat([noise_pred, variance_pred_text] , dim=1 )
            if not (
                hasattr(self.scheduler.config , '''variance_type''' )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                lowercase , lowercase =noise_pred.split(latents.shape[1] , dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            lowercase =self.scheduler.step(
                snake_case_ , snake_case_ , snake_case_ , generator=snake_case_ , ).prev_sample
        # post-processing
        lowercase =self.movq.decode(snake_case_ , force_not_quantize=snake_case_ )['''sample''']
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' )
        if output_type in ["np", "pil"]:
            # map [-1, 1] -> [0, 1] and move channels last for numpy/PIL output
            lowercase =image * 0.5 + 0.5
            lowercase =image.clamp(0 , 1 )
            lowercase =image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            lowercase =self.numpy_to_pil(snake_case_ )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=snake_case_ )
| 72 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
# Module logger. NOTE(review): both constants below are garbled to the same
# name `__UpperCamelCase`, so the config-map assignment clobbers the logger;
# presumably they were `logger` and `BIT_PRETRAINED_CONFIG_ARCHIVE_MAP`.
__UpperCamelCase : Tuple = logging.get_logger(__name__)

# Checkpoint name -> hosted config.json for BiT.
__UpperCamelCase : Optional[int] = {
    '''google/bit-50''': '''https://huggingface.co/google/bit-50/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE ( a_ , a_ ):
    """Configuration for BiT (Big Transfer) backbones.

    Restores the attribute/parameter names the original garbled body already
    referenced (`self.layer_types`, `self.supported_padding`, `layer_type`,
    `global_padding`, ...): the class attributes were all bound to the single
    name `lowercase__` and the `__init__` parameters were all `lowercase_`
    (a duplicate-argument SyntaxError), so nothing was ever stored on `self`.
    NOTE(review): the base-class names `a_` are garbled and kept unchanged.
    """

    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],  # mutable defaults kept for interface compatibility
        depths=[3, 4, 6, 3],
        layer_type="preactivation",
        hidden_act="relu",
        global_padding=None,
        num_groups=32,
        drop_path_rate=0.0,
        embedding_dynamic_padding=False,
        output_stride=32,
        width_factor=1,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f'layer_type={layer_type} is not one of {",".join(self.layer_types)}')
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                # normalize to the canonical upper-case spelling
                global_padding = global_padding.upper()
            else:
                raise ValueError(f'Padding strategy {global_padding} not supported')
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor
        # "stem" plus one entry per stage, used to align backbone outputs
        self.stage_names = ["stem"] + [f'stage{idx}' for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 450 | 0 |
"""simple docstring"""
from __future__ import annotations
def __magic_name__ ( _lowerCamelCase : list[int] , _lowerCamelCase : int ):
__a : Dict = 0
__a : int = len(_lowerCamelCase ) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
__a : Optional[int] = i + 1
else:
__a : Optional[int] = j - 1
return []
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): `two_pointer` is not defined under that name in this
    # module (the def above is garbled to `__magic_name__`) — this f-string
    # raises NameError as written; confirm the intended public name.
    print(f'{two_pointer([2, 7, 11, 15], 9) = }')
| 63 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger. NOTE(review): both constants are garbled to `lowercase__`,
# so the config-map assignment clobbers the logger; presumably they were
# `logger` and `VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP`.
lowercase__ = logging.get_logger(__name__)

# Checkpoint name -> hosted config.json for ViT-MSN.
lowercase__ = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    """Configuration for ViT-MSN models.

    Restores `self.*` storage: the original bound every value to the local
    name `__a` and declared all `__init__` parameters as `_lowercase`
    (a duplicate-argument SyntaxError). Parameter names and order follow the
    values the garbled body assigned. NOTE(review): the base-class name
    `__snake_case` is garbled and kept unchanged.
    """

    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
| 63 | 1 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)

# Lazy-import structure: submodule name -> public names exported from it.
# Fixes the original, which bound this dict (and the optional additions, and
# the _LazyModule instance) to the garbled name `UpperCAmelCase_` while the
# _LazyModule call below read the undefined name `_import_structure`.
_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # slow tokenizer needs sentencepiece; silently skip when unavailable
    pass
else:
    _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # fast tokenizer needs the `tokenizers` package; skip when unavailable
    pass
else:
    _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]

if TYPE_CHECKING:
    # real imports for static type checkers only
    from .processing_layoutxlm import LayoutXLMProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm import LayoutXLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast

else:
    import sys

    # replace this module with a lazy proxy so heavy deps load on first access
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 17 |
'''simple docstring'''
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def lowerCAmelCase_ ( __A : Features ):
    """Infer a Parquet writer batch size (row-group size) from the dataset features.

    Media-typed columns get smaller row groups so a single row group stays
    reasonably sized. Fixes the original body, which bound the running value
    to the local name `snake_case` while the inner closure declared
    `nonlocal batch_size` and the min() calls read undefined names.

    Args:
        __A: the dataset's `Features` object (name garbled upstream).

    Returns:
        The smallest applicable row-group size, or None when no media feature
        is present (caller falls back to the writer default).
    """
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        # shrink the running minimum whenever a media-typed feature is found
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    # walk every (possibly nested) feature
    _visit(__A, set_batch_size)
    return None if batch_size is np.inf else batch_size
class SCREAMING_SNAKE_CASE ( snake_case ):
    """Dataset reader backed by Parquet files.

    Restores real parameter names (grounded in the keyword arguments the
    original body already passed to `super().__init__` and `Parquet`): the
    garbled `__init__` declared every parameter as `SCREAMING_SNAKE_CASE__`,
    a duplicate-argument SyntaxError, and stored nothing. NOTE(review): the
    base-class name `snake_case` is garbled and kept unchanged.
    """

    def __init__(
        self,
        path_or_paths,
        split=None,
        features=None,
        cache_dir=None,
        keep_in_memory=False,
        streaming=False,
        num_proc=None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        # normalize to a {split: paths} mapping so the builder always gets a dict
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        module_hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
        self.builder = Parquet(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            hash=module_hash,
            **kwargs,
        )

    def _UpperCamelCase(self):
        """Materialize the dataset: streaming view, or download-and-prepare then load."""
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
class SCREAMING_SNAKE_CASE :
    """Write a dataset to a Parquet file path or binary buffer.

    Fixes in this garbled block: `__init__` and `_write` declared every
    parameter as `SCREAMING_SNAKE_CASE__` (duplicate-argument SyntaxError);
    values were bound to the local `snake_case` instead of `self.*`; both
    methods were named `_UpperCamelCase` so the second shadowed the first
    even though the first calls `self._write(...)`; and the final line carried
    fused dataset-separator junk (`| 329 | 0 |`). The public entry point keeps
    its (garbled) name `_UpperCamelCase` for any external callers.
    """

    def __init__(self, dataset, path_or_buf, batch_size=None, **parquet_writer_kwargs):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        # media-heavy datasets get a smaller feature-dependent row-group size
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs

    def _UpperCamelCase(self):
        """Write the dataset and return the number of bytes written."""
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            # path-like destination: open/close the file ourselves
            with open(self.path_or_buf, "wb+") as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            # already an open binary buffer
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written

    def _write(self, file_obj, batch_size, **parquet_writer_kwargs):
        """Stream the Arrow table to `file_obj` as Parquet, batch by batch."""
        written = 0
        # drop a path_or_buf kwarg if one leaked into the writer kwargs
        parquet_writer_kwargs.pop("path_or_buf", None)
        schema = self.dataset.features.arrow_schema
        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)

        for offset in logging.tqdm(
            range(0, len(self.dataset), batch_size),
            unit="ba",
            disable=not logging.is_progress_bar_enabled(),
            desc="Creating parquet from Arrow format",
        ):
            batch = query_table(
                table=self.dataset._data,
                key=slice(offset, offset + batch_size),
                indices=self.dataset._indices if self.dataset._indices is not None else None,
            )
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger. NOTE(review): both constants are garbled to `lowerCAmelCase__`,
# so the config-map assignment clobbers the logger; presumably they were
# `logger` and `WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP`.
lowerCAmelCase__ =logging.get_logger(__name__)

# Checkpoint name -> hosted config.json for Wav2Vec2.
lowerCAmelCase__ ={
    "facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
    # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class A__( _UpperCAmelCase ):
    """Configuration for Wav2Vec2 models.

    Restores real parameter names and `self.*` storage: the garbled `__init__`
    declared every parameter as `__SCREAMING_SNAKE_CASE` (a duplicate-argument
    SyntaxError) and bound every value to that same local, so nothing reached
    the instance. Names/order are grounded in the defaults the garbled
    signature listed and in the attributes the body itself read
    (`self.conv_dim`, `self.conv_stride`, `self.conv_kernel`,
    `self.num_feat_extract_layers`). NOTE(review): the base-class name
    `_UpperCAmelCase` is garbled (presumably `PretrainedConfig`) and kept.
    """

    model_type = "wav2vec2"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        adapter_attn_dim=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        # copy the conv specs so the stored config is mutable and json-friendly
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # all three conv spec lists must describe the same number of layers
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def _a(self):
        """Overall stride of the convolutional feature encoder (product of conv strides)."""
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 705 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class A__( PipelineTool ):
    """Agent tool: answer a natural-language question about a document image with Donut."""

    # Attribute names follow the PipelineTool contract; the base class reads them.
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        """Fail fast when Pillow is unavailable — the tool cannot read the document image without it."""
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")
        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        """Build Donut model inputs.

        Returns a dict with the tokenized task prompt (``decoder_input_ids``)
        and the preprocessed document image (``pixel_values``).
        """
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        # The prompt already contains Donut's special task tokens, so no extra specials.
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        """Greedily generate the answer token sequence on the tool's device."""
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        """Strip special tokens, parse Donut's output to JSON, and return the answer field."""
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        # Donut's processor API is `token2json`; `tokenajson` does not exist.
        sequence = self.pre_processor.token2json(sequence)
        return sequence["answer"]
| 690 | 0 |
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
__a = logging.getLogger()
def get_results(output_dir):
    """Load ``all_results.json`` from *output_dir* and return it as a dict.

    Raises:
        ValueError: when the results file does not exist.
    """
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        # `path` must be bound above for this message to render correctly.
        raise ValueError(f"can't find {path}")
    return results


# Backward-compatible alias for the old (obfuscated) public name.
lowerCamelCase__ = get_results
__a = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class __a( TestCasePlus ):
    """End-to-end TPU smoke tests that launch example scripts through ``xla_spawn``."""

    def test_run_glue(self) -> None:
        """Fine-tune distilbert on MRPC via xla_spawn; check accuracy and runtime."""
        import xla_spawn

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
            ./examples/pytorch/text-classification/run_glue.py
            --num_cores=8
            ./examples/pytorch/text-classification/run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --do_train
            --do_eval
            --debug tpu_metrics_debug
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --max_steps=10
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            '''.split()

        # Patch argv so xla_spawn picks up the CLI arguments above.
        with patch.object(sys, "argv", testargs):
            start = time()
            xla_spawn.main()
            end = time()

            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start, 500)

    def test_trainer_tpu(self) -> None:
        """Run the TPU trainer test suite through xla_spawn (no result assertions)."""
        import xla_spawn

        testargs = """
            ./tests/test_trainer_tpu.py
            --num_cores=8
            ./tests/test_trainer_tpu.py
            """.split()

        with patch.object(sys, "argv", testargs):
            xla_spawn.main()
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
class snake_case__ ( CLIPImageProcessor ):
    """Deprecated alias of ``CLIPImageProcessor``, kept only for backward compatibility."""

    def __init__( self , *args , **kwargs ) -> None:
        """Warn about the deprecation, then defer to CLIPImageProcessor."""
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead." ,
            FutureWarning ,
        )
        super().__init__(*args , **kwargs)
| 419 | 0 |
# Type aliases used in the annotations below (both are (x, y, z) triples).
Vectorad = tuple[float, float, float]
Pointad = tuple[float, float, float]


def create_vector(end_point1: Pointad, end_point2: Pointad) -> Vectorad:
    """Return the vector pointing from *end_point1* to *end_point2*."""
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_ad_vectors_cross(ab: Vectorad, ac: Vectorad) -> Vectorad:
    """Return the cross product ``ab x ac``."""
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vectorad, accuracy: int) -> bool:
    """True when every component rounds to zero at *accuracy* decimal places."""
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def snake_case__(point_a: Pointad, point_b: Pointad, point_c: Pointad, accuracy: int = 1_0) -> bool:
    """Return True when the three points are collinear.

    Points are collinear exactly when the cross product of the vectors
    a->b and a->c is the zero vector (up to rounding at *accuracy*).
    """
    ab = create_vector(point_a, point_b)
    ac = create_vector(point_a, point_c)
    return is_zero_vector(get_ad_vectors_cross(ab, ac), accuracy)
| 111 |
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
# SentencePiece fixtures used by the tests below; the three constants were
# previously all bound to one name, losing two of them.
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
SAMPLE_BPE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece_bpe.model""")

FRAMEWORK = "pt" if is_torch_available() else "tf"


@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
    """Tokenization tests for CamembertTokenizer / CamembertTokenizerFast."""

    # Attribute names follow the TokenizerTesterMixin contract.
    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        """Save a SentencePiece fixture tokenizer under tmpdirname."""
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """`<pad>` maps to id 1 and back."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>NOTUSED")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1_004)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_005)

    def test_rust_and_python_bpe_tokenizers(self):
        """Slow (SentencePiece BPE) and fast tokenizers must agree on ids."""
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname)

        sequence = "I was born in 92000, and this is falsé."

        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # <unk> tokens are not the same for `rust` than for `slow`.
        # Because spm gives back raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        tokens = tokenizer.convert_ids_to_tokens(ids)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

    def test_rust_and_python_full_tokenizers(self):
        """The default fixture tokenizers must agree on tokens and ids."""
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
        """Integration check against the published camembert-base checkpoint."""
        # fmt: off
        expected_encoding = {"input_ids": [[5, 54, 7_196, 297, 30, 23, 776, 18, 11, 3_215, 3_705, 8_252, 22, 3_164, 1_181, 2_116, 29, 16, 813, 25, 791, 3_314, 20, 3_446, 38, 27_575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9_088, 20, 1_517, 8, 22_804, 18_818, 10, 38, 629, 607, 607, 142, 19, 7_196, 867, 56, 10_326, 24, 2_267, 20, 416, 5_072, 15_612, 233, 734, 7, 2_399, 27, 16, 3_015, 1_649, 7, 24, 20, 4_338, 2_399, 27, 13, 3_400, 14, 13, 6_189, 8, 930, 9, 6]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # camembert is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="camembert-base",
            revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf",
            sequences=sequences,
        )
| 111 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
__A =logging.get_logger(__name__)
class _snake_case ( BaseImageProcessor ):
    r"""
    Image processor applying (optionally) shortest-edge resize, center crop,
    rescale and per-channel normalization; can also post-process semantic
    segmentation logits back to label maps.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # Defaults: shortest edge 256, center crop to 224x224, ImageNet statistics.
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so the shortest edge equals ``size["shortest_edge"]``, keeping aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        # `resize` here is the module-level image transform, not this method.
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop to ``size["height"] x size["width"]``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Rescale pixel values by ``scale`` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize with per-channel ``mean`` and ``std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Run the configured transform pipeline over one image or a batch.

        Per-call arguments override the instance defaults set in ``__init__``.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[List[Tuple]] = None):
        """Convert model logits into per-image segmentation maps (argmax over classes).

        When ``target_sizes`` is given, logits are bilinearly resized per image first.
        """
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation

from typing import TYPE_CHECKING
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Lazy-import structure: the configuration is always importable; modeling code
# is registered only when torch is available.
_import_structure = {
    "configuration_upernet": ["UperNetConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_upernet"] = [
        "UperNetForSemanticSegmentation",
        "UperNetPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_upernet import UperNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel

else:
    import sys

    # Replace this module with a lazy loader so heavy imports happen on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 666 | 0 |
"""simple docstring"""
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros
def gen_gaussian_kernel(k_size, sigma):
    """Return a ``k_size x k_size`` Gaussian kernel with the given ``sigma``."""
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g


def gaussian_filter(image, k_size, sigma):
    """Blur a 2-D grayscale image with a Gaussian kernel (im2col + matrix product).

    Returns a uint8 image of shape ``(H - k_size + 1, W - k_size + 1)``.
    """
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # turn the kernel into shape (k*k,)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype("uint8")

    return dst
if __name__ == "__main__":
    # Demo: blur a sample image with two Gaussian kernels and display the results.
    # read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # get values with two different mask size
    gaussianaxa = gaussian_filter(gray, 3, sigma=1)
    gaussianaxa_a = gaussian_filter(gray, 5, sigma=0.8)

    # show result images
    imshow("gaussian filter with 3x3 mask", gaussianaxa)
    imshow("gaussian filter with 5x5 mask", gaussianaxa_a)
    waitKey()
| 498 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
A_ = logging.get_logger(__name__)
A_ = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
A_ = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
A_ = {'''facebook/blenderbot-3B''': 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """Return a dict mapping every utf-8 byte (0-255) to a printable unicode character.

    Printable bytes map to themselves; the rest are shifted into the 256+ range
    so the byte-level BPE never has to deal with whitespace/control characters.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(c) for c in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in *word* (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class __SCREAMING_SNAKE_CASE ( UpperCamelCase ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = ['input_ids', 'attention_mask']
def __init__( self : int , snake_case : str , snake_case : Optional[Any] , snake_case : str="replace" , snake_case : Tuple="<s>" , snake_case : Dict="</s>" , snake_case : Optional[Any]="</s>" , snake_case : int="<s>" , snake_case : Dict="<unk>" , snake_case : List[Any]="<pad>" , snake_case : Union[str, Any]="<mask>" , snake_case : Optional[Any]=False , **snake_case : Union[str, Any] , ):
'''simple docstring'''
A__ : Optional[Any] = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else bos_token
A__ : Dict = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else eos_token
A__ : List[str] = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else sep_token
A__ : Tuple = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else cls_token
A__ : Dict = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else unk_token
A__ : int = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
A__ : str = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else mask_token
super().__init__(
errors=snake_case , bos_token=snake_case , eos_token=snake_case , unk_token=snake_case , sep_token=snake_case , cls_token=snake_case , pad_token=snake_case , mask_token=snake_case , add_prefix_space=snake_case , **snake_case , )
with open(snake_case , encoding="""utf-8""" ) as vocab_handle:
A__ : Dict = json.load(snake_case )
A__ : Tuple = {v: k for k, v in self.encoder.items()}
A__ : Union[str, Any] = errors # how to handle errors in decoding
A__ : int = bytes_to_unicode()
A__ : str = {v: k for k, v in self.byte_encoder.items()}
with open(snake_case , encoding="""utf-8""" ) as merges_handle:
A__ : Tuple = merges_handle.read().split("""\n""" )[1:-1]
A__ : Optional[Any] = [tuple(merge.split() ) for merge in bpe_merges]
A__ : List[str] = dict(zip(snake_case , range(len(snake_case ) ) ) )
A__ : Union[str, Any] = {}
A__ : int = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
A__ : Optional[Any] = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def _UpperCamelCase ( self : Any ):
'''simple docstring'''
return len(self.encoder )
def _UpperCamelCase ( self : int ):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def _UpperCamelCase ( self : Union[str, Any] , snake_case : Optional[Any] ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
A__ : Tuple = tuple(snake_case )
A__ : Union[str, Any] = get_pairs(snake_case )
if not pairs:
return token
while True:
A__ : Any = min(snake_case , key=lambda snake_case : self.bpe_ranks.get(snake_case , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
A__ , A__ : Optional[int] = bigram
A__ : Tuple = []
A__ : Dict = 0
while i < len(snake_case ):
try:
A__ : int = word.index(snake_case , snake_case )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
A__ : Optional[Any] = j
if word[i] == first and i < len(snake_case ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
A__ : Optional[int] = tuple(snake_case )
A__ : str = new_word
if len(snake_case ) == 1:
break
else:
A__ : List[str] = get_pairs(snake_case )
A__ : int = """ """.join(snake_case )
A__ : Optional[int] = word
return word
def _UpperCamelCase ( self : Dict , snake_case : int ):
'''simple docstring'''
A__ : Optional[Any] = []
for token in re.findall(self.pat , snake_case ):
A__ : Optional[Any] = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(snake_case ).split(""" """ ) )
return bpe_tokens
def _UpperCamelCase ( self : str , snake_case : Union[str, Any] ):
'''simple docstring'''
return self.encoder.get(snake_case , self.encoder.get(self.unk_token ) )
def _UpperCamelCase ( self : Union[str, Any] , snake_case : int ):
'''simple docstring'''
return self.decoder.get(snake_case )
def _UpperCamelCase ( self : str , snake_case : int ):
'''simple docstring'''
A__ : Optional[Any] = """""".join(snake_case )
A__ : Dict = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
def _UpperCamelCase ( self : str , snake_case : str , snake_case : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(snake_case ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
A__ : Any = os.path.join(
snake_case , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
A__ : List[Any] = os.path.join(
snake_case , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(snake_case , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=snake_case , ensure_ascii=snake_case ) + """\n""" )
A__ : str = 0
with open(snake_case , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda snake_case : kv[1] ):
if index != token_index:
logger.warning(
F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
""" Please check that the tokenizer is not corrupted!""" )
A__ : List[str] = token_index
writer.write(""" """.join(snake_case ) + """\n""" )
index += 1
return vocab_file, merge_file
def _UpperCamelCase ( self : Any , snake_case : List[int] , snake_case : Optional[List[int]] = None , snake_case : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case , token_ids_a=snake_case , already_has_special_tokens=snake_case )
if token_ids_a is None:
return [1] + ([0] * len(snake_case )) + [1]
return [1] + ([0] * len(snake_case )) + [1, 1] + ([0] * len(snake_case )) + [1]
def _UpperCamelCase ( self : List[str] , snake_case : List[int] , snake_case : Optional[List[int]] = None ):
'''simple docstring'''
A__ : Union[str, Any] = [self.sep_token_id]
A__ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _UpperCamelCase ( self : List[Any] , snake_case : Optional[int] , snake_case : List[Any]=False , **snake_case : Tuple ):
'''simple docstring'''
A__ : Optional[Any] = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(snake_case ) > 0 and not text[0].isspace()):
A__ : int = """ """ + text
return (text, kwargs)
def _UpperCamelCase ( self : Any , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
    """Append the EOS token to a sequence (Blenderbot-style single input).

    Fixes the mangled original, whose parameters shared one name
    (SyntaxError) and whose body read the undefined `token_ids_a`.
    `token_ids_1` is accepted for API compatibility but deliberately ignored.
    """
    return token_ids_0 + [self.eos_token_id]
def _UpperCamelCase ( self : int , conversation : "Conversation" ):
    """Flatten a Conversation into model input ids.

    Fixes the mangled original, which appended to `inputs` / read `input_ids`
    although both locals had been rebound to a throwaway name (NameError at
    runtime).

    Returns:
        The encoded ids, truncated on the left to `self.model_max_length`.
    """
    inputs = []
    for is_user, text in conversation.iter_texts():
        if is_user:
            # We need to space prefix as it's being done within blenderbot
            inputs.append(" " + text )
        else:
            # Generated responses should contain them already.
            inputs.append(text )
    full_string = " ".join(inputs )
    input_ids = self.encode(full_string )
    if len(input_ids ) > self.model_max_length:
        # Keep only the most recent tokens when the history is too long.
        input_ids = input_ids[-self.model_max_length :]
        logger.warning(F'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.' )
    return input_ids
| 498 | 1 |
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __lowercase ( TokenizerTesterMixin , unittest.TestCase ):
    """Tokenization tests for BioGptTokenizer.

    NOTE(review): reconstructed — the mangled original inherited from the
    undefined name `lowerCamelCase__` (presumably TokenizerTesterMixin),
    named every method `_a` (so later defs shadowed earlier ones and unittest
    never discovered the tests), and read the unbound name `lowercase_`.
    """

    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n',
            'w</w>', 'r</w>', 't</w>', 'lo', 'low', 'er</w>',
            'low</w>', 'lowest</w>', 'newer</w>', 'wider</w>', '<unk>',
        ]
        vocab_tokens = dict(zip(vocab , range(len(vocab))))
        merges = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file , 'w') as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file , 'w') as fp:
            fp.write('\n'.join(merges))

    def get_input_output_texts(self, tokenizer):
        # Identity pair: tokenize/detokenize should round-trip this text.
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file , self.merges_file)
        text = 'lower'
        bpe_tokens = ['low', 'er</w>']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens , bpe_tokens)
        input_tokens = tokens + ['<unk>']
        input_bpe_tokens = [1_4, 1_5, 2_0]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens) , input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained('microsoft/biogpt')
        text = tokenizer.encode('sequence builders' , add_special_tokens=False)
        text_a = tokenizer.encode('multi-sequence build' , add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a)
        # BioGPT prepends </s> (id 2) to each sequence.
        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_a)
| 313 |
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow:
    """CLIP processor whose image preprocessing keeps the autograd graph.

    NOTE(review): reconstructed — the mangled original gave __init__ two
    parameters with the same name (SyntaxError: duplicate argument), dropped
    every attribute assignment, and renamed `preprocess_img` (which __call__
    still calls). The class is renamed from the mangled `__lowercase`; that
    binding was immediately shadowed by the next class definition, and the
    class below instantiates `ProcessorGradientFlow` by this name.
    """

    def __init__( self , device = "cpu" , clip_model = "openai/clip-vit-large-patch14" ) -> None:
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model )
        # CLIP image normalization constants.
        self.image_mean = [0.48145466, 0.4578275, 0.40821073]
        self.image_std = [0.26862954, 0.26130258, 0.27577711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean , self.image_std )
        self.resize = torchvision.transforms.Resize(224 )
        self.center_crop = torchvision.transforms.CenterCrop(224 )

    def preprocess_img( self , images ):
        # Differentiable counterpart of the stock CLIP image processor.
        images = self.resize(images )
        images = self.center_crop(images )
        images = self.normalize(images )
        return images

    def __call__( self , text=None , images=None , **kwargs ):
        """Tokenize `text`, preprocess `images`, and move both to `device`."""
        encoding = self.tokenizer(text=text , **kwargs )
        encoding["pixel_values"] = self.preprocess_img(images )
        encoding = {key: value.to(self.device ) for (key, value) in encoding.items()}
        return encoding
class __lowercase ( nn.Module ):
    """VQGAN+CLIP image editor: optimizes a latent offset so the decoded
    image matches positive text prompts and avoids negative ones.

    NOTE(review): reconstructed — the mangled original declared every
    __init__ parameter with the same name (SyntaxError: duplicate argument)
    and named every method `_a`, while internal calls still used the
    canonical method names restored below.
    """

    def __init__(
        self,
        iterations=10,
        lr=0.01,
        vqgan=None,
        vqgan_config=None,
        vqgan_checkpoint=None,
        clip=None,
        clip_preprocessor=None,
        device=None,
        log=False,
        save_vector=True,
        return_val="image",
        quantize=True,
        save_intermediate=False,
        show_intermediate=False,
        make_grid=False,
    ) -> None:
        super().__init__()
        self.latent = None
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device , conf_path=vqgan_config , ckpt_path=vqgan_checkpoint )
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained('openai/clip-vit-base-patch32' )
        self.clip.to(self.device )
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device )
        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        # Shape of the VQGAN latent, used to sample a fresh latent in generate().
        self.latent_dim = self.vqgan.decoder.z_shape

    def make_animation( self , input_path=None , output_path=None , total_duration=5 , extend_frames=True ):
        """Assemble the saved intermediate PNGs into an animated GIF."""
        images = []
        if output_path is None:
            output_path = './animation.gif'
        if input_path is None:
            input_path = self.save_path
        paths = sorted(glob(input_path + '/*' ) )
        if not len(paths ):
            raise ValueError(
                'No images found in save path, aborting (did you pass save_intermediate=True to the generate'
                ' function?)' )
        if len(paths ) == 1:
            print('Only one image found in save path, (did you pass save_intermediate=True to the generate function?)' )
        frame_duration = total_duration / len(paths )
        frame_duration_list = [frame_duration] * len(paths )
        if extend_frames:
            # Hold the first and last frames longer.
            frame_duration_list[0] = 1.5
            frame_duration_list[-1] = 3
        for file_name in paths:
            if file_name.endswith('.png' ):
                images.append(imageio.imread(file_name ) )
        imageio.mimsave(output_path , images , duration=frame_duration_list )
        print(F"gif saved to {output_path}" )

    def _get_latent( self , path=None , img=None ):
        """Encode an image file into the VQGAN latent space."""
        if not (path or img):
            raise ValueError('Input either path or tensor' )
        if img is not None:
            raise NotImplementedError
        img = preprocess(Image.open(path ) , target_image_size=256 ).to(self.device )
        img = preprocess_vqgan(img )
        z, *_ = self.vqgan.encode(img )
        return z

    def _add_vector( self , transform_vector ):
        """Add a perturbation vector to the (detached) base latent and decode."""
        base_latent = self.latent.detach().requires_grad_()
        trans_latent = base_latent + transform_vector
        if self.quantize:
            z_q, *_ = self.vqgan.quantize(trans_latent )
        else:
            z_q = trans_latent
        return self.vqgan.decode(z_q )

    def _get_clip_similarity( self , prompts , image , weights=None ):
        """Weighted sum of CLIP image/text similarity logits."""
        clip_inputs = self.clip_preprocessor(text=prompts , images=image , return_tensors='pt' , padding=True )
        clip_outputs = self.clip(**clip_inputs )
        similarity_logits = clip_outputs.logits_per_image
        if weights is not None:
            similarity_logits = similarity_logits * weights
        return similarity_logits.sum()

    def _get_CLIP_loss( self , pos_prompts , neg_prompts , image ):
        """Loss that rewards positive-prompt similarity and penalizes negative."""
        pos_logits = self._get_clip_similarity(pos_prompts['prompts'] , image , weights=(1 / pos_prompts['weights']) )
        if neg_prompts:
            neg_logits = self._get_clip_similarity(neg_prompts['prompts'] , image , weights=neg_prompts['weights'] )
        else:
            neg_logits = torch.tensor([1] , device=self.device )
        loss = -torch.log(pos_logits ) + torch.log(neg_logits )
        return loss

    def _optimize_CLIP( self , original_img , pos_prompts , neg_prompts ):
        """Gradient-descend a latent offset; yield an image (or vector) per step."""
        vector = torch.randn_like(self.latent , requires_grad=True , device=self.device )
        optim = torch.optim.Adam([vector] , lr=self.lr )
        for i in range(self.iterations ):
            optim.zero_grad()
            transformed_img = self._add_vector(vector )
            processed_img = loop_post_process(transformed_img )
            clip_loss = self._get_CLIP_loss(pos_prompts , neg_prompts , processed_img )
            print('CLIP loss' , clip_loss )
            if self.log:
                wandb.log({'CLIP Loss': clip_loss} )
            clip_loss.backward(retain_graph=True )
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0] )
            else:
                yield vector

    def _init_logging( self , positive_prompts , negative_prompts , image_path ):
        """Start a wandb run and record the configuration/base image."""
        wandb.init(reinit=True , project='face-editor' )
        wandb.config.update({'Positive Prompts': positive_prompts} )
        wandb.config.update({'Negative Prompts': negative_prompts} )
        wandb.config.update({'lr': self.lr, 'iterations': self.iterations} )
        if image_path:
            image = Image.open(image_path )
            image = image.resize((256, 256) )
            wandb.log('Original Image' , wandb.Image(image ) )

    def process_prompts( self , prompts ):
        """Normalize prompts ('a|b', 'a:0.5', (text, w), ...) into
        {'prompts': [...], 'weights': tensor}."""
        if not prompts:
            return []
        processed_prompts = []
        weights = []
        if isinstance(prompts , str ):
            prompts = [prompt.strip() for prompt in prompts.split('|' )]
        for prompt in prompts:
            if isinstance(prompt , (tuple, list) ):
                processed_prompt = prompt[0]
                weight = float(prompt[1] )
            elif ":" in prompt:
                processed_prompt, weight = prompt.split(':' )
                weight = float(weight )
            else:
                processed_prompt = prompt
                weight = 1.0
            processed_prompts.append(processed_prompt )
            weights.append(weight )
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(weights , device=self.device ),
        }

    def generate( self , pos_prompts , neg_prompts=None , image_path=None , show_intermediate=True , save_intermediate=False , show_final=True , save_final=True , save_path=None , ):
        """Run the full edit loop from an image (or random latent)."""
        if image_path:
            self.latent = self._get_latent(image_path )
        else:
            self.latent = torch.randn(self.latent_dim , device=self.device )
        if self.log:
            self._init_logging(pos_prompts , neg_prompts , image_path )
        assert pos_prompts, "You must provide at least one positive prompt."
        pos_prompts = self.process_prompts(pos_prompts )
        neg_prompts = self.process_prompts(neg_prompts )
        if save_final and save_path is None:
            # Derive an output folder name from the prompts; de-duplicate with
            # a timestamp if it already exists.
            save_path = os.path.join('./outputs/' , '_'.join(pos_prompts['prompts'] ) )
            if not os.path.exists(save_path ):
                os.makedirs(save_path )
            else:
                save_path = save_path + '_' + get_timestamp()
                os.makedirs(save_path )
            self.save_path = save_path
        original_img = self.vqgan.decode(self.latent )[0]
        if show_intermediate:
            print('Original Image' )
            show_pil(custom_to_pil(original_img ) )
        original_img = loop_post_process(original_img )
        for iter, transformed_img in enumerate(self._optimize_CLIP(original_img , pos_prompts , neg_prompts ) ):
            if show_intermediate:
                show_pil(transformed_img )
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path , F"iter_{iter:03d}.png" ) )
            if self.log:
                wandb.log({'Image': wandb.Image(transformed_img )} )
        if show_final:
            show_pil(transformed_img )
        if save_final:
            transformed_img.save(os.path.join(self.save_path , F"iter_{iter:03d}_final.png" ) )
| 313 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
# Module logger.
logger = logging.get_logger(__name__)

# File names expected by the slow (Python) tokenizer.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
        "YituTech/conv-bert-medium-small": (
            "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
        ),
        "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "YituTech/conv-bert-base": 512,
    "YituTech/conv-bert-medium-small": 512,
    "YituTech/conv-bert-small": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "YituTech/conv-bert-base": {"do_lower_case": True},
    "YituTech/conv-bert-medium-small": {"do_lower_case": True},
    "YituTech/conv-bert-small": {"do_lower_case": True},
}


class UpperCamelCase__( PreTrainedTokenizerFast ):
    """Fast (tokenizers-backed) ConvBERT tokenizer.

    NOTE(review): reconstructed — the mangled original bound all five module
    constants to one name, declared duplicate parameter names in every method
    (SyntaxError), and inherited from an undefined base.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # Keep the backend normalizer in sync with the requested options.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , do_lower_case ) != do_lower_case
            or normalizer_state.get("strip_accents" , strip_accents ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("type" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """[CLS] A [SEP] (+ B [SEP] when a second sequence is given)."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """0s for the first sequence (incl. specials), 1s for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Persist the backend tokenizer model files and return their paths."""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 50 |
'''simple docstring'''
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class UpperCamelCase__( unittest.TestCase ):
    """Unit tests for the generation stopping criteria.

    NOTE(review): reconstructed — the mangled original named every method
    `a__` (so only the last survived and unittest discovered no tests) and
    read unbound `lowerCAmelCase` names where locals were expected.
    """

    def _get_tensors(self, length):
        # Build (input_ids, scores) for a batch of 3 at the given length.
        batch_size = 3
        vocab_size = 250
        input_ids = ids_tensor((batch_size, length) , vocab_size )
        scores = torch.ones((batch_size, length) , device=torch_device , dtype=torch.float ) / length
        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5 )
        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10 ),
                MaxTimeCriteria(max_time=0.1 ),
            ] )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids, scores = self._get_tensors(9 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids, scores = self._get_tensors(10 )
        self.assertTrue(criteria(input_ids , scores ) )

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10 )
        input_ids, scores = self._get_tensors(5 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids, scores = self._get_tensors(9 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids, scores = self._get_tensors(10 )
        self.assertTrue(criteria(input_ids , scores ) )

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
        input_ids, scores = self._get_tensors(5 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids, scores = self._get_tensors(9 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids, scores = self._get_tensors(10 )
        self.assertTrue(criteria(input_ids , scores ) )
        criteria_list = StoppingCriteriaList([criteria] )
        self.assertEqual(criteria_list.max_length , 10 )

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5 )
        criteria = MaxTimeCriteria(max_time=0.1 )
        self.assertFalse(criteria(input_ids , scores ) )
        # Backdating the start timestamp makes the criterion fire immediately.
        criteria = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
        self.assertTrue(criteria(input_ids , scores ) )

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
        # A mismatched max_length should warn.
        with self.assertWarns(UserWarning ):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList() , 11 )
        self.assertEqual(len(stopping_criteria ) , 1 )
| 50 | 1 |
'''simple docstring'''
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
# Parse pytest JSON-report logs and post a failure summary to Slack.
# NOTE(review): this script was mangled by an automated rename — every
# binding targets `lowerCamelCase`, while later lines read the intended
# names (hf_table_format, failed, group_info, payload, message, ...), so it
# raises NameError as written. Kept byte-identical; confirm intended names
# before running.
lowerCamelCase = TableFormat(
    lineabove=None,
    linebelowheader=None,
    linebetweenrows=None,
    linebelow=None,
    headerrow=DataRow("""""", """|""", """|"""),
    datarow=DataRow("""""", """|""", """|"""),
    padding=1,
    with_header_hide=None,
)
# Accumulators for failed tests and per-log-file summaries.
lowerCamelCase = []
lowerCamelCase = []
lowerCamelCase = {"""type""": """section""", """text""": {"""type""": """plain_text""", """text""": """No failed tests! 🤗""", """emoji""": True}}
lowerCamelCase = [
    {
        """type""": """header""",
        """text""": {
            """type""": """plain_text""",
            """text""": f"🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results",
            """emoji""": True,
        },
    }
]
lowerCamelCase = 0
# Scan every pytest log in the working directory.
for log in Path().glob("""*.log"""):
    lowerCamelCase = 0
    with open(log, """r""") as f:
        for line in f:
            # Each line is one JSON test record.
            lowerCamelCase = json.loads(line)
            if line.get("""nodeid""", """""") != "":
                lowerCamelCase = line["""nodeid"""]
                if line.get("""duration""", None) is not None:
                    lowerCamelCase = f"{line['duration']:.4f}"
                    if line.get("""outcome""", """""") == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split("""_""")[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    lowerCamelCase = []
    log.unlink()
# Build the Slack message body from the collected failures.
lowerCamelCase = """"""
lowerCamelCase = []
if total_num_failed > 0:
    for name, num_failed, failed_tests in group_info:
        if num_failed > 0:
            if num_failed == 1:
                message += f"*{name[1:]}: {num_failed} failed test*\n"
            else:
                message += f"*{name[1:]}: {num_failed} failed tests*\n"
            lowerCamelCase = []
            lowerCamelCase = {}
            for test in failed_tests:
                lowerCamelCase = test[0].split("""::""")
                lowerCamelCase = data[0].split("""/""")[-1]
                if data[0] not in filesafailed:
                    lowerCamelCase = [data[1:]]
                else:
                    filesafailed[data[0]] += [data[1:]]
                failed_table.append(data)
            lowerCamelCase = [test[0] for test in failed_table]
            lowerCamelCase = list(set(files))
            # Count number of instances in failed_tests
            lowerCamelCase = []
            for file in individual_files:
                table.append([file, len(filesafailed[file])])
            lowerCamelCase = tabulate(
                table,
                headers=["""Test Location""", """Num Failed"""],
                tablefmt=hf_table_format,
                stralign="""right""",
            )
            message += f"\n```\n{failed_table}\n```"
            all_filesafailed.append(filesafailed)
    if len(message) > 3000:
        # Slack message limit: truncate and point to the Action results.
        lowerCamelCase = """Too many failed tests, please see the full report in the Action results."""
        lowerCamelCase = len(err) + 10
        lowerCamelCase = message[: 3000 - offset] + f"\n...\n```\n{err}"
    print(f"### {message}")
else:
    lowerCamelCase = """No failed tests! 🤗"""
    print(f"## {message}")
    payload.append(no_error_payload)
# Only post to Slack when running in CI (TEST_TYPE is set).
if os.environ.get("""TEST_TYPE""", """""") != "":
    from slack_sdk import WebClient

    lowerCamelCase = WebClient(token=os.environ["""SLACK_API_TOKEN"""])
    if message != "No failed tests! 🤗":
        lowerCamelCase = {
            """type""": """section""",
            """text""": {
                """type""": """mrkdwn""",
                """text""": message,
            },
        }
        payload.append(md_report)
        lowerCamelCase = {
            """type""": """section""",
            """text""": {
                """type""": """mrkdwn""",
                """text""": """*For more details:*""",
            },
            """accessory""": {
                """type""": """button""",
                """text""": {
                    """type""": """plain_text""",
                    """text""": """Check Action results""",
                    """emoji""": True,
                },
                """url""": f"https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }
        payload.append(action_button)
        lowerCamelCase = {
            """type""": """context""",
            """elements""": [
                {
                    """type""": """plain_text""",
                    """text""": f"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}",
                }
            ],
        }
        payload.append(date_report)
        lowerCamelCase = client.chat_postMessage(channel="""#accelerate-ci-daily""", text=message, blocks=payload)
        lowerCamelCase = response.data["""ts"""]
        # Post per-file failure details as threaded replies.
        for failed_file in all_filesafailed:
            for test_location, test_failures in failed_file.items():
                # Keep only the first instance of the test name
                lowerCamelCase = """"""
                for i, row in enumerate(test_failures):
                    if row[0] != test_class:
                        lowerCamelCase = row[0]
                    else:
                        lowerCamelCase = """"""
                lowerCamelCase = {
                    """type""": """section""",
                    """text""": {
                        """type""": """mrkdwn""",
                        """text""": f"Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```",
                    },
                }
                client.chat_postMessage(
                    channel="""#accelerate-ci-daily""",
                    thread_ts=ts,
                    blocks=[payload],
                )
| 474 |
'''simple docstring'''
from collections.abc import Sequence
def _A ( _lowerCAmelCase = None ):
    """Return the maximum sum over all contiguous subsequences (Kadane).

    Fixes the mangled update step, which computed
    `max(nums, ans + num, nums)` — comparing the whole input list against
    integers and raising TypeError on the first iteration.

    Raises:
        ValueError: if the input is None or empty.
    """
    if _lowerCAmelCase is None or not _lowerCAmelCase:
        raise ValueError('Input sequence should not be empty' )
    best = current = _lowerCAmelCase[0]
    for num in _lowerCAmelCase[1:]:
        # Either extend the running subsequence or start fresh at `num`.
        current = max(num , current + num )
        best = max(best , current )
    return best
return ans
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Try on a sample input from the user
    # NOTE(review): mangled — `n`, `array`, and `max_subsequence_sum` are not
    # bound under these names (both inputs above are assigned to
    # `lowerCamelCase`), so this demo raises NameError as written; confirm
    # the intended names before running.
    lowerCamelCase = int(input("""Enter number of elements : """).strip())
    lowerCamelCase = list(map(int, input("""\nEnter the numbers : """).strip().split()))[:n]
    print(max_subsequence_sum(array))
| 474 | 1 |
'''simple docstring'''
# Shell snippet injected at the top of auto-generated doc notebooks to
# install the library before the first executable cell.
_lowercase : int ="\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
# First notebook cell(s): the installation snippet above.
# NOTE(review): all three constants are bound to `_lowercase` (each shadows
# the previous), and `INSTALL_CONTENT` is read but never bound under that
# name — confirm the intended names (likely INSTALL_CONTENT /
# notebook_first_cells / doc placeholder map).
_lowercase : List[Any] =[{"type": "code", "content": INSTALL_CONTENT}]
# Placeholder -> dummy-class substitutions used when rendering doc templates.
_lowercase : Optional[Any] ={
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 574 |
'''simple docstring'''
from collections.abc import Generator
from math import sin
def __UpperCAmelCase ( UpperCamelCase__ :bytes ) -> bytes:
    """Reverse the four 8-char words of a 32-char bit string (little-endian).

    Fixes the mangled original, whose accumulator was bound to a throwaway
    name while the loop read the undefined `little_endian`/`string_aa`.

    Raises:
        ValueError: if the input is not exactly 32 chars long.
    """
    if len(UpperCamelCase__ ) != 32:
        raise ValueError('Input must be of length 32' )
    little_endian = b''
    for i in [3, 2, 1, 0]:
        little_endian += UpperCamelCase__[8 * i : 8 * i + 8]
    return little_endian
def __UpperCAmelCase ( UpperCamelCase__ :int ) -> bytes:
    """Format a 32-bit int as little-endian hex bytes (e.g. 1 -> b'01000000').

    Fixes the mangled original, which tested the undefined `i` for
    negativity and read the unbound `hex_rep`/`little_endian_hex`.
    """
    if UpperCamelCase__ < 0:
        raise ValueError('Input must be non-negative' )
    # Keep only the low 32 bits, as 8 hex digits.
    hex_rep = format(UpperCamelCase__ , '08x' )[-8:]
    little_endian_hex = b''
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('utf-8' )
    return little_endian_hex
def __UpperCAmelCase ( UpperCamelCase__ :bytes ) -> bytes:
    """Expand a message to an MD5-padded bit string ('0'/'1' bytes).

    Fixes the mangled original, which iterated the undefined `message`,
    formatted the whole bytes object instead of each byte, and measured the
    byte length where the bit length is required.

    NOTE(review): calls `to_little_endian`, which is not bound under that
    name in this file (the helper above was renamed) — confirm.
    """
    bit_string = b''
    for char in UpperCamelCase__:
        bit_string += format(char , '08b' ).encode('utf-8' )
    # Original message length in bits, as a 64-char binary string.
    start_len = format(len(bit_string ) , '064b' ).encode('utf-8' )
    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string ) % 512 != 448:
        bit_string += b"0"
    # Append the length, low 32-bit word first, each word little-endian.
    bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
    return bit_string
def __UpperCAmelCase ( UpperCamelCase__ :bytes ) -> Generator[list[int], None, None]:
    """Yield the sixteen 32-bit little-endian words of each 512-char block.

    Fixes the mangled original, which read the unbound names
    `bit_string`/`block`/`block_words`.

    NOTE(review): calls `to_little_endian`, which is not bound under that
    name in this file (the helper above was renamed) — confirm.
    """
    if len(UpperCamelCase__ ) % 512 != 0:
        raise ValueError('Input must have length that\'s a multiple of 512' )
    for pos in range(0 , len(UpperCamelCase__ ) , 512 ):
        block = UpperCamelCase__[pos : pos + 512]
        block_words = []
        for i in range(0 , 512 , 32 ):
            block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
        yield block_words
def __UpperCAmelCase ( UpperCamelCase__ :int ) -> int:
    """Bitwise NOT of a 32-bit value (via bit-string flipping).

    Fixes the mangled original, which tested the undefined `i`, read the
    unbound `i_str`/`new_str`, and returned `int(param, 2)` (a TypeError).
    """
    if UpperCamelCase__ < 0:
        raise ValueError('Input must be non-negative' )
    i_str = format(UpperCamelCase__ , '032b' )
    new_str = ''
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str , 2 )
def __UpperCAmelCase ( a :int , b :int ) -> int:
    """Add two values modulo 2**32 (32-bit wrap-around addition).

    Fixes the mangled original, whose two parameters shared one name
    (SyntaxError: duplicate argument) while the body read `a` and `b`.
    """
    return (a + b) % 2**32
def __UpperCAmelCase ( i :int , shift :int ) -> int:
    """Left-rotate a 32-bit value by `shift` positions.

    Fixes the mangled original, whose two parameters shared one name
    (SyntaxError: duplicate argument) while the body read `i` and `shift`.
    """
    if i < 0:
        raise ValueError('Input must be non-negative' )
    if shift < 0:
        raise ValueError('Shift must be non-negative' )
    # For i < 2**32 the two shifted halves don't overlap, so ^ equals |.
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def __UpperCAmelCase ( UpperCamelCase__ :bytes ) -> bytes:
    # Compute the MD5 digest (RFC 1321) of `message`, returned as 32 hex
    # byte characters.
    # NOTE(review): mangled by an automated rename — the helper calls below
    # (preprocess, get_block_words, not_aa, sum_aa, left_rotate_aa,
    # reformat_hex) are not bound under those names in this file, and the
    # per-round locals (a, b, c, d, f, g, aa..da, digest, ...) are assigned
    # to `snake_case__` but read by their intended names, so this raises
    # NameError as written. Kept byte-identical pending a coordinated rename.
    snake_case__ : int = preprocess(UpperCamelCase__ )
    # Per-round additive constants: floor(2**32 * |sin(i + 1)|).
    snake_case__ : str = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
    # Starting states
    snake_case__ : List[str] = 0x67452301
    snake_case__ : Any = 0xefcdab89
    snake_case__ : List[Any] = 0x98badcfe
    snake_case__ : int = 0x10325476
    # Per-round left-rotation amounts (four per round group).
    snake_case__ : Any = [
        7,
        12,
        17,
        22,
        7,
        12,
        17,
        22,
        7,
        12,
        17,
        22,
        7,
        12,
        17,
        22,
        5,
        9,
        14,
        20,
        5,
        9,
        14,
        20,
        5,
        9,
        14,
        20,
        5,
        9,
        14,
        20,
        4,
        11,
        16,
        23,
        4,
        11,
        16,
        23,
        4,
        11,
        16,
        23,
        4,
        11,
        16,
        23,
        6,
        10,
        15,
        21,
        6,
        10,
        15,
        21,
        6,
        10,
        15,
        21,
        6,
        10,
        15,
        21,
    ]
    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(UpperCamelCase__ ):
        snake_case__ : Dict = aa
        snake_case__ : Tuple = ba
        snake_case__ : Any = ca
        snake_case__ : Any = da
        # Hash current chunk
        for i in range(64 ):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d) # Alternate definition for f
                snake_case__ : Dict = d ^ (b & (c ^ d))
                snake_case__ : Optional[int] = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c) # Alternate definition for f
                snake_case__ : Optional[Any] = c ^ (d & (b ^ c))
                snake_case__ : Tuple = (5 * i + 1) % 16
            elif i <= 47:
                snake_case__ : Union[str, Any] = b ^ c ^ d
                snake_case__ : List[str] = (3 * i + 5) % 16
            else:
                snake_case__ : int = c ^ (b | not_aa(UpperCamelCase__ ))
                snake_case__ : Optional[Any] = (7 * i) % 16
            # Round update: rotate-and-add, then cycle the four state words.
            snake_case__ : List[Any] = (f + a + added_consts[i] + block_words[g]) % 2**32
            snake_case__ : Optional[int] = d
            snake_case__ : Dict = c
            snake_case__ : Dict = b
            snake_case__ : int = sum_aa(UpperCamelCase__ , left_rotate_aa(UpperCamelCase__ , shift_amounts[i] ) )
        # Add hashed chunk to running total
        snake_case__ : Union[str, Any] = sum_aa(UpperCamelCase__ , UpperCamelCase__ )
        snake_case__ : Union[str, Any] = sum_aa(UpperCamelCase__ , UpperCamelCase__ )
        snake_case__ : Union[str, Any] = sum_aa(UpperCamelCase__ , UpperCamelCase__ )
        snake_case__ : List[Any] = sum_aa(UpperCamelCase__ , UpperCamelCase__ )
    # Concatenate the four state words as little-endian hex.
    snake_case__ : Optional[int] = reformat_hex(UpperCamelCase__ ) + reformat_hex(UpperCamelCase__ ) + reformat_hex(UpperCamelCase__ ) + reformat_hex(UpperCamelCase__ )
    return digest
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 574 | 1 |
"""simple docstring"""
import numpy
# List of input, output pairs
# List of ((x1, x2, x3), y) input/output pairs used to fit the hypothesis.
# Fixes the mangled original, which bound all five constants to
# `_lowerCAmelCase` (each shadowing the previous) and evaluated
# `len(train_data)` on an undefined name (NameError at import).
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
# Held-out examples reported by test_gradient_descent().
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
# parameter_vector[0] is the bias; parameter_vector[1:] multiply the inputs.
parameter_vector = [2, 4, 1, 5]
# Number of training examples.
m = len(train_data)
LEARNING_RATE = 0.009
def __UpperCamelCase ( example_no , data_set="train" ):
    """Prediction error (hypothesis - actual) for one example.

    Fixes the mangled signature, whose two parameters shared one name
    (SyntaxError: duplicate argument).

    NOTE(review): `calculate_hypothesis_value` and `output` are not bound
    under those names in this file (the helpers below were renamed to
    `__UpperCamelCase`) — confirm the intended module-level names.
    """
    return calculate_hypothesis_value(example_no , data_set ) - output(
        example_no , data_set )
def __UpperCamelCase ( data_input_tuple ):
    """Evaluate the linear hypothesis at one input tuple.

    Fixes the mangled loop bound, which iterated over the input tuple's own
    length instead of `len(parameter_vector) - 1`, and restores the
    accumulator/operand names (`hyp_val` and `data_input_tuple` were read
    but never bound).
    """
    hyp_val = 0
    for i in range(len(parameter_vector ) - 1 ):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    # parameter_vector[0] is the bias term.
    hyp_val += parameter_vector[0]
    return hyp_val
def __UpperCamelCase ( example_no , data_set ):
    """Actual output value of the requested example.

    Fixes the mangled signature, whose two parameters shared one name
    (SyntaxError: duplicate argument).

    Returns None for an unknown data_set name.
    """
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None
def __UpperCamelCase ( example_no , data_set ):
    """Hypothesis value for the requested example.

    Fixes the mangled signature, whose two parameters shared one name
    (SyntaxError: duplicate argument).

    NOTE(review): `_hypothesis_value` is not bound under that name in this
    file (the helper above was renamed) — confirm the intended name.
    """
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0] )
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0] )
    return None
def __UpperCamelCase ( index , end=m ):
    """Sum of per-example cost derivatives over the first `end` examples.

    `index == -1` selects the bias term (derivative weight 1); otherwise the
    error is weighted by the example's `index`-th input feature.
    Fixes the mangled signature (two parameters named identically —
    SyntaxError) and restores the loop variable/accumulator names.

    NOTE(review): `_error` is not bound under that name in this file (the
    helper above was renamed) — confirm the intended name.
    """
    summation_value = 0
    for i in range(end ):
        if index == -1:
            summation_value += _error(i )
        else:
            summation_value += _error(i ) * train_data[i][0][index]
    return summation_value
def __UpperCamelCase ( index ):
    """Average cost derivative for parameter `index` (-1 = bias).

    Fixes the mangled call, which passed the index twice instead of
    (index, m) — i.e. it summed over the wrong number of examples.
    """
    cost_derivative_value = summation_of_cost_derivative(index , m ) / m
    return cost_derivative_value
def __UpperCamelCase ( ):
    """Fit the global `parameter_vector` by batch gradient descent.

    Iterates until numpy.allclose reports convergence between successive
    parameter vectors. Fixes the mangled original, which bound every local
    to `A_` and then read the undefined names `j` and
    `temp_parameter_vector` (NameError at runtime).
    """
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000_002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0 , len(parameter_vector ) ):
            # i - 1 == -1 selects the bias-term derivative.
            cost_derivative = get_cost_derivative(i - 1 )
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector , temp_parameter_vector , atol=absolute_error_limit , rtol=relative_error_limit , ):
            break
        parameter_vector = temp_parameter_vector
    print(("""Number of iterations:""", j) )
def __UpperCamelCase ( ):
    """Print actual vs. predicted outputs for every test example.

    Fixes the mangled loop bound, which read the undefined `snake_case__`
    instead of `test_data`.

    NOTE(review): `output` and `calculate_hypothesis_value` are not bound
    under those names in this file (helpers were renamed) — confirm.
    """
    for i in range(len(test_data ) ):
        print(("""Actual output value:""", output(i , """test""" )) )
        print(("""Hypothesis output:""", calculate_hypothesis_value(i , """test""" )) )
if __name__ == "__main__":
    # Entry point: fit the parameters, then report test-set predictions.
    # NOTE(review): `run_gradient_descent` / `test_gradient_descent` are not
    # bound under these names in the mangled file (both functions above are
    # named `__UpperCamelCase`) — confirm the intended public names.
    run_gradient_descent()
    print("\nTesting gradient descent for a linear hypothesis function.\n")
    test_gradient_descent()
| 180 |
"""simple docstring"""
from __future__ import annotations
def __UpperCamelCase ( sequence , start = None , end = None ):
    """Sort `sequence` in place between `start` and `end` using slowsort.

    Fixes the mangled original, whose three parameters were all named
    `snake_case__` (SyntaxError: duplicate argument) and whose default
    handling assigned to a dead local instead of `start`/`end` (leaving
    them None and raising TypeError on comparison).
    """
    if start is None:
        start = 0
    if end is None:
        end = len(sequence ) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    # Recursively sort both halves, bubble the maximum to `end`, then sort
    # everything except the last element again (deliberately slow).
    __UpperCamelCase(sequence , start , mid )
    __UpperCamelCase(sequence , mid + 1 , end )
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    __UpperCamelCase(sequence , start , end - 1 )
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    from doctest import testmod

    testmod()
| 180 | 1 |
from __future__ import annotations
def SCREAMING_SNAKE_CASE ( number_of_bytes : int , partitions : int ):
    """Split `number_of_bytes` into `partitions` contiguous 1-based ranges.

    The last partition absorbs any remainder. Fixes the mangled original,
    whose two parameters shared the name `snake_case_` (SyntaxError:
    duplicate argument) and whose loop iterated over the undefined
    `lowerCAmelCase__`.

    Returns:
        A list of "start-end" strings, e.g. allocate(10, 3) ->
        ['1-3', '4-6', '7-10'].

    Raises:
        ValueError: if partitions <= 0 or partitions > number_of_bytes.
    """
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!" )
    if partitions > number_of_bytes:
        raise ValueError("partitions can not > number_of_bytes!" )
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions ):
        start_bytes = i * bytes_per_partition + 1
        # The final partition runs to the very last byte.
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(F'{start_bytes}-{end_bytes}' )
    return allocation_list
return allocation_list
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 708 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# fairseq checkpoint key (or key fragment) -> HF UniSpeechSat attribute path.
# "*" is a placeholder for the encoder layer index, substituted while loading.
# Fix: all three module objects below were bound to the same mangled name
# `__lowerCamelCase` (each clobbering the previous) while the functions in
# this file reference `logger`, `MAPPING` and `TOP_LEVEL_KEYS`.
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "encoder.layer_norm_for_extract": "layer_norm_for_extract",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "label_embs_concat": "label_embeddings_concat",
    "mask_emb": "masked_spec_embed",
    "spk_proj": "speaker_proj",
}
# Mapped keys that live at the top level of the HF model and therefore must
# NOT be prefixed with "unispeech_sat." during loading.
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
    "label_embeddings_concat",
    "speaker_proj",
    "layer_norm_for_extract",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy one fairseq tensor into the matching attribute of the HF model.

    Walks ``key`` (a dotted attribute path) from ``hf_pointer``, validates the
    target shape, then writes ``value`` into ``weight``/``weight_g``/
    ``weight_v``/``bias`` (or the object itself when ``weight_type`` is None).

    Fixes: the original def had five parameters all named ``snake_case_``
    (a SyntaxError), discarded every intermediate result into throwaway
    bindings, and was named ``SCREAMING_SNAKE_CASE`` even though the loader
    calls ``set_recursively``.

    NOTE(review): the parameter order (hf_pointer, key, value, full_name,
    weight_type) is reconstructed — the mangled call site gives no order;
    confirm against the upstream conversion script.

    :param hf_pointer: root HF module to descend into
    :param key: dotted attribute path within ``hf_pointer``
    :param value: tensor from the fairseq state dict
    :param full_name: original fairseq key, used for error/log messages
    :param weight_type: one of "weight", "weight_g", "weight_v", "bias" or None
    :raises ValueError: if the destination shape does not match ``value``
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    # Sanity-check the destination shape before overwriting anything.
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model):
    """Copy every tensor of a fairseq UniSpeechSat checkpoint into ``hf_model``.

    Conv feature-extractor tensors are delegated to ``load_conv_layer``; all
    other tensors are renamed via the module-level ``MAPPING`` table and
    written with ``set_recursively``. Tensors matching no rule are collected
    and reported as unused.

    Fixes: the original def had two parameters both named ``snake_case_``
    (a SyntaxError), bound every intermediate value to throwaway names, and
    was named ``SCREAMING_SNAKE_CASE`` even though the converter calls
    ``recursively_load_weights``.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # Top-level keys (lm_head, quantizer, ...) are not nested
                # under the "unispeech_sat." sub-module.
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split(".")[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        # Splice the encoder layer index into the mapped key.
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one fairseq conv feature-extractor tensor into the HF extractor.

    ``full_name`` looks like ``...conv_layers.<layer_id>.<type_id>.<param>``:
    type_id 0 is the convolution itself, type_id 2 a layer norm (only layer 0
    when group norm is used). Anything else is recorded in ``unused_weights``.

    Fixes: the original def had five parameters all named ``snake_case_``
    (a SyntaxError) and was named ``SCREAMING_SNAKE_CASE`` even though the
    loader calls ``load_conv_layer``. Additionally, two error-message
    f-strings indexed ``feature_extractor[layer_id]`` (which would raise
    TypeError while formatting the ValueError) instead of
    ``feature_extractor.conv_layers[layer_id]``.

    :raises ValueError: if a tensor's shape does not match its destination
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_sat_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """Convert a fairseq UniSpeechSat checkpoint to a HF Transformers model
    and save it to ``pytorch_dump_folder_path``.

    Fixes: the original def had five parameters all named ``snake_case_``
    (a SyntaxError), bound the model/config to throwaway names, and was
    named ``SCREAMING_SNAKE_CASE`` even though the __main__ guard calls
    ``convert_unispeech_sat_checkpoint``.

    :param checkpoint_path: path to the fairseq checkpoint
    :param pytorch_dump_folder_path: output directory for the HF model
    :param config_path: optional path to an existing HF config to load
    :param dict_path: path to the fine-tuned model's dict (see NOTE below)
    :param is_finetuned: build a CTC head (True) or a pre-training head (False)
    """
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()

    # NOTE(review): the original unconditionally cleared this value before
    # use, so the --dict_path CLI argument never reaches fairseq. Preserved
    # as-is to avoid changing behavior; confirm whether this is intentional.
    dict_path = ""

    if is_finetuned:
        hf_wav2vec = UniSpeechSatForCTC(config)
    else:
        hf_wav2vec = UniSpeechSatForPreTraining(config)

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # Fix: the parser and parsed args were bound to the mangled name
    # `__lowerCamelCase` while the code below reads `parser` and `args`,
    # which would raise NameError at import time.
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_unispeech_sat_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
| 25 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.