'''simple docstring'''
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = '.'
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
'Assert',
'AssignVariableOp',
'EmptyTensorList',
'MergeV2Checkpoints',
'ReadVariableOp',
'ResourceGather',
'RestoreV2',
'SaveV2',
'ShardedFilename',
'StatefulPartitionedCall',
'StaticRegexFullMatch',
'VarHandleOp',
]
def onnx_compliancy(saved_model_path, strict, opset):
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)

        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []

    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        raise Exception(
            f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops)
        )
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument('--saved_model_path', help='Path of the saved model to check (the .pb file).')
    parser.add_argument(
        '--opset', default=12, type=int, help='The ONNX opset against which the model has to be tested.'
    )
    parser.add_argument(
        '--framework', choices=['onnx'], default='onnx', help='Frameworks against which to test the saved model.'
    )
    parser.add_argument(
        '--strict', action='store_true', help='Whether to make the check strict (raise errors) or just print warnings.'
    )

    args = parser.parse_args()

    if args.framework == "onnx":
        onnx_compliancy(args.saved_model_path, args.strict, args.opset)
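# Example invocation (illustrative file names; the script and model paths are
# assumptions, not part of the original):
#   python check_saved_model_ops.py --saved_model_path saved_model/saved_model.pb --opset 12 --strict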
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-classification/requirements.txt')
MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def pil_loader(path: str):
    with open(path, "rb") as f:
        im = Image.open(f)
        return im.convert("RGB")
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default=None,
        metadata={
            "help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."
        },
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
            raise ValueError(
                "You must specify either a dataset name from the hub or a train and/or validation directory."
            )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default="google/vit-base-patch16-224-in21k",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    labels = torch.tensor([example["labels"] for example in examples])
    return {"pixel_values": pixel_values, "labels": labels}
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_image_classification", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Initialize our dataset and prepare it for the 'image-classification' task.
    if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            cache_dir=model_args.cache_dir,
            task="image-classification",
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files["train"] = os.path.join(data_args.train_dir, "**")
        if data_args.validation_dir is not None:
            data_files["validation"] = os.path.join(data_args.validation_dir, "**")
        dataset = load_dataset(
            "imagefolder",
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            task="image-classification",
        )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = dataset["train"].train_test_split(data_args.train_val_split)
        dataset["train"] = split["train"]
        dataset["validation"] = split["test"]

    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = dataset["train"].features["labels"].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p):
        return metric.compute(predictions=np.argmax(p.predictions, axis=1), references=p.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="image-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # Define torchvision transforms to be applied to each image.
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    normalize = Normalize(mean=image_processor.image_mean, std=image_processor.image_std)
    _train_transforms = Compose(
        [
            RandomResizedCrop(size),
            RandomHorizontalFlip(),
            ToTensor(),
            normalize,
        ]
    )
    _val_transforms = Compose(
        [
            Resize(size),
            CenterCrop(size),
            ToTensor(),
            normalize,
        ]
    )

    def train_transforms(example_batch):
        """Apply _train_transforms across a batch."""
        example_batch["pixel_values"] = [
            _train_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]
        ]
        return example_batch

    def val_transforms(example_batch):
        """Apply _val_transforms across a batch."""
        example_batch["pixel_values"] = [_val_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]]
        return example_batch

    if training_args.do_train:
        if "train" not in dataset:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            dataset["train"] = (
                dataset["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        dataset["train"].set_transform(train_transforms)
    if training_args.do_eval:
        if "validation" not in dataset:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            dataset["validation"] = (
                dataset["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        dataset["validation"].set_transform(val_transforms)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=dataset["train"] if training_args.do_train else None,
        eval_dataset=dataset["validation"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "image-classification",
        "dataset": data_args.dataset_name,
        "tags": ["image-classification", "vision"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


if __name__ == "__main__":
    main()
'''simple docstring'''
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self):
        self.head = None

    def print_list(self):
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    # adding nodes
    def push(self, new_data: Any):
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    # swapping nodes
    def swap_nodes(self, node_data_1, node_data_2):
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next

        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next

        if node_1 is None or node_2 is None:
            return

        # Swap the data payloads; the node objects themselves stay in place.
        node_1.data, node_2.data = node_2.data, node_1.data


if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)

    ll.print_list()

    ll.swap_nodes(1, 4)
    print("After swapping")
    ll.print_list()
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
    def __init__(
        self,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels,
            kernel_size=kernel_size,
            strides=stride,
            padding="VALID",
            groups=groups,
            use_bias=False,
            name="convolution",
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        self.activation = ACT2FN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetEmbeddings(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size,
            kernel_size=3,
            stride=2,
            activation=config.hidden_act,
            name="embedder",
        )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )

        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class TFRegNetShortCut(tf.keras.layers.Layer):
    def __init__(self, out_channels: int, stride: int = 2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution"
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, inputs: tf.Tensor, training: bool = False) -> tf.Tensor:
        return self.normalization(self.convolution(inputs), training=training)
class TFRegNetSELayer(tf.keras.layers.Layer):
    def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        # Squeeze to (batch_size, 1, 1, num_channels), excite, then rescale the input.
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state
class TFRegNetXLayer(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
    def __init__(
        self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs
    ):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
                name="stages.0",
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}"))

    def call(
        self, hidden_state: tf.Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> TFBaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    config_class = RegNetConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")

    @unpack_inputs
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> TFBaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values, training=training)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        # Change to NCHW output format to have uniformity in the modules
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))

        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )
class TFRegNetPreTrainedModel(TFPreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature(self):
        # NOTE: `input_signature` is an assumed property name; the dump only shows
        # an unnamed property returning this tensor spec.
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}
REGNET_START_DOCSTRING = r"""
    This model is a Tensorflow
    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
    regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training=False,
    ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values=pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        if not return_dict:
            return (outputs[0],) + outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def call(
        self,
        pixel_values: tf.Tensor = None,
        labels: tf.Tensor = None,
        output_hidden_states: bool = None,
        return_dict: bool = None,
        training=False,
    ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)

        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
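# A minimal usage sketch (assuming the `facebook/regnet-y-040` checkpoint and a
# PIL `image`; illustrative, not part of the original file):
#
#   from transformers import AutoImageProcessor, TFRegNetForImageClassification
#
#   processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#   model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#   inputs = processor(images=image, return_tensors="tf")
#   logits = model(**inputs).logits  # shape (1, num_labels)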
'''simple docstring'''
from statistics import mean, stdev
def normalization(data: list, ndigits: int = 3) -> list:
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / (sigma), ndigits) for x in data]
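# Worked example (illustrative values): for data = [2.0, 4.0, 6.0],
#   normalization(data)   -> [0.0, 0.5, 1.0]  since each x maps to (x - 2.0) / (6.0 - 2.0)
#   standardization(data) -> [-1.0, 0.0, 1.0] since mean = 4.0 and stdev = 2.0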
'''simple docstring'''
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module):
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)
def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model

    return model
def wait_for_everyone():
    PartialState().wait_for_everyone()
def save(obj, f):
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)
@contextmanager
def patch_environment(**kwargs):
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]
def get_pretty_name(obj):
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)
def merge_dicts(source, destination):
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value

    return destination
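# Worked example (illustrative): merge_dicts({"a": {"b": 1}}, {"a": {"c": 2}, "d": 3})
# returns {"a": {"b": 1, "c": 2}, "d": 3} -- nested dicts are merged recursively,
# while scalar values from `source` overwrite those already in `destination`.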
def is_port_in_use(port: int = None) -> bool:
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
'''simple docstring'''
from __future__ import annotations
sieve = [True] * 1_000_001
i = 2
while i * i <= 1_000_000:
    if sieve[i]:
        for j in range(i * i, 1_000_001, i):
            sieve[j] = False
    i += 1


def is_prime(n: int) -> bool:
    return sieve[n]


def contains_an_even_digit(n: int) -> bool:
    return any(digit in "02468" for digit in str(n))


def find_circular_primes(limit: int = 1_000_000) -> list[int]:
    result = [2]  # result already includes the number 2.
    for num in range(3, limit + 1, 2):
        if is_prime(num) and not contains_an_even_digit(num):
            str_num = str(num)
            list_nums = [int(str_num[j:] + str_num[:j]) for j in range(len(str_num))]
            if all(is_prime(i) for i in list_nums):
                result.append(num)
    return result


def count_circular_primes() -> int:
    return len(find_circular_primes())
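# Example (illustrative): 197 is a circular prime because every rotation
# (197, 971, 719) is prime. Candidates containing an even digit are skipped,
# since some rotation of them would end in an even digit and be divisible by 2.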
if __name__ == "__main__":
    print(f"{len(find_circular_primes()) = }")
'''simple docstring'''
def is_power_of_two(number: int) -> bool:
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0
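# Bit trick (illustrative): 8 = 0b1000 and 7 = 0b0111, so 8 & 7 == 0 -> True;
# 6 = 0b0110 and 5 = 0b0101 share the bit 0b0100, so 6 & 5 != 0 -> False.
# Note that 0 also satisfies the identity, so is_power_of_two(0) returns True.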
if __name__ == "__main__":
    import doctest

    doctest.testmod()
'''simple docstring'''
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand  # the digit in the module name was mangled in the dump; "sol1" is the likely original

SORTED_HANDS = (
'4S 3H 2C 7S 5H',
'9D 8H 2C 6S 7H',
'2D 6D 9D TH 7D',
'TC 8C 2S JH 6C',
'JH 8S TH AH QH',
'TS KS 5S 9S AC',
'KD 6S 9D TH AD',
'KS 8D 4D 9S 4S', # pair
'8C 4S KH JS 4D', # pair
'QH 8H KD JH 8S', # pair
'KC 4H KS 2H 8D', # pair
'KD 4S KC 3H 8S', # pair
'AH 8S AS KC JH', # pair
'3H 4C 4H 3S 2H', # 2 pairs
'5S 5D 2C KH KH', # 2 pairs
'3C KH 5D 5S KH', # 2 pairs
'AS 3C KH AD KH', # 2 pairs
'7C 7S 3S 7H 5S', # 3 of a kind
'7C 7S KH 2H 7H', # 3 of a kind
'AC KH QH AH AS', # 3 of a kind
'2H 4D 3C AS 5S', # straight (low ace)
'3C 5C 4C 2C 6H', # straight
'6S 8S 7S 5H 9H', # straight
'JS QS 9H TS KH', # straight
'QC KH TS JS AH', # straight (high ace)
'8C 9C 5C 3C TC', # flush
'3S 8S 9S 5S KS', # flush
'4C 5C 9C 8C KC', # flush
'JH 8H AH KH QH', # flush
'3D 2H 3H 2C 2D', # full house
'2H 2C 3S 3H 3D', # full house
'KH KC 3S 3H 3D', # full house
'JC 6H JS JD JH', # 4 of a kind
'JC 7H JS JD JH', # 4 of a kind
'JC KH JS JD JH', # 4 of a kind
'2S AS 4S 5S 3S', # straight flush (low ace)
'2D 6D 3D 4D 5D', # straight flush
'5C 6C 3C 7C 4C', # straight flush
'JH 9H TH KH QH', # straight flush
'JH AH TH KH QH', # royal flush (high ace straight flush)
)
TEST_COMPARE = (
('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'),
('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'),
('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'),
('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'),
('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'),
('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'),
('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'),
('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'),
('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'),
('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'),
('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'),
('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'),
('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'),
('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'),
('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'),
('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'),
('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'),
('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'),
('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'),
('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'),
('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'),
('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'),
('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'),
('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'),
('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'),
('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'),
('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'),
('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'),
('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'),
('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'),
('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'),
)
TEST_FLUSH = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', True),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', False),
('AS 3S 4S 8S 2S', True),
)
TEST_STRAIGHT = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', False),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', True),
)
TEST_FIVE_HIGH_STRAIGHT = (
('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 14]),
('2H 5D 3C AS 5S', False, [14, 5, 5, 3, 2]),
('JH QD KC AS TS', False, [14, 13, 12, 11, 10]),
('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
('JH AH TH KH QH', 0),
('JH 9H TH KH QH', 0),
('JC KH JS JD JH', 7),
('KH KC 3S 3H 3D', 6),
('8C 9C 5C 3C TC', 0),
('JS QS 9H TS KH', 0),
('7C 7S KH 2H 7H', 3),
('3C KH 5D 5S KH', 2),
('QH 8H KD JH 8S', 1),
('2D 6D 9D TH 7D', 0),
)
TEST_TYPES = (
('JH AH TH KH QH', 23),
('JH 9H TH KH QH', 22),
('JC KH JS JD JH', 21),
('KH KC 3S 3H 3D', 20),
('8C 9C 5C 3C TC', 19),
('JS QS 9H TS KH', 18),
('7C 7S KH 2H 7H', 17),
('3C KH 5D 5S KH', 16),
('QH 8H KD JH 8S', 15),
('2D 6D 9D TH 7D', 14),
)
def generate_random_hand():
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    return (generate_random_hand() for _ in range(number_of_hands))


@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight():
    # Test that five high straights are compared correctly.
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight():
    # Multiple calls to five_high_straight function should still return True
    # and shouldn't mutate the list in every call other than the first.
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project():
    # Problem number 54 from Project Euler
    # Testing from poker_hands.txt file
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
'''simple docstring'''
def perfect(number: int) -> bool:
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number
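# Worked example (illustrative): 28 is perfect because its proper divisors
# 1 + 2 + 4 + 7 + 14 sum to 28, while 12 is not (1 + 2 + 3 + 4 + 6 == 16).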
if __name__ == "__main__":
    print("Program to check whether a number is a Perfect number or not...")
    number = int(input("Enter number: ").strip())
    print(f"{number} is {'' if perfect(number) else 'not '}a Perfect Number.")
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse('1.11')
def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )


@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        '--model_path',
        type=str,
        required=True,
        help='Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).',
    )
    parser.add_argument('--output_path', type=str, required=True, help='Path to the output model.')
    parser.add_argument(
        '--opset',
        default=14,
        type=int,
        help='The version of the ONNX operator set to use.',
    )
    parser.add_argument('--fp16', action='store_true', default=False, help='Export the models in `float16` mode')

    args = parser.parse_args()

    print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
    print('SD: Done: ONNX')
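# Example invocation (illustrative script name and paths, not from the original file):
#   python convert_vae_decoder_to_onnx.py --model_path ./stable-diffusion-v1-5 --output_path ./sd_onnx --opset 14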
'''simple docstring'''
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
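# Worked example (illustrative): for poly = (0.0, 0.0, 5.0, 9.3, 7.0) and x = 10.0,
# Horner evaluates ((((7.0) * 10 + 9.3) * 10 + 5.0) * 10 + 0.0) * 10 + 0.0 = 79800.0,
# matching evaluate_poly: 5.0 * 10**2 + 9.3 * 10**3 + 7.0 * 10**4 = 500 + 9300 + 70000.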
if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
'''simple docstring'''
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("repo_id" , ["canonical_dataset_name", "org-name/dataset-name"] )
@pytest.mark.parametrize("path" , ["filename.csv", "filename with blanks.csv"] )
@pytest.mark.parametrize("revision" , [None, "v2"] )
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
'''simple docstring'''
def find_min(arr):
    n = len(arr)
    s = sum(arr)
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]

    # A sum of 0 is always achievable with the empty subset (including i = 0,
    # which the scrambled original missed).
    for i in range(n + 1):
        dp[i][0] = True

    for i in range(1, s + 1):
        dp[0][i] = False

    for i in range(1, n + 1):
        for j in range(1, s + 1):
            # dp[i][j]: can some subset of the first i elements sum to exactly j?
            dp[i][j] = dp[i - 1][j]

            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]

    for j in range(int(s / 2), -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break

    return diff
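# Worked example (illustrative): for arr = [1, 6, 11, 5], s = 23; the largest
# achievable subset sum j <= s // 2 is 11 (e.g. {6, 5}), so the minimum
# partition difference is 23 - 2 * 11 = 1.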
'''simple docstring'''
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + '/test_data/fsmt/fsmt_val_data.json'
with io.open(filename, 'r', encoding='utf-8') as f:
    bleu_data = json.load(f)
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
def lowerCAmelCase__ ( self : int , lowerCamelCase__ : Tuple ) ->str:
'''simple docstring'''
return FSMTTokenizer.from_pretrained(lowerCamelCase__ )
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : Any ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : Dict = FSMTForConditionalGeneration.from_pretrained(lowerCamelCase__ ).to(lowerCamelCase__ )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
["en-ru", 2_6.0],
["ru-en", 2_2.0],
["en-de", 2_2.0],
["de-en", 2_9.0],
] )
@slow
def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : List[Any] ) ->int:
'''simple docstring'''
_UpperCAmelCase : Dict = F"""facebook/wmt19-{pair}"""
_UpperCAmelCase : List[str] = self.get_tokenizer(lowerCamelCase__ )
_UpperCAmelCase : List[Any] = self.get_model(lowerCamelCase__ )
_UpperCAmelCase : Dict = bleu_data[pair]["src"]
_UpperCAmelCase : List[str] = bleu_data[pair]["tgt"]
_UpperCAmelCase : List[str] = tokenizer(lowerCamelCase__ , return_tensors="pt" , truncation=lowerCamelCase__ , padding="longest" ).to(lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
_UpperCAmelCase : Optional[Any] = tokenizer.batch_decode(
lowerCamelCase__ , skip_special_tokens=lowerCamelCase__ , clean_up_tokenization_spaces=lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = calculate_bleu(lowerCamelCase__ , lowerCamelCase__ )
print(lowerCamelCase__ )
self.assertGreaterEqual(scores["bleu"] , lowerCamelCase__ )
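# To run these slow evaluation tests locally (illustrative; the test-file path is
# hypothetical, but RUN_SLOW=1 is the standard transformers switch for @slow tests):
#   RUN_SLOW=1 pytest tests/fsmt/test_fsmt_bleu_score.py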
| 322
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'microsoft/resnet-50': 'https://huggingface.co/microsoft/resnet-50/blob/main/config.json',
}
class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
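# Minimal instantiation sketch (illustrative; `ResNetConfig` is the name used in
# this cleanup, and the argument values are hypothetical):
if __name__ == "__main__":
    cfg = ResNetConfig(depths=[2, 2], hidden_sizes=[64, 128], layer_type="basic")
    print(cfg.stage_names)  # ['stem', 'stage1', 'stage2']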
| 322
| 1
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowerCAmelCase__ ( UpperCAmelCase__ , unittest.TestCase ):
lowerCAmelCase : List[Any] = KandinskyVaaControlnetImgaImgPipeline
lowerCAmelCase : Optional[Any] = ["image_embeds", "negative_image_embeds", "image", "hint"]
lowerCAmelCase : str = ["image_embeds", "negative_image_embeds", "image", "hint"]
lowerCAmelCase : Optional[Any] = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
lowerCAmelCase : List[Any] = False
@property
def lowerCAmelCase__ ( self : Any ) ->str:
'''simple docstring'''
return 32
@property
def lowerCAmelCase__ ( self : Optional[int] ) ->List[str]:
'''simple docstring'''
return 32
@property
def lowerCAmelCase__ ( self : int ) ->int:
'''simple docstring'''
return self.time_input_dim
@property
def lowerCAmelCase__ ( self : Optional[int] ) ->str:
'''simple docstring'''
return self.time_input_dim * 4
@property
def lowerCAmelCase__ ( self : str ) ->Optional[Any]:
'''simple docstring'''
        return 100
@property
def lowerCAmelCase__ ( self : Any ) ->List[Any]:
'''simple docstring'''
torch.manual_seed(0 )
_UpperCAmelCase : List[str] = {
"in_channels": 8,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "image_hint",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
_UpperCAmelCase : List[str] = UNetaDConditionModel(**lowerCamelCase__ )
return model
@property
def lowerCAmelCase__ ( self : int ) ->List[str]:
'''simple docstring'''
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def lowerCAmelCase__ ( self : List[str] ) ->Dict:
'''simple docstring'''
torch.manual_seed(0 )
_UpperCAmelCase : int = VQModel(**self.dummy_movq_kwargs )
return model
def lowerCAmelCase__ ( self : str ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : str = self.dummy_unet
_UpperCAmelCase : Dict = self.dummy_movq
_UpperCAmelCase : Tuple = {
"num_train_timesteps": 10_00,
"beta_schedule": "linear",
"beta_start": 0.0_0_0_8_5,
"beta_end": 0.0_1_2,
"clip_sample": False,
"set_alpha_to_one": False,
"steps_offset": 0,
"prediction_type": "epsilon",
"thresholding": False,
}
_UpperCAmelCase : Dict = DDIMScheduler(**lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def lowerCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Any=0 ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
_UpperCAmelCase : int = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
lowerCamelCase__ )
# create init_image
_UpperCAmelCase : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
_UpperCAmelCase : Tuple = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        _UpperCAmelCase : List[Any] = Image.fromarray(np.uint8(lowerCamelCase__ ) ).convert("RGB" ).resize((256, 256) )
# create hint
_UpperCAmelCase : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
if str(lowerCamelCase__ ).startswith("mps" ):
_UpperCAmelCase : Optional[Any] = torch.manual_seed(lowerCamelCase__ )
else:
_UpperCAmelCase : int = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
_UpperCAmelCase : Any = {
"image": init_image,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"hint": hint,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 10,
"guidance_scale": 7.0,
"strength": 0.2,
"output_type": "np",
}
return inputs
    def test_kandinsky_controlnet_img2img(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Optional[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self : Any ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : List[str] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy" )
_UpperCAmelCase : List[str] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
        _UpperCAmelCase : Optional[Any] = init_image.resize((512, 512) )
_UpperCAmelCase : List[str] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/hint_image_cat.png" )
        _UpperCAmelCase : Union[str, Any] = torch.from_numpy(np.array(lowerCamelCase__ ) ).float() / 255.0
_UpperCAmelCase : Dict = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
_UpperCAmelCase : Tuple = "A robot, 4k photo"
_UpperCAmelCase : List[str] = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa )
pipe_prior.to(lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.floataa )
_UpperCAmelCase : Optional[int] = pipeline.to(lowerCamelCase__ )
pipeline.set_progress_bar_config(disable=lowerCamelCase__ )
_UpperCAmelCase : Tuple = torch.Generator(device="cpu" ).manual_seed(0 )
_UpperCAmelCase , _UpperCAmelCase : List[str] = pipe_prior(
            lowerCamelCase__ , image=lowerCamelCase__ , strength=0.85 , generator=lowerCamelCase__ , negative_prompt="" , ).to_tuple()
_UpperCAmelCase : Dict = pipeline(
            image=lowerCamelCase__ , image_embeds=lowerCamelCase__ , negative_image_embeds=lowerCamelCase__ , hint=lowerCamelCase__ , generator=lowerCamelCase__ , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type="np" , )
_UpperCAmelCase : List[str] = output.images[0]
        assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(lowerCamelCase__ , lowerCamelCase__ )
| 322
|
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
[
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def __lowerCAmelCase (__lowerCAmelCase ):
if isinstance(__lowerCAmelCase , torch.Tensor ):
return image
elif isinstance(__lowerCAmelCase , PIL.Image.Image ):
_UpperCAmelCase : int = [image]
_UpperCAmelCase : str = [trans(img.convert("RGB" ) ) for img in image]
_UpperCAmelCase : Optional[Any] = torch.stack(__lowerCAmelCase )
return image
class lowerCAmelCase__ ( UpperCAmelCase__ ):
def __init__( self : Tuple , lowerCamelCase__ : int , lowerCamelCase__ : int ) ->int:
'''simple docstring'''
super().__init__()
# make sure scheduler can always be converted to DDIM
_UpperCAmelCase : Tuple = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=lowerCamelCase__ , scheduler=lowerCamelCase__ )
def lowerCAmelCase__ ( self : Dict , lowerCamelCase__ : str ) ->Union[str, Any]:
'''simple docstring'''
if strength < 0 or strength > 1:
raise ValueError(F"""The value of strength should in [0.0, 1.0] but is {strength}""" )
def lowerCAmelCase__ ( self : Dict , lowerCamelCase__ : Dict , lowerCamelCase__ : List[str] , lowerCamelCase__ : int ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : str = min(int(num_inference_steps * strength ) , lowerCamelCase__ )
_UpperCAmelCase : str = max(num_inference_steps - init_timestep , 0 )
_UpperCAmelCase : List[str] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : List[str] , lowerCamelCase__ : Any , lowerCamelCase__ : str , lowerCamelCase__ : str , lowerCamelCase__ : Dict , lowerCamelCase__ : Optional[Any]=None ) ->str:
'''simple docstring'''
if not isinstance(lowerCamelCase__ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(lowerCamelCase__ )}""" )
_UpperCAmelCase : Union[str, Any] = image.to(device=lowerCamelCase__ , dtype=lowerCamelCase__ )
if isinstance(lowerCamelCase__ , lowerCamelCase__ ) and len(lowerCamelCase__ ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(lowerCamelCase__ )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
_UpperCAmelCase : List[str] = init_latents.shape
_UpperCAmelCase : Optional[int] = randn_tensor(lowerCamelCase__ , generator=lowerCamelCase__ , device=lowerCamelCase__ , dtype=lowerCamelCase__ )
# get latents
print("add noise to latents at timestep" , lowerCamelCase__ )
_UpperCAmelCase : List[Any] = self.scheduler.add_noise(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
_UpperCAmelCase : List[Any] = init_latents
return latents
@torch.no_grad()
def __call__( self : Any , lowerCamelCase__ : Union[torch.FloatTensor, PIL.Image.Image] = None , lowerCamelCase__ : float = 0.8 , lowerCamelCase__ : int = 1 , lowerCamelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCamelCase__ : float = 0.0 , lowerCamelCase__ : int = 50 , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : Optional[str] = "pil" , lowerCamelCase__ : bool = True , ) ->Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
self.check_inputs(lowerCamelCase__ )
# 2. Preprocess image
_UpperCAmelCase : Dict = preprocess(lowerCamelCase__ )
# 3. set timesteps
self.scheduler.set_timesteps(lowerCamelCase__ , device=self.device )
_UpperCAmelCase , _UpperCAmelCase : Any = self.get_timesteps(lowerCamelCase__ , lowerCamelCase__ , self.device )
_UpperCAmelCase : List[Any] = timesteps[:1].repeat(lowerCamelCase__ )
# 4. Prepare latent variables
_UpperCAmelCase : Optional[int] = self.prepare_latents(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , self.unet.dtype , self.device , lowerCamelCase__ )
_UpperCAmelCase : Any = latents
# 5. Denoising loop
for t in self.progress_bar(lowerCamelCase__ ):
# 1. predict noise model_output
_UpperCAmelCase : Union[str, Any] = self.unet(lowerCamelCase__ , lowerCamelCase__ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
_UpperCAmelCase : int = self.scheduler.step(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , eta=lowerCamelCase__ , use_clipped_model_output=lowerCamelCase__ , generator=lowerCamelCase__ , ).prev_sample
_UpperCAmelCase : Dict = (image / 2 + 0.5).clamp(0 , 1 )
_UpperCAmelCase : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_UpperCAmelCase : str = self.numpy_to_pil(lowerCamelCase__ )
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=lowerCamelCase__ )
| 322
| 1
|
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'
),
},
'spm_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'
)
},
}
MAX_MODEL_INPUT_SIZES = {
    "facebook/s2t-small-librispeech-asr": 1024,
}
MUSTC_LANGS = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]

LANGUAGES = {"mustc": MUSTC_LANGS}
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : List[str] = VOCAB_FILES_NAMES
lowerCAmelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase : int = MAX_MODEL_INPUT_SIZES
lowerCAmelCase : Any = ["input_ids", "attention_mask"]
lowerCAmelCase : List[int] = []
def __init__( self : int , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Optional[int]="<s>" , lowerCamelCase__ : str="</s>" , lowerCamelCase__ : Tuple="<pad>" , lowerCamelCase__ : List[Any]="<unk>" , lowerCamelCase__ : List[Any]=False , lowerCamelCase__ : Union[str, Any]=False , lowerCamelCase__ : int=None , lowerCamelCase__ : str=None , lowerCamelCase__ : Optional[Dict[str, Any]] = None , **lowerCamelCase__ : str , ) ->None:
'''simple docstring'''
_UpperCAmelCase : Any = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , do_upper_case=lowerCamelCase__ , do_lower_case=lowerCamelCase__ , tgt_lang=lowerCamelCase__ , lang_codes=lowerCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase__ , )
_UpperCAmelCase : Union[str, Any] = do_upper_case
_UpperCAmelCase : int = do_lower_case
_UpperCAmelCase : Union[str, Any] = load_json(lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = {v: k for k, v in self.encoder.items()}
_UpperCAmelCase : List[Any] = spm_file
_UpperCAmelCase : int = load_spm(lowerCamelCase__ , self.sp_model_kwargs )
if lang_codes is not None:
_UpperCAmelCase : List[str] = lang_codes
_UpperCAmelCase : List[Any] = LANGUAGES[lang_codes]
_UpperCAmelCase : int = [F"""<lang:{lang}>""" for lang in self.langs]
_UpperCAmelCase : List[Any] = {lang: self.sp_model.PieceToId(F"""<lang:{lang}>""" ) for lang in self.langs}
_UpperCAmelCase : List[Any] = self.lang_tokens
_UpperCAmelCase : List[Any] = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
_UpperCAmelCase : List[Any] = {}
@property
def lowerCAmelCase__ ( self : List[str] ) ->int:
'''simple docstring'''
return len(self.encoder )
@property
def lowerCAmelCase__ ( self : Optional[int] ) ->str:
'''simple docstring'''
return self._tgt_lang
@tgt_lang.setter
def lowerCAmelCase__ ( self : int , lowerCamelCase__ : Dict ) ->None:
'''simple docstring'''
_UpperCAmelCase : Tuple = new_tgt_lang
self.set_tgt_lang_special_tokens(lowerCamelCase__ )
def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : str ) ->None:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.lang_code_to_id[tgt_lang]
_UpperCAmelCase : int = [lang_code_id]
def lowerCAmelCase__ ( self : Dict , lowerCamelCase__ : str ) ->List[str]:
'''simple docstring'''
return self.sp_model.encode(lowerCamelCase__ , out_type=lowerCamelCase__ )
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : str ) ->Tuple:
'''simple docstring'''
return self.encoder.get(lowerCamelCase__ , self.encoder[self.unk_token] )
def lowerCAmelCase__ ( self : int , lowerCamelCase__ : int ) ->str:
'''simple docstring'''
return self.decoder.get(lowerCamelCase__ , self.unk_token )
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : List[str] ) ->str:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = []
_UpperCAmelCase : Any = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
_UpperCAmelCase : Dict = self.sp_model.decode(lowerCamelCase__ )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
_UpperCAmelCase : int = []
else:
current_sub_tokens.append(lowerCamelCase__ )
_UpperCAmelCase : List[str] = self.sp_model.decode(lowerCamelCase__ )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : int=None ) ->List[int]:
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None , lowerCamelCase__ : bool = False ) ->List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase__ , token_ids_a=lowerCamelCase__ , already_has_special_tokens=lowerCamelCase__ )
_UpperCAmelCase : Tuple = [1] * len(self.prefix_tokens )
_UpperCAmelCase : Optional[Any] = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(lowerCamelCase__ )) + suffix_ones
return prefix_ones + ([0] * len(lowerCamelCase__ )) + ([0] * len(lowerCamelCase__ )) + suffix_ones
def lowerCAmelCase__ ( self : str ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : int = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Any ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = self.__dict__.copy()
_UpperCAmelCase : Any = None
return state
def __setstate__( self : Optional[Any] , lowerCamelCase__ : Dict ) ->None:
'''simple docstring'''
_UpperCAmelCase : List[Any] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
_UpperCAmelCase : List[Any] = {}
_UpperCAmelCase : Union[str, Any] = load_spm(self.spm_file , self.sp_model_kwargs )
def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : str , lowerCamelCase__ : Optional[str] = None ) ->Tuple[str]:
'''simple docstring'''
_UpperCAmelCase : Dict = Path(lowerCamelCase__ )
assert save_dir.is_dir(), F"""{save_directory} should be a directory"""
_UpperCAmelCase : Union[str, Any] = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
)
_UpperCAmelCase : List[Any] = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
)
save_json(self.encoder , lowerCamelCase__ )
if os.path.abspath(self.spm_file ) != os.path.abspath(lowerCamelCase__ ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , lowerCamelCase__ )
elif not os.path.isfile(self.spm_file ):
with open(lowerCamelCase__ , "wb" ) as fi:
_UpperCAmelCase : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase__ )
return (str(lowerCamelCase__ ), str(lowerCamelCase__ ))
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
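# Round-trip sanity check for the JSON helpers above (illustrative; the demo
# function name and file path are hypothetical):
def _json_roundtrip_demo(path: str = "vocab_demo.json") -> None:
    save_json({"<s>": 0, "<pad>": 1, "</s>": 2}, path)
    assert load_json(path) == {"<s>": 0, "<pad>": 1, "</s>": 2}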
| 322
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """Solve the linear system via Gaussian elimination with partial pivoting."""
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]

    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting: pick the row with the largest absolute value in this column
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[1]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]


def interpolate(y_points: list[int]) -> Callable[[int], int]:
    """Return the minimal-degree polynomial through the points (1, y_1), ..., (n, y_n)."""
    size: int = len(y_points)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]

    for x_val, y_val in enumerate(y_points):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs: Matrix = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func


def question_function(variable: int) -> int:
    """The generating polynomial u(n) from Project Euler problem 101."""
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )


def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """Sum the first incorrect terms of the optimum polynomials fitted to ``func``."""
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret


if __name__ == "__main__":
    print(f"{solution() = }")
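    # Worked sub-example (illustrative): fitting only u(1) = 1 yields the constant
    # polynomial, whose first incorrect term appears already at n = 2,
    # since u(2) = 1 - 2 + 4 - ... + 1024 = 683:
    constant_poly = interpolate([question_function(1)])
    assert constant_poly(2) == 1 and question_function(2) == 683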
| 322
| 1
|
'''simple docstring'''
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Applies a polynomial warmup ramp before handing off to ``decay_schedule_fn``."""

    def __init__(
        self,
        initial_learning_rate: float,
        decay_schedule_fn: Callable,
        warmup_steps: int,
        power: float = 1.0,
        name: str = None,
    ):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )

    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 0.0 , __lowerCAmelCase = 0.9 , __lowerCAmelCase = 0.9_9_9 , __lowerCAmelCase = 1e-8 , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = 0.0 , __lowerCAmelCase = 1.0 , __lowerCAmelCase = None , ):
_UpperCAmelCase : Union[str, Any] = tf.keras.optimizers.schedules.PolynomialDecay(
initial_learning_rate=__lowerCAmelCase , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=__lowerCAmelCase , )
if num_warmup_steps:
_UpperCAmelCase : Any = WarmUp(
initial_learning_rate=__lowerCAmelCase , decay_schedule_fn=__lowerCAmelCase , warmup_steps=__lowerCAmelCase , )
if weight_decay_rate > 0.0:
_UpperCAmelCase : str = AdamWeightDecay(
learning_rate=__lowerCAmelCase , weight_decay_rate=__lowerCAmelCase , beta_a=__lowerCAmelCase , beta_a=__lowerCAmelCase , epsilon=__lowerCAmelCase , clipnorm=__lowerCAmelCase , global_clipnorm=__lowerCAmelCase , exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"] , include_in_weight_decay=__lowerCAmelCase , )
else:
_UpperCAmelCase : str = tf.keras.optimizers.Adam(
learning_rate=__lowerCAmelCase , beta_a=__lowerCAmelCase , beta_a=__lowerCAmelCase , epsilon=__lowerCAmelCase , clipnorm=__lowerCAmelCase , global_clipnorm=__lowerCAmelCase , )
# We return the optimizer and the LR scheduler in order to better track the
# evolution of the LR independently of the optimizer.
return optimizer, lr_schedule
class AdamWeightDecay(Adam):
def __init__( self : List[str] , lowerCamelCase__ : Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.0_0_1 , lowerCamelCase__ : float = 0.9 , lowerCamelCase__ : float = 0.9_9_9 , lowerCamelCase__ : float = 1E-7 , lowerCamelCase__ : bool = False , lowerCamelCase__ : float = 0.0 , lowerCamelCase__ : Optional[List[str]] = None , lowerCamelCase__ : Optional[List[str]] = None , lowerCamelCase__ : str = "AdamWeightDecay" , **lowerCamelCase__ : int , ) ->Optional[int]:
'''simple docstring'''
super().__init__(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ )
_UpperCAmelCase : Tuple = weight_decay_rate
_UpperCAmelCase : int = include_in_weight_decay
_UpperCAmelCase : Tuple = exclude_from_weight_decay
@classmethod
def lowerCAmelCase__ ( cls : Any , lowerCamelCase__ : str ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = {"WarmUp": WarmUp}
return super(lowerCamelCase__ , cls ).from_config(lowerCamelCase__ , custom_objects=lowerCamelCase__ )
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : str ) ->Optional[Any]:
'''simple docstring'''
super(lowerCamelCase__ , self )._prepare_local(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = tf.constant(
self.weight_decay_rate , name="adam_weight_decay_rate" )
def lowerCAmelCase__ ( self : int , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Any , lowerCamelCase__ : Any ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = self._do_use_weight_decay(var.name )
if do_decay:
return var.assign_sub(
learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"] , use_locking=self._use_locking , )
return tf.no_op()
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Tuple=None , **lowerCamelCase__ : Any ) ->Any:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : List[str] = list(zip(*lowerCamelCase__ ) )
return super(lowerCamelCase__ , self ).apply_gradients(zip(lowerCamelCase__ , lowerCamelCase__ ) , name=lowerCamelCase__ , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : str , lowerCamelCase__ : Dict , lowerCamelCase__ : List[Any] , lowerCamelCase__ : List[Any] ) ->str:
'''simple docstring'''
if apply_state is None:
return self._decayed_lr_t[var_dtype], {}
_UpperCAmelCase : Any = apply_state or {}
_UpperCAmelCase : Tuple = apply_state.get((var_device, var_dtype) )
if coefficients is None:
_UpperCAmelCase : Optional[Any] = self._fallback_apply_state(lowerCamelCase__ , lowerCamelCase__ )
_UpperCAmelCase : List[str] = coefficients
return coefficients["lr_t"], {"apply_state": apply_state}
def lowerCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : int=None ) ->str:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Dict = self._get_lr(var.device , var.dtype.base_dtype , lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = self._decay_weights_op(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
with tf.control_dependencies([decay] ):
return super(lowerCamelCase__ , self )._resource_apply_dense(lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : int , lowerCamelCase__ : int , lowerCamelCase__ : str , lowerCamelCase__ : Dict , lowerCamelCase__ : Any=None ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Dict = self._get_lr(var.device , var.dtype.base_dtype , lowerCamelCase__ )
_UpperCAmelCase : str = self._decay_weights_op(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
with tf.control_dependencies([decay] ):
return super(lowerCamelCase__ , self )._resource_apply_sparse(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : Any ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : List[str] = super().get_config()
config.update({"weight_decay_rate": self.weight_decay_rate} )
return config
def lowerCAmelCase__ ( self : Any , lowerCamelCase__ : Any ) ->str:
'''simple docstring'''
if self.weight_decay_rate == 0:
return False
if self._include_in_weight_decay:
for r in self._include_in_weight_decay:
if re.search(lowerCamelCase__ , lowerCamelCase__ ) is not None:
return True
if self._exclude_from_weight_decay:
for r in self._exclude_from_weight_decay:
if re.search(lowerCamelCase__ , lowerCamelCase__ ) is not None:
return False
return True
class lowerCAmelCase__ ( UpperCAmelCase__ ):
def __init__( self : Optional[int] ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase : Tuple = []
_UpperCAmelCase : Dict = None
@property
def lowerCAmelCase__ ( self : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64),
                trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )
return self._accum_steps.value()
@property
def lowerCAmelCase__ ( self : str ) ->Tuple:
'''simple docstring'''
if not self._gradients:
raise ValueError("The accumulator should be called first to initialize the gradients" )
return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
def __call__( self : str , lowerCamelCase__ : List[Any] ) ->Dict:
'''simple docstring'''
if not self._gradients:
            _ = self.step  # Create the step variable.
self._gradients.extend(
[
tf.Variable(
tf.zeros_like(lowerCamelCase__ ) , trainable=lowerCamelCase__ , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
if gradient is not None
else gradient
for gradient in gradients
] )
if len(lowerCamelCase__ ) != len(self._gradients ):
raise ValueError(F"""Expected {len(self._gradients )} gradients, but got {len(lowerCamelCase__ )}""" )
for accum_gradient, gradient in zip(self._gradients , lowerCamelCase__ ):
if accum_gradient is not None and gradient is not None:
accum_gradient.assign_add(lowerCamelCase__ )
self._accum_steps.assign_add(1 )
def lowerCAmelCase__ ( self : Optional[Any] ) ->str:
'''simple docstring'''
if not self._gradients:
return
self._accum_steps.assign(0 )
for gradient in self._gradients:
if gradient is not None:
gradient.assign(tf.zeros_like(lowerCamelCase__ ) )
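# Illustrative check of the WarmUp schedule above (values are hypothetical):
if __name__ == "__main__":
    decay = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=1e-4, decay_steps=900, end_learning_rate=0.0
    )
    schedule = WarmUp(initial_learning_rate=1e-4, decay_schedule_fn=decay, warmup_steps=100)
    print(float(schedule(50)))    # mid-warmup: 0.5 * 1e-4
    print(float(schedule(1000)))  # past warmup: fully decayed to ~0.0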
| 322
|
'''simple docstring'''
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 322
| 1
|
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class lowerCAmelCase__ ( UpperCAmelCase__ , unittest.TestCase ):
lowerCAmelCase : List[str] = MvpTokenizer
lowerCAmelCase : List[Any] = MvpTokenizerFast
lowerCAmelCase : Union[str, Any] = True
lowerCAmelCase : str = filter_roberta_detectors
def lowerCAmelCase__ ( self : List[Any] ) ->Optional[int]:
'''simple docstring'''
super().setUp()
_UpperCAmelCase : List[str] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
_UpperCAmelCase : Union[str, Any] = dict(zip(lowerCamelCase__ , range(len(lowerCamelCase__ ) ) ) )
_UpperCAmelCase : Any = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
_UpperCAmelCase : List[Any] = {"unk_token": "<unk>"}
_UpperCAmelCase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_UpperCAmelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowerCamelCase__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(lowerCamelCase__ ) )
def lowerCAmelCase__ ( self : Union[str, Any] , **lowerCamelCase__ : List[str] ) ->Any:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : Dict , **lowerCamelCase__ : Union[str, Any] ) ->Any:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : Optional[int] ) ->Optional[Any]:
'''simple docstring'''
return "lower newer", "lower newer"
@cached_property
def lowerCAmelCase__ ( self : int ) ->Optional[Any]:
'''simple docstring'''
return MvpTokenizer.from_pretrained("RUCAIBox/mvp" )
@cached_property
def lowerCAmelCase__ ( self : List[Any] ) ->Dict:
'''simple docstring'''
return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp" )
@require_torch
def lowerCAmelCase__ ( self : Optional[Any] ) ->str:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        _UpperCAmelCase : Tuple = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_UpperCAmelCase : Optional[int] = tokenizer(lowerCamelCase__ , max_length=len(lowerCamelCase__ ) , padding=lowerCamelCase__ , return_tensors="pt" )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
_UpperCAmelCase : Optional[int] = batch.input_ids.tolist()[0]
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
# Test that special tokens are reset
@require_torch
def lowerCAmelCase__ ( self : Optional[int] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = ["A long paragraph for summarization.", "Another paragraph for summarization."]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_UpperCAmelCase : Dict = tokenizer(lowerCamelCase__ , padding=lowerCamelCase__ , return_tensors="pt" )
# check if input_ids are returned and no labels
self.assertIn("input_ids" , lowerCamelCase__ )
self.assertIn("attention_mask" , lowerCamelCase__ )
self.assertNotIn("labels" , lowerCamelCase__ )
self.assertNotIn("decoder_attention_mask" , lowerCamelCase__ )
@require_torch
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Dict = [
"Summary of the text.",
"Another summary.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_UpperCAmelCase : Tuple = tokenizer(text_target=lowerCamelCase__ , max_length=32 , padding="max_length" , return_tensors="pt" )
self.assertEqual(32 , targets["input_ids"].shape[1] )
@require_torch
def lowerCAmelCase__ ( self : Optional[Any] ) ->int:
'''simple docstring'''
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_UpperCAmelCase : Union[str, Any] = tokenizer(
["I am a small frog" * 10_24, "I am a small frog"] , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , return_tensors="pt" )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
            self.assertEqual(batch.input_ids.shape , (2, 1024) )
@require_torch
def lowerCAmelCase__ ( self : Optional[int] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = ["A long paragraph for summarization."]
_UpperCAmelCase : str = [
"Summary of the text.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_UpperCAmelCase : Union[str, Any] = tokenizer(lowerCamelCase__ , text_target=lowerCamelCase__ , return_tensors="pt" )
_UpperCAmelCase : Dict = inputs["input_ids"]
_UpperCAmelCase : List[Any] = inputs["labels"]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
def lowerCAmelCase__ ( self : List[Any] ) ->List[Any]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self : Optional[int] ) ->Union[str, Any]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_UpperCAmelCase : str = self.rust_tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = self.tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
_UpperCAmelCase : Any = "A, <mask> AllenNLP sentence."
_UpperCAmelCase : Any = tokenizer_r.encode_plus(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ )
_UpperCAmelCase : str = tokenizer_p.encode_plus(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
_UpperCAmelCase : int = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
_UpperCAmelCase : int = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
lowerCamelCase__ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
lowerCamelCase__ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
| 322
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar("T")


class Node(Generic[T]):
    def __init__(self, data: T):
        self.data = data  # value stored in the node
        self.next: Node[T] | None = None  # link to the node below

    def __str__(self) -> str:
        return f"{self.data}"


class Stack(Generic[T]):
    """LIFO stack implemented on top of a singly linked list."""

    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 1
|
'''simple docstring'''
import re
def split_input(str_: str) -> list:
    """Split on punctuation, then on whitespace within each chunk."""
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_: str) -> str:
    string_split = split_input(str_)
    return "".join(
        ["".join([char.capitalize() for char in sub_str]) for sub_str in string_split]
    )


def to_complex_case(text: str, upper: bool, separator: str) -> str:
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [
                    separator.join([char.upper() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        else:
            res_str = "".join(
                [
                    separator.join([char.lower() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text: str) -> str:
    return to_simple_case(text)


def to_camel_case(text: str) -> str:
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "_")


def to_kebab_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "-")


if __name__ == "__main__":
    __import__("doctest").testmod()
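    # Quick demonstration of the converters above (illustrative inputs):
    assert to_snake_case("This is a String", upper=False) == "this_is_a_string"
    assert to_camel_case("some random text") == "someRandomText"
    assert to_kebab_case("welcome to python", upper=True) == "WELCOME-TO-PYTHON"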
| 322
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json'
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=10000, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=4, decoder_layerdrop=0.0, use_cache=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, scale_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, max_target_positions=1024, **kwargs):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
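# Minimal instantiation sketch (illustrative values; `Speech2Text2Config` is the
# name used in this cleanup):
if __name__ == "__main__":
    config = Speech2Text2Config(vocab_size=10000, d_model=256, decoder_layers=6)
    print(config.num_hidden_layers)  # mirrors decoder_layers: 6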
| 322
| 1
|
'''simple docstring'''
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph, vert, visited):
    """Order vertices of ``graph`` by exit time of a depth-first search."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph, vert, visited):
    """Collect every vertex reachable from ``vert`` in the reversed graph."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph):
    """Kosaraju's algorithm: DFS on the graph, then DFS on its reverse in reverse exit order."""
    visited = len(graph) * [False]
    reversed_graph = {vert: [] for vert in range(len(graph))}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)
    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)
    components_list = []
    visited = len(graph) * [False]
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)
    return components_list
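# Illustrative run on the sample graphs defined above:
if __name__ == "__main__":
    print(strongly_connected_components(test_graph_1))  # [[0, 1, 2], [3], [4]]
    print(strongly_connected_components(test_graph_2))  # [[0, 2, 1], [3, 5, 4]]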
| 322
|
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"


def split_text(text: str, n=100, character=" "):
    """Split the text every ``n``-th occurrence of ``character``."""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]


def split_documents(documents: dict) -> dict:
    """Split each document into passages of roughly 100 words."""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}


def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages."""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ):
######################################
logger.info("Step 1 - Create the dataset" )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
_UpperCAmelCase : Optional[int] = load_dataset(
"csv" , data_files=[rag_example_args.csv_path] , split="train" , delimiter="\t" , column_names=["title", "text"] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
_UpperCAmelCase : Optional[int] = dataset.map(__lowerCAmelCase , batched=__lowerCAmelCase , num_proc=processing_args.num_proc )
# And compute the embeddings
_UpperCAmelCase : Union[str, Any] = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=__lowerCAmelCase )
_UpperCAmelCase : Optional[int] = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
_UpperCAmelCase : Dict = Features(
{"text": Value("string" ), "title": Value("string" ), "embeddings": Sequence(Value("float32" ) )} ) # optional, save as float32 instead of float64 to save space
_UpperCAmelCase : int = dataset.map(
partial(__lowerCAmelCase , ctx_encoder=__lowerCAmelCase , ctx_tokenizer=__lowerCAmelCase ) , batched=__lowerCAmelCase , batch_size=processing_args.batch_size , features=__lowerCAmelCase , )
# And finally save your dataset
_UpperCAmelCase : List[Any] = os.path.join(rag_example_args.output_dir , "my_knowledge_dataset" )
dataset.save_to_disk(__lowerCAmelCase )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info("Step 2 - Index the dataset" )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
_UpperCAmelCase : Any = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index("embeddings" , custom_index=__lowerCAmelCase )
# And save the index
_UpperCAmelCase : List[str] = os.path.join(rag_example_args.output_dir , "my_knowledge_dataset_hnsw_index.faiss" )
dataset.get_index("embeddings" ).save(__lowerCAmelCase )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
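    # A sketch of querying the saved index with a DPR question encoder (the model name
    # and variable names below are illustrative assumptions, not part of this script):
    # from transformers import DPRQuestionEncoder, DPRQuestionEncoderTokenizerFast
    # q_encoder = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
    # q_tokenizer = DPRQuestionEncoderTokenizerFast.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
    # question_emb = q_encoder(**q_tokenizer("What does Moses' rod turn into ?", return_tensors="pt")).pooler_output
    # scores, retrieved_examples = dataset.get_nearest_examples("embeddings", question_emb.detach().numpy()[0], k=5)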
@dataclass
class lowerCAmelCase__ :
lowerCAmelCase : str = field(
default=str(Path(UpperCAmelCase__ ).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv" ) , metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"} , )
lowerCAmelCase : Optional[str] = field(
default=UpperCAmelCase__ , metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."} , )
lowerCAmelCase : str = field(
default="facebook/rag-sequence-nq" , metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"} , )
lowerCAmelCase : str = field(
default="facebook/dpr-ctx_encoder-multiset-base" , metadata={
"help": (
"The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
" 'facebook/dpr-ctx_encoder-multiset-base'"
)
} , )
lowerCAmelCase : Optional[str] = field(
default=str(Path(UpperCAmelCase__ ).parent / "test_run" / "dummy-kb" ) , metadata={"help": "Path to a directory where the dataset passages and the index will be saved"} , )
@dataclass
class lowerCAmelCase__ :
lowerCAmelCase : Optional[int] = field(
default=UpperCAmelCase__ , metadata={
"help": "The number of processes to use to split the documents into passages. Default is single process."
} , )
lowerCAmelCase : int = field(
default=16 , metadata={
"help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
} , )
@dataclass
class lowerCAmelCase__ :
lowerCAmelCase : int = field(
default=768 , metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."} , )
lowerCAmelCase : int = field(
default=128 , metadata={
"help": (
"The number of bi-directional links created for every new element during the HNSW index construction."
)
} , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
lowerCamelCase__ = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
lowerCamelCase__ = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
| 322
| 1
|
'''simple docstring'''
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
lowerCamelCase__ = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class lowerCAmelCase__ :
def __init__( self : Tuple , lowerCamelCase__ : List[str] , lowerCamelCase__ : Union[str, Any]=16 , lowerCamelCase__ : Any=13 , lowerCamelCase__ : Any=7 , lowerCamelCase__ : Any=14 , lowerCamelCase__ : Any=10 , lowerCamelCase__ : List[Any]=19 , lowerCamelCase__ : Optional[int]=5 , lowerCamelCase__ : Any=4 , lowerCamelCase__ : Dict=True , lowerCamelCase__ : str=16 , lowerCamelCase__ : Optional[Any]=2 , lowerCamelCase__ : str=4 , lowerCamelCase__ : str=4 , lowerCamelCase__ : Optional[Any]="gelu" , lowerCamelCase__ : Optional[Any]=0.1 , lowerCamelCase__ : Any=0.1 , lowerCamelCase__ : Tuple=[1, 2, 3, 4, 5] , lowerCamelCase__ : Union[str, Any]=25 , lowerCamelCase__ : Tuple=5 , ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = d_model
_UpperCAmelCase : Any = parent
_UpperCAmelCase : Tuple = batch_size
_UpperCAmelCase : Union[str, Any] = prediction_length
_UpperCAmelCase : Dict = context_length
_UpperCAmelCase : Tuple = cardinality
_UpperCAmelCase : Dict = num_time_features
_UpperCAmelCase : Any = lags_sequence
_UpperCAmelCase : Optional[Any] = embedding_dimension
_UpperCAmelCase : str = is_training
_UpperCAmelCase : List[Any] = hidden_size
_UpperCAmelCase : Optional[Any] = num_hidden_layers
_UpperCAmelCase : List[str] = num_attention_heads
_UpperCAmelCase : Union[str, Any] = intermediate_size
_UpperCAmelCase : Dict = hidden_act
_UpperCAmelCase : List[str] = hidden_dropout_prob
_UpperCAmelCase : List[str] = attention_probs_dropout_prob
_UpperCAmelCase : Optional[Any] = context_length
_UpperCAmelCase : str = prediction_length + label_length
_UpperCAmelCase : List[str] = label_length
_UpperCAmelCase : Optional[Any] = moving_average
_UpperCAmelCase : int = autocorrelation_factor
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Optional[Any]:
'''simple docstring'''
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : List[Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase : Any = config.context_length + max(config.lags_sequence )
_UpperCAmelCase : Dict = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
_UpperCAmelCase : Optional[Any] = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
_UpperCAmelCase : str = floats_tensor([self.batch_size, _past_length] )
_UpperCAmelCase : Optional[Any] = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
_UpperCAmelCase : List[str] = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
_UpperCAmelCase : int = floats_tensor([self.batch_size, config.prediction_length] )
_UpperCAmelCase : List[str] = {
"past_values": past_values,
"static_categorical_features": static_categorical_features,
"past_time_features": past_time_features,
"past_observed_mask": past_observed_mask,
"future_time_features": future_time_features,
"future_values": future_values,
}
return inputs_dict
def lowerCAmelCase__ ( self : Any ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.get_config()
_UpperCAmelCase : Any = self.prepare_autoformer_inputs_dict(lowerCamelCase__ )
return config, inputs_dict
def lowerCAmelCase__ ( self : str ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : str = self.prepare_config_and_inputs()
return config, inputs_dict
def lowerCAmelCase__ ( self : Dict , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Dict ) ->str:
'''simple docstring'''
_UpperCAmelCase : Dict = AutoformerModel(config=lowerCamelCase__ ).to(lowerCamelCase__ ).eval()
_UpperCAmelCase : List[Any] = model(**lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = outputs.encoder_last_hidden_state
_UpperCAmelCase : str = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase : int = model.get_encoder()
encoder.save_pretrained(lowerCamelCase__ )
_UpperCAmelCase : Dict = AutoformerEncoder.from_pretrained(lowerCamelCase__ ).to(lowerCamelCase__ )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = model.create_network_inputs(**lowerCamelCase__ )
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
_UpperCAmelCase : List[str] = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
_UpperCAmelCase : Tuple = encoder(inputs_embeds=lowerCamelCase__ )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
_UpperCAmelCase : Union[str, Any] = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
_UpperCAmelCase : Dict = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
_UpperCAmelCase : Optional[int] = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
_UpperCAmelCase : Union[str, Any] = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase : List[str] = model.get_decoder()
decoder.save_pretrained(lowerCamelCase__ )
_UpperCAmelCase : int = AutoformerDecoder.from_pretrained(lowerCamelCase__ ).to(lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = decoder(
trend=lowerCamelCase__ , inputs_embeds=lowerCamelCase__ , encoder_hidden_states=lowerCamelCase__ , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
lowerCAmelCase : int = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
lowerCAmelCase : Union[str, Any] = (AutoformerForPrediction,) if is_torch_available() else ()
lowerCAmelCase : List[Any] = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
lowerCAmelCase : List[Any] = False
lowerCAmelCase : int = False
lowerCAmelCase : List[Any] = False
lowerCAmelCase : Union[str, Any] = False
lowerCAmelCase : List[Any] = False
lowerCAmelCase : List[Any] = False
def lowerCAmelCase__ ( self : Tuple ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = AutoformerModelTester(self )
_UpperCAmelCase : int = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ )
def lowerCAmelCase__ ( self : str ) ->Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self : List[Any] ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
_UpperCAmelCase : int = model_class(lowerCamelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCamelCase__ )
_UpperCAmelCase , _UpperCAmelCase : Dict = model_class.from_pretrained(lowerCamelCase__ , output_loading_info=lowerCamelCase__ )
self.assertEqual(info["missing_keys"] , [] )
def lowerCAmelCase__ ( self : Any ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*lowerCamelCase__ )
@unittest.skip(reason="Model has no tokens embeddings" )
def lowerCAmelCase__ ( self : Optional[int] ) ->List[Any]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self : Optional[int] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : int = inspect.signature(getattr(lowerCamelCase__ , "forward" ) )
# The main input is the name of the argument after `self`
_UpperCAmelCase : Union[str, Any] = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , lowerCamelCase__ )
def lowerCAmelCase__ ( self : Any ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : Optional[Any] = model_class(lowerCamelCase__ )
_UpperCAmelCase : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase : List[str] = [*signature.parameters.keys()]
_UpperCAmelCase : Optional[int] = [
"past_values",
"past_time_features",
"past_observed_mask",
"static_categorical_features",
"static_real_features",
"future_values",
"future_time_features",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("future_observed_mask" )
expected_arg_names.extend(
[
"decoder_attention_mask",
"head_mask",
"decoder_head_mask",
"cross_attn_head_mask",
"encoder_outputs",
"past_key_values",
"output_hidden_states",
"output_attentions",
"use_cache",
"return_dict",
] )
self.assertListEqual(arg_names[: len(lowerCamelCase__ )] , lowerCamelCase__ )
def lowerCAmelCase__ ( self : Optional[Any] ) ->str:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase : Tuple = True
_UpperCAmelCase : Union[str, Any] = getattr(self.model_tester , "seq_length" , lowerCamelCase__ )
_UpperCAmelCase : Dict = getattr(self.model_tester , "decoder_seq_length" , lowerCamelCase__ )
_UpperCAmelCase : Dict = getattr(self.model_tester , "encoder_seq_length" , lowerCamelCase__ )
_UpperCAmelCase : List[Any] = getattr(self.model_tester , "d_model" , lowerCamelCase__ )
_UpperCAmelCase : Dict = getattr(self.model_tester , "num_attention_heads" , lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = d_model // num_attention_heads
for model_class in self.all_model_classes:
_UpperCAmelCase : List[Any] = True
_UpperCAmelCase : Tuple = False
_UpperCAmelCase : Optional[int] = True
_UpperCAmelCase : Union[str, Any] = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
with torch.no_grad():
_UpperCAmelCase : int = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
_UpperCAmelCase : int = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowerCamelCase__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_UpperCAmelCase : Optional[Any] = True
_UpperCAmelCase : List[str] = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
with torch.no_grad():
_UpperCAmelCase : List[str] = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
_UpperCAmelCase : Optional[Any] = outputs.encoder_attentions
self.assertEqual(len(lowerCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
_UpperCAmelCase : Optional[Any] = len(lowerCamelCase__ )
_UpperCAmelCase : Any = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
# decoder attentions
_UpperCAmelCase : Tuple = outputs.decoder_attentions
self.assertIsInstance(lowerCamelCase__ , (list, tuple) )
self.assertEqual(len(lowerCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
_UpperCAmelCase : Union[str, Any] = outputs.cross_attentions
self.assertIsInstance(lowerCamelCase__ , (list, tuple) )
self.assertEqual(len(lowerCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
_UpperCAmelCase : Any = True
_UpperCAmelCase : Tuple = True
_UpperCAmelCase : Tuple = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
with torch.no_grad():
_UpperCAmelCase : Optional[int] = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
self.assertEqual(out_len + 2 , len(lowerCamelCase__ ) )
_UpperCAmelCase : List[Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowerCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def lowerCAmelCase__ ( self : List[Any] ) ->int:
'''simple docstring'''
super().test_retain_grad_hidden_states_attentions()
def __lowerCAmelCase (__lowerCAmelCase="train-batch.pt" ):
_UpperCAmelCase : Optional[int] = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch" , filename=__lowerCAmelCase , repo_type="dataset" )
_UpperCAmelCase : str = torch.load(__lowerCAmelCase , map_location=__lowerCAmelCase )
return batch
@require_torch
@slow
class lowerCAmelCase__ ( unittest.TestCase ):
def lowerCAmelCase__ ( self : List[str] ) ->str:
'''simple docstring'''
_UpperCAmelCase : List[Any] = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(lowerCamelCase__ )
_UpperCAmelCase : Any = prepare_batch()
with torch.no_grad():
_UpperCAmelCase : Any = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , future_values=batch["future_values"] , future_time_features=batch["future_time_features"] , )[0]
_UpperCAmelCase : Dict = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , lowerCamelCase__ )
_UpperCAmelCase : Tuple = torch.tensor(
[[0.3_5_9_3, -1.3_3_9_8, 0.6_3_3_0], [0.2_2_7_9, 1.5_3_9_6, -0.1_7_9_2], [0.0_4_5_0, 1.3_2_2_5, -0.2_3_3_5]] , device=lowerCamelCase__ )
self.assertTrue(torch.allclose(output[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
def lowerCAmelCase__ ( self : Optional[Any] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : str = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(lowerCamelCase__ )
_UpperCAmelCase : Dict = prepare_batch("val-batch.pt" )
with torch.no_grad():
_UpperCAmelCase : Optional[int] = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , ).encoder_last_hidden_state
_UpperCAmelCase : Dict = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = torch.tensor(
[[-0.0_7_3_4, -0.9_0_3_6, 0.8_3_5_8], [4.7_1_8_6, 2.4_1_1_3, 1.9_5_8_1], [1.7_9_5_3, 2.3_5_5_8, 1.2_9_7_0]] , device=lowerCamelCase__ )
self.assertTrue(torch.allclose(output[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
def lowerCAmelCase__ ( self : Dict ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = prepare_batch("val-batch.pt" )
with torch.no_grad():
_UpperCAmelCase : Optional[Any] = model.generate(
static_categorical_features=batch["static_categorical_features"] , past_time_features=batch["past_time_features"] , past_values=batch["past_values"] , future_time_features=batch["future_time_features"] , past_observed_mask=batch["past_observed_mask"] , )
_UpperCAmelCase : Tuple = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , lowerCamelCase__ )
_UpperCAmelCase : Tuple = torch.tensor([3_1_3_0.6_7_6_3, 4_0_5_6.5_2_9_3, 7_0_5_3.0_7_8_6] , device=lowerCamelCase__ )
_UpperCAmelCase : List[str] = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , lowerCamelCase__ , rtol=1E-1 ) )
| 322
|
'''simple docstring'''
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
lowerCamelCase__ = {
'return_dict': False,
'output_hidden_states': True,
'output_attentions': True,
'torchscript': True,
'torch_dtype': 'float16',
'use_bfloat16': True,
'tf_legacy_loss': True,
'pruned_heads': {'a': 1},
'tie_word_embeddings': False,
'is_decoder': True,
'cross_attention_hidden_size': 128,
'add_cross_attention': True,
'tie_encoder_decoder': True,
'max_length': 50,
'min_length': 3,
'do_sample': True,
'early_stopping': True,
'num_beams': 3,
'num_beam_groups': 3,
'diversity_penalty': 0.5,
'temperature': 2.0,
'top_k': 10,
'top_p': 0.7,
'typical_p': 0.2,
'repetition_penalty': 0.8,
'length_penalty': 0.8,
'no_repeat_ngram_size': 5,
'encoder_no_repeat_ngram_size': 5,
'bad_words_ids': [1, 2, 3],
'num_return_sequences': 3,
'chunk_size_feed_forward': 5,
'output_scores': True,
'return_dict_in_generate': True,
'forced_bos_token_id': 2,
'forced_eos_token_id': 3,
'remove_invalid_values': True,
'architectures': ['BertModel'],
'finetuning_task': 'translation',
'id2label': {0: 'label'},
'label2id': {'label': '0'},
'tokenizer_class': 'BertTokenizerFast',
'prefix': 'prefix',
'bos_token_id': 6,
'pad_token_id': 7,
'eos_token_id': 8,
'sep_token_id': 9,
'decoder_start_token_id': 10,
'exponential_decay_length_penalty': (5, 1.01),
'suppress_tokens': [0, 1],
'begin_suppress_tokens': 2,
'task_specific_params': {'translation': 'some_params'},
'problem_type': 'regression',
}
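# A minimal sketch of what these records control (illustrative, not part of the tests):
# each key above can be passed straight to a config constructor and overrides the default.
# from transformers import BertConfig
# cfg = BertConfig(output_attentions=True, num_beams=3)
# assert cfg.output_attentions is True and cfg.num_beams == 3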
@is_staging_test
class lowerCAmelCase__ ( unittest.TestCase ):
@classmethod
def lowerCAmelCase__ ( cls : List[str] ) ->str:
'''simple docstring'''
_UpperCAmelCase : Tuple = TOKEN
HfFolder.save_token(lowerCamelCase__ )
@classmethod
def lowerCAmelCase__ ( cls : Union[str, Any] ) ->int:
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id="test-config" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-config-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-config" )
except HTTPError:
pass
def lowerCAmelCase__ ( self : int ) ->Any:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub("test-config" , use_auth_token=self._token )
_UpperCAmelCase : List[str] = BertConfig.from_pretrained(F"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase__ , getattr(lowerCamelCase__ , lowerCamelCase__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-config" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCamelCase__ , repo_id="test-config" , push_to_hub=lowerCamelCase__ , use_auth_token=self._token )
_UpperCAmelCase : Dict = BertConfig.from_pretrained(F"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase__ , getattr(lowerCamelCase__ , lowerCamelCase__ ) )
def lowerCAmelCase__ ( self : Optional[Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : str = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub("valid_org/test-config-org" , use_auth_token=self._token )
_UpperCAmelCase : List[str] = BertConfig.from_pretrained("valid_org/test-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase__ , getattr(lowerCamelCase__ , lowerCamelCase__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-config-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowerCamelCase__ , repo_id="valid_org/test-config-org" , push_to_hub=lowerCamelCase__ , use_auth_token=self._token )
_UpperCAmelCase : int = BertConfig.from_pretrained("valid_org/test-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase__ , getattr(lowerCamelCase__ , lowerCamelCase__ ) )
def lowerCAmelCase__ ( self : List[str] ) ->Any:
'''simple docstring'''
CustomConfig.register_for_auto_class()
_UpperCAmelCase : int = CustomConfig(attribute=42 )
config.push_to_hub("test-dynamic-config" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {"AutoConfig": "custom_configuration.CustomConfig"} )
_UpperCAmelCase : str = AutoConfig.from_pretrained(F"""{USER}/test-dynamic-config""" , trust_remote_code=lowerCamelCase__ )
        # Can't make an isinstance check because the new_config is from the CustomConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , "CustomConfig" )
self.assertEqual(new_config.attribute , 42 )
class lowerCAmelCase__ ( unittest.TestCase ):
def lowerCAmelCase__ ( self : List[str] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Tuple = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
_UpperCAmelCase : Any = c.n_embd + 1 # int
_UpperCAmelCase : List[Any] = c.resid_pdrop + 1.0 # float
_UpperCAmelCase : Tuple = not c.scale_attn_weights # bool
_UpperCAmelCase : List[Any] = c.summary_type + "foo" # str
c.update_from_string(
F"""n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}""" )
self.assertEqual(lowerCamelCase__ , c.n_embd , "mismatch for key: n_embd" )
self.assertEqual(lowerCamelCase__ , c.resid_pdrop , "mismatch for key: resid_pdrop" )
self.assertEqual(lowerCamelCase__ , c.scale_attn_weights , "mismatch for key: scale_attn_weights" )
self.assertEqual(lowerCamelCase__ , c.summary_type , "mismatch for key: summary_type" )
def lowerCAmelCase__ ( self : Optional[Any] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Any = PretrainedConfig()
_UpperCAmelCase : Tuple = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
lowerCamelCase__ , ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"] )
_UpperCAmelCase : List[str] = [key for key, value in config_common_kwargs.items() if value == getattr(lowerCamelCase__ , lowerCamelCase__ )]
if len(lowerCamelCase__ ) > 0:
raise ValueError(
"The following keys are set with the default values in"
" `test_configuration_common.config_common_kwargs` pick another value for them:"
F""" {', '.join(lowerCamelCase__ )}.""" )
def lowerCAmelCase__ ( self : Optional[int] ) ->int:
'''simple docstring'''
with self.assertRaises(lowerCamelCase__ ):
# config is in subfolder, the following should not work without specifying the subfolder
_UpperCAmelCase : Any = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" )
_UpperCAmelCase : Any = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" , subfolder="bert" )
self.assertIsNotNone(lowerCamelCase__ )
def lowerCAmelCase__ ( self : Optional[int] ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = mock.Mock()
_UpperCAmelCase : List[str] = 5_00
_UpperCAmelCase : Dict = {}
_UpperCAmelCase : Tuple = HTTPError
_UpperCAmelCase : Any = {}
# Download this model to make sure it's in the cache.
_UpperCAmelCase : int = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=lowerCamelCase__ ) as mock_head:
_UpperCAmelCase : Union[str, Any] = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
        # This checks that we did call the fake head request
mock_head.assert_called()
def lowerCAmelCase__ ( self : Optional[int] ) ->Any:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = BertConfig.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json" )
def lowerCAmelCase__ ( self : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : int = AutoConfig.from_pretrained("bert-base-cased" )
_UpperCAmelCase : str = ["config.4.0.0.json"]
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(lowerCamelCase__ )
_UpperCAmelCase : Dict = 2
json.dump(configuration.to_dict() , open(os.path.join(lowerCamelCase__ , "config.4.0.0.json" ) , "w" ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
_UpperCAmelCase : Optional[int] = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
        # Should pick the old configuration file as the version of Transformers is < 42.0.0
_UpperCAmelCase : Dict = ["config.42.0.0.json"]
_UpperCAmelCase : Union[str, Any] = 7_68
configuration.save_pretrained(lowerCamelCase__ )
shutil.move(os.path.join(lowerCamelCase__ , "config.4.0.0.json" ) , os.path.join(lowerCamelCase__ , "config.42.0.0.json" ) )
_UpperCAmelCase : Dict = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertEqual(new_configuration.hidden_size , 7_68 )
def lowerCAmelCase__ ( self : List[str] ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = "hf-internal-testing/test-two-configs"
import transformers as new_transformers
_UpperCAmelCase : Any = "v4.0.0"
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = new_transformers.models.auto.AutoConfig.from_pretrained(
lowerCamelCase__ , return_unused_kwargs=lowerCamelCase__ )
self.assertEqual(new_configuration.hidden_size , 2 )
        # This checks that `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(lowerCamelCase__ , {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
_UpperCAmelCase : List[Any] = "v3.0.0"
_UpperCAmelCase : int = old_transformers.models.auto.AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertEqual(old_configuration.hidden_size , 7_68 )
| 322
| 1
|
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ ):
@register_to_config
def __init__( self : Optional[int] , lowerCamelCase__ : int = 7_68 , ) ->List[Any]:
'''simple docstring'''
super().__init__()
_UpperCAmelCase : List[Any] = nn.Parameter(torch.zeros(1 , lowerCamelCase__ ) )
_UpperCAmelCase : Any = nn.Parameter(torch.ones(1 , lowerCamelCase__ ) )
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : Optional[Union[str, torch.device]] = None , lowerCamelCase__ : Optional[torch.dtype] = None , ) ->str:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = nn.Parameter(self.mean.to(lowerCamelCase__ ).to(lowerCamelCase__ ) )
_UpperCAmelCase : str = nn.Parameter(self.std.to(lowerCamelCase__ ).to(lowerCamelCase__ ) )
return self
def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : List[str] ) ->Any:
'''simple docstring'''
_UpperCAmelCase : str = (embeds - self.mean) * 1.0 / self.std
return embeds
def lowerCAmelCase__ ( self : int , lowerCamelCase__ : Any ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Any = (embeds * self.std) + self.mean
return embeds
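# A round-trip sketch (the two methods above scale embeddings by the learned mean/std
# and invert that transform; "scale"/"unscale" below are illustrative names for them):
# x = torch.randn(2, 768)
# assert torch.allclose(unscale(scale(x)), x, atol=1e-5)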
| 322
|
'''simple docstring'''
from manim import *
class lowerCAmelCase__ ( UpperCAmelCase__ ):
def lowerCAmelCase__ ( self : List[Any] ) ->str:
'''simple docstring'''
_UpperCAmelCase : Dict = Rectangle(height=0.5 , width=0.5 )
_UpperCAmelCase : Optional[Any] = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
_UpperCAmelCase : Optional[Any] = [mem.copy() for i in range(6 )]
_UpperCAmelCase : Optional[Any] = [mem.copy() for i in range(6 )]
_UpperCAmelCase : Dict = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
_UpperCAmelCase : str = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
_UpperCAmelCase : Optional[Any] = VGroup(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
_UpperCAmelCase : int = Text("CPU" , font_size=24 )
_UpperCAmelCase : Any = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = [mem.copy() for i in range(1 )]
_UpperCAmelCase : str = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
_UpperCAmelCase : int = Text("GPU" , font_size=24 )
_UpperCAmelCase : str = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ )
gpu.align_to(lowerCamelCase__ , lowerCamelCase__ )
gpu.set_x(gpu.get_x() - 1 )
self.add(lowerCamelCase__ )
_UpperCAmelCase : List[str] = [mem.copy() for i in range(6 )]
_UpperCAmelCase : Any = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
_UpperCAmelCase : Optional[int] = Text("Model" , font_size=24 )
_UpperCAmelCase : Tuple = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ )
model.move_to([3, -1.0, 0] )
self.play(
Create(lowerCamelCase__ , run_time=1 ) , Create(lowerCamelCase__ , run_time=1 ) , Create(lowerCamelCase__ , run_time=1 ) , )
_UpperCAmelCase : int = MarkupText(
F"""First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.""" , font_size=24 , )
_UpperCAmelCase : Any = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_UpperCAmelCase : Union[str, Any] = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase__ , run_time=2.5 ) , Write(lowerCamelCase__ ) , Write(lowerCamelCase__ ) )
self.add(lowerCamelCase__ )
_UpperCAmelCase : int = []
_UpperCAmelCase : List[str] = []
_UpperCAmelCase : Dict = []
for i, rect in enumerate(lowerCamelCase__ ):
_UpperCAmelCase : int = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0.0 ).set_fill(lowerCamelCase__ , opacity=0.7 )
cpu_target.move_to(lowerCamelCase__ )
cpu_target.generate_target()
_UpperCAmelCase : Dict = 0.4_6 / 4
_UpperCAmelCase : Any = 0.4_6 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=lowerCamelCase__ )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=lowerCamelCase__ , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=lowerCamelCase__ , buff=0.0 )
cpu_targs.append(lowerCamelCase__ )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(lowerCamelCase__ ) )
second_animations.append(MoveToTarget(lowerCamelCase__ , run_time=1.5 ) )
self.play(*lowerCamelCase__ )
self.play(*lowerCamelCase__ )
self.wait()
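# A sketch of rendering this scene from the command line (substitute the actual scene
# class name, which is obfuscated in this dump):
# manim -pql this_file.py <SceneClassName>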
| 322
| 1
|
'''simple docstring'''
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=1_024 , __lowerCAmelCase=1_024 , __lowerCAmelCase=False , **__lowerCAmelCase ):
_UpperCAmelCase : Any = AutoTokenizer.from_pretrained(__lowerCAmelCase )
_UpperCAmelCase : List[str] = SeqaSeqDataset(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , type_path="train" , **__lowerCAmelCase )
_UpperCAmelCase : Dict = tok.pad_token_id
def get_lens(__lowerCAmelCase ):
_UpperCAmelCase : Union[str, Any] = tqdm(
DataLoader(__lowerCAmelCase , batch_size=512 , num_workers=8 , shuffle=__lowerCAmelCase , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
_UpperCAmelCase : List[str] = []
for batch in dl:
_UpperCAmelCase : Any = batch["input_ids"].ne(__lowerCAmelCase ).sum(1 ).tolist()
_UpperCAmelCase : Tuple = batch["labels"].ne(__lowerCAmelCase ).sum(1 ).tolist()
if consider_target:
for src, tgt in zip(__lowerCAmelCase , __lowerCAmelCase ):
max_lens.append(max(__lowerCAmelCase , __lowerCAmelCase ) )
else:
max_lens.extend(__lowerCAmelCase )
return max_lens
_UpperCAmelCase : Dict = get_lens(__lowerCAmelCase )
_UpperCAmelCase : Optional[Any] = SeqaSeqDataset(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , type_path="val" , **__lowerCAmelCase )
_UpperCAmelCase : Union[str, Any] = get_lens(__lowerCAmelCase )
pickle_save(__lowerCAmelCase , train_ds.len_file )
pickle_save(__lowerCAmelCase , val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
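# A command-line sketch via python-fire (the positional arguments follow the original
# seq2seq utilities, so the exact names and paths below are assumptions):
# python save_len_file.py sshleifer/distilbart-cnn-12-6 ./cnn_dm
# The script pickles per-example max token lengths for the train and val splits so that
# length-based samplers can group batches without re-tokenizing.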
| 322
| 1
|
'''simple docstring'''
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
if index == r:
for j in range(__lowerCAmelCase ):
print(data[j] , end=" " )
print(" " )
return
# When no more elements are there to put in data[]
if i >= n:
return
# current is included, put next at next location
_UpperCAmelCase : List[Any] = arr[i]
combination_util(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , index + 1 , __lowerCAmelCase , i + 1 )
# current is excluded, replace it with
# next (Note that i+1 is passed, but
# index is not changed)
combination_util(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , i + 1 )
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
# A temporary array to store all combination one by one
_UpperCAmelCase : List[Any] = [0] * r
# Print all combination using temporary array 'data[]'
combination_util(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , 0 , __lowerCAmelCase , 0 )
if __name__ == "__main__":
# Driver code to check the function above
lowerCamelCase__ = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
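# Expected output (a sketch; one combination of size r per line, ten lines for C(5, 3)):
# 10 20 30
# 10 20 40
# 10 20 50
# 10 30 40
# ...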
| 322
|
'''simple docstring'''
import pytest
lowerCamelCase__ = '__dummy_dataset1__'
lowerCamelCase__ = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n "tokens": datasets.Sequence(datasets.Value("string")),\n "ner_tags": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n "O",\n "B-PER",\n "I-PER",\n "B-ORG",\n "I-ORG",\n "B-LOC",\n "I-LOC",\n ]\n )\n ),\n "langs": datasets.Sequence(datasets.Value("string")),\n "spans": datasets.Sequence(datasets.Value("string")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, "r", encoding="utf-8") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n'
@pytest.fixture
def __lowerCAmelCase ():
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def __lowerCAmelCase ():
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : Optional[Any] = dataset_loading_script_name
_UpperCAmelCase : Any = tmp_path / "datasets" / script_name
script_dir.mkdir(parents=__lowerCAmelCase )
_UpperCAmelCase : Optional[Any] = script_dir / F"""{script_name}.py"""
with open(__lowerCAmelCase , "w" ) as f:
f.write(__lowerCAmelCase )
return str(__lowerCAmelCase )
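# A sketch of how these fixtures compose in a test (the test body and fixture name are
# hypothetical; the last fixture above writes the loading script under tmp_path and
# returns its directory):
# def test_dummy_dataset(dataset_loading_script_dir):
#     from datasets import load_dataset
#     ds = load_dataset(dataset_loading_script_dir, split="train")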
| 322
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
lowerCamelCase__ = {
'configuration_speecht5': [
'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP',
'SpeechT5Config',
'SpeechT5HifiGanConfig',
],
'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'],
'processing_speecht5': ['SpeechT5Processor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ['SpeechT5Tokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'SpeechT5ForSpeechToText',
'SpeechT5ForSpeechToSpeech',
'SpeechT5ForTextToSpeech',
'SpeechT5Model',
'SpeechT5PreTrainedModel',
'SpeechT5HifiGan',
]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speecht5 import SpeechT5Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
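# A sketch of the lazy-import behavior this sets up (assuming this file is the package
# __init__): importing the package is cheap, and the heavy submodules are only loaded
# on first attribute access.
# import transformers.models.speecht5 as speecht5  # no torch/sentencepiece import yet
# config = speecht5.SpeechT5Config()               # triggers the real submodule import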
| 322
|
'''simple docstring'''
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
lowerCamelCase__ = version.parse(importlib_metadata.version('nltk'))
if NLTK_VERSION >= version.Version('3.6.4'):
from nltk import word_tokenize
lowerCamelCase__ = '\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n'
lowerCamelCase__ = '\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n'
lowerCamelCase__ = '\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n \'meteor\': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric(\'meteor\')\n >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]\n >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results["meteor"], 4))\n 0.6944\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase__ ( datasets.Metric ):
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Optional[int]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"] , reference_urls=[
"https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
"https://en.wikipedia.org/wiki/METEOR",
] , )
def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : List[str] ) ->int:
'''simple docstring'''
import nltk
nltk.download("wordnet" )
if NLTK_VERSION >= version.Version("3.6.5" ):
nltk.download("punkt" )
if NLTK_VERSION >= version.Version("3.6.6" ):
nltk.download("omw-1.4" )
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : List[str] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : int=0.9 , lowerCamelCase__ : Dict=3 , lowerCamelCase__ : Dict=0.5 ) ->Any:
'''simple docstring'''
if NLTK_VERSION >= version.Version("3.6.5" ):
_UpperCAmelCase : Dict = [
meteor_score.single_meteor_score(
word_tokenize(lowerCamelCase__ ) , word_tokenize(lowerCamelCase__ ) , alpha=lowerCamelCase__ , beta=lowerCamelCase__ , gamma=lowerCamelCase__ )
for ref, pred in zip(lowerCamelCase__ , lowerCamelCase__ )
]
else:
_UpperCAmelCase : Optional[int] = [
meteor_score.single_meteor_score(lowerCamelCase__ , lowerCamelCase__ , alpha=lowerCamelCase__ , beta=lowerCamelCase__ , gamma=lowerCamelCase__ )
for ref, pred in zip(lowerCamelCase__ , lowerCamelCase__ )
]
return {"meteor": np.mean(lowerCamelCase__ )}
| 322
| 1
|
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=UpperCAmelCase__ )
class lowerCAmelCase__ ( UpperCAmelCase__ ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
lowerCAmelCase : str = field(default="text-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
lowerCAmelCase : ClassVar[Features] = Features({"text": Value("string" )} )
lowerCAmelCase : ClassVar[Features] = Features({"labels": ClassLabel} )
lowerCAmelCase : str = "text"
lowerCAmelCase : str = "labels"
def lowerCAmelCase__ ( self : str , lowerCamelCase__ : Tuple ) ->Optional[int]:
'''simple docstring'''
if self.label_column not in features:
raise ValueError(F"""Column {self.label_column} is not present in features.""" )
if not isinstance(features[self.label_column] , lowerCamelCase__ ):
raise ValueError(F"""Column {self.label_column} is not a ClassLabel.""" )
_UpperCAmelCase : Any = copy.deepcopy(self )
_UpperCAmelCase : Tuple = self.label_schema.copy()
_UpperCAmelCase : str = features[self.label_column]
_UpperCAmelCase : str = label_schema
return task_template
@property
def lowerCAmelCase__ ( self : int ) ->Dict[str, str]:
'''simple docstring'''
return {
self.text_column: "text",
self.label_column: "labels",
}
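# An alignment sketch (in the original datasets API the method above is
# align_with_features; the call below is illustrative):
# features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
# aligned = task_template.align_with_features(features)  # copies the real ClassLabel into label_schema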
| 322
|
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
lowerCamelCase__ = logging.get_logger(__name__)
class lowerCAmelCase__ ( UpperCAmelCase__ ):
def lowerCAmelCase__ ( self : Optional[Any] , lowerCamelCase__ : int ) ->str:
'''simple docstring'''
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
_UpperCAmelCase : Union[str, Any] = [label.strip() for label in labels.split("," ) if label.strip()]
return labels
def __call__( self : Union[str, Any] , lowerCamelCase__ : Any , lowerCamelCase__ : Any , lowerCamelCase__ : List[Any] ) ->str:
'''simple docstring'''
if len(lowerCamelCase__ ) == 0 or len(lowerCamelCase__ ) == 0:
raise ValueError("You must include at least one label and at least one sequence." )
if hypothesis_template.format(labels[0] ) == hypothesis_template:
raise ValueError(
(
"The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. "
"Make sure the passed template includes formatting syntax such as {{}} where the label should go."
).format(lowerCamelCase__ ) )
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
_UpperCAmelCase : Optional[Any] = [sequences]
_UpperCAmelCase : int = []
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(lowerCamelCase__ )] for label in labels] )
return sequence_pairs, sequences
@add_end_docstrings(UpperCAmelCase__ )
class lowerCAmelCase__ ( UpperCAmelCase__ ):
def __init__( self : Union[str, Any] , lowerCamelCase__ : Optional[Any]=ZeroShotClassificationArgumentHandler() , *lowerCamelCase__ : List[str] , **lowerCamelCase__ : Any ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : Tuple = args_parser
super().__init__(*lowerCamelCase__ , **lowerCamelCase__ )
if self.entailment_id == -1:
logger.warning(
"Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
"-1. Define a descriptive label2id mapping in the model config to ensure correct outputs." )
@property
def lowerCAmelCase__ ( self : Any ) ->Union[str, Any]:
'''simple docstring'''
for label, ind in self.model.config.labelaid.items():
if label.lower().startswith("entail" ):
return ind
return -1
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : Tuple , lowerCamelCase__ : int=True , lowerCamelCase__ : Optional[int]=True , lowerCamelCase__ : str=TruncationStrategy.ONLY_FIRST , **lowerCamelCase__ : List[Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase : int = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
"Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
" `pad_token=eos_token`" )
_UpperCAmelCase : Optional[Any] = self.tokenizer.eos_token
try:
_UpperCAmelCase : List[str] = self.tokenizer(
lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , return_tensors=lowerCamelCase__ , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , )
except Exception as e:
if "too short" in str(lowerCamelCase__ ):
# tokenizers might yell that we want to truncate
# to a value that is not even reached by the input.
# In that case we don't want to truncate.
# It seems there's not a really better way to catch that
# exception.
_UpperCAmelCase : List[Any] = self.tokenizer(
lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , return_tensors=lowerCamelCase__ , padding=lowerCamelCase__ , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def lowerCAmelCase__ ( self : int , **lowerCamelCase__ : Union[str, Any] ) ->Tuple:
'''simple docstring'''
if kwargs.get("multi_class" , lowerCamelCase__ ) is not None:
_UpperCAmelCase : int = kwargs["multi_class"]
logger.warning(
"The `multi_class` argument has been deprecated and renamed to `multi_label`. "
"`multi_class` will be removed in a future version of Transformers." )
_UpperCAmelCase : Dict = {}
if "candidate_labels" in kwargs:
_UpperCAmelCase : List[Any] = self._args_parser._parse_labels(kwargs["candidate_labels"] )
if "hypothesis_template" in kwargs:
_UpperCAmelCase : Dict = kwargs["hypothesis_template"]
_UpperCAmelCase : List[str] = {}
if "multi_label" in kwargs:
_UpperCAmelCase : Optional[Any] = kwargs["multi_label"]
return preprocess_params, {}, postprocess_params
def __call__( self : int , lowerCamelCase__ : Union[str, List[str]] , *lowerCamelCase__ : str , **lowerCamelCase__ : Optional[Any] , ) ->Optional[int]:
'''simple docstring'''
if len(lowerCamelCase__ ) == 0:
pass
elif len(lowerCamelCase__ ) == 1 and "candidate_labels" not in kwargs:
_UpperCAmelCase : int = args[0]
else:
raise ValueError(F"""Unable to understand extra arguments {args}""" )
return super().__call__(lowerCamelCase__ , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Any=None , lowerCamelCase__ : str="This example is {}." ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = self._args_parser(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
for i, (candidate_label, sequence_pair) in enumerate(zip(lowerCamelCase__ , lowerCamelCase__ ) ):
_UpperCAmelCase : Optional[int] = self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(lowerCamelCase__ ) - 1,
**model_input,
}
def lowerCAmelCase__ ( self : Optional[Any] , lowerCamelCase__ : Optional[int] ) ->int:
'''simple docstring'''
_UpperCAmelCase : Dict = inputs["candidate_label"]
_UpperCAmelCase : Optional[int] = inputs["sequence"]
_UpperCAmelCase : Dict = {k: inputs[k] for k in self.tokenizer.model_input_names}
_UpperCAmelCase : List[Any] = self.model(**lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = {
"candidate_label": candidate_label,
"sequence": sequence,
"is_last": inputs["is_last"],
**outputs,
}
return model_outputs
def lowerCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : int , lowerCamelCase__ : Tuple=False ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Any = [outputs["candidate_label"] for outputs in model_outputs]
_UpperCAmelCase : Any = [outputs["sequence"] for outputs in model_outputs]
_UpperCAmelCase : Optional[int] = np.concatenate([output["logits"].numpy() for output in model_outputs] )
_UpperCAmelCase : Optional[Any] = logits.shape[0]
_UpperCAmelCase : Any = len(lowerCamelCase__ )
_UpperCAmelCase : str = N // n
_UpperCAmelCase : str = logits.reshape((num_sequences, n, -1) )
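        # The flat logits hold one row per (sequence, candidate_label) pair, so the
        # reshape above groups them as (num_sequences, num_labels, num_classes) and
        # lets each sequence's candidate labels be scored together.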
if multi_label or len(lowerCamelCase__ ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
_UpperCAmelCase : int = self.entailment_id
_UpperCAmelCase : List[Any] = -1 if entailment_id == 0 else 0
_UpperCAmelCase : str = reshaped_outputs[..., [contradiction_id, entailment_id]]
_UpperCAmelCase : Union[str, Any] = np.exp(lowerCamelCase__ ) / np.exp(lowerCamelCase__ ).sum(-1 , keepdims=lowerCamelCase__ )
_UpperCAmelCase : str = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
_UpperCAmelCase : int = reshaped_outputs[..., self.entailment_id]
_UpperCAmelCase : Union[str, Any] = np.exp(lowerCamelCase__ ) / np.exp(lowerCamelCase__ ).sum(-1 , keepdims=lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
| 322
| 1
|
'''simple docstring'''
from manim import *
class lowerCAmelCase__ ( UpperCAmelCase__ ):
def lowerCAmelCase__ ( self : Union[str, Any] ) ->int:
'''simple docstring'''
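        # This scene animates sharded weight loading: an empty model laid out across
        # CPU/GPU, a loaded checkpoint whose weights are spilled to np.memmaps on
        # disk, and the checkpoint copy then being garbage collected (see captions below).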
_UpperCAmelCase : Optional[Any] = Rectangle(height=0.5 , width=0.5 )
_UpperCAmelCase : Union[str, Any] = Rectangle(height=0.2_5 , width=0.2_5 )
_UpperCAmelCase : str = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
_UpperCAmelCase : List[Any] = [mem.copy() for i in range(6 )]
_UpperCAmelCase : Any = [mem.copy() for i in range(6 )]
_UpperCAmelCase : str = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
_UpperCAmelCase : Optional[Any] = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
_UpperCAmelCase : Tuple = VGroup(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
_UpperCAmelCase : Optional[Any] = Text("CPU" , font_size=24 )
_UpperCAmelCase : List[Any] = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowerCamelCase__ )
_UpperCAmelCase : Tuple = [mem.copy() for i in range(4 )]
_UpperCAmelCase : Optional[int] = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
_UpperCAmelCase : Optional[Any] = Text("GPU" , font_size=24 )
_UpperCAmelCase : List[str] = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ )
gpu.move_to([-1, -1, 0] )
self.add(lowerCamelCase__ )
_UpperCAmelCase : str = [mem.copy() for i in range(6 )]
_UpperCAmelCase : Optional[int] = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
_UpperCAmelCase : str = Text("Model" , font_size=24 )
_UpperCAmelCase : Optional[Any] = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ )
model.move_to([3, -1.0, 0] )
self.add(lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = []
_UpperCAmelCase : Union[str, Any] = []
_UpperCAmelCase : str = []
for i, rect in enumerate(lowerCamelCase__ ):
rect.set_stroke(lowerCamelCase__ )
_UpperCAmelCase : List[Any] = Rectangle(height=0.4_6 / 4 , width=0.4_6 / 3 ).set_stroke(width=0.0 ).set_fill(lowerCamelCase__ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=lowerCamelCase__ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=lowerCamelCase__ , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=lowerCamelCase__ , buff=0.0 )
self.add(lowerCamelCase__ )
model_cpu_arr.append(lowerCamelCase__ )
self.add(*lowerCamelCase__ , *lowerCamelCase__ , *lowerCamelCase__ )
_UpperCAmelCase : List[Any] = [mem.copy() for i in range(6 )]
_UpperCAmelCase : int = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
_UpperCAmelCase : Optional[int] = Text("Loaded Checkpoint" , font_size=24 )
_UpperCAmelCase : Optional[int] = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ )
checkpoint.move_to([3, 0.5, 0] )
self.add(lowerCamelCase__ )
_UpperCAmelCase : Tuple = []
_UpperCAmelCase : Optional[Any] = []
for i, rect in enumerate(lowerCamelCase__ ):
_UpperCAmelCase : Dict = fill.copy().set_fill(lowerCamelCase__ , opacity=0.7 )
target.move_to(lowerCamelCase__ )
ckpt_arr.append(lowerCamelCase__ )
_UpperCAmelCase : int = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(lowerCamelCase__ )
self.add(*lowerCamelCase__ , *lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_UpperCAmelCase : Union[str, Any] = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(lowerCamelCase__ , lowerCamelCase__ )
_UpperCAmelCase : Any = MarkupText(
F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(lowerCamelCase__ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(lowerCamelCase__ )
_UpperCAmelCase : Any = MarkupText(
F"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
_UpperCAmelCase : str = [meta_mem.copy() for i in range(6 )]
_UpperCAmelCase : Union[str, Any] = [meta_mem.copy() for i in range(6 )]
_UpperCAmelCase : str = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
_UpperCAmelCase : Any = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
_UpperCAmelCase : Union[str, Any] = VGroup(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
_UpperCAmelCase : List[str] = Text("Disk" , font_size=24 )
_UpperCAmelCase : Union[str, Any] = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ )
disk.move_to([-4.0, -1.2_5, 0] )
self.play(Write(lowerCamelCase__ , run_time=3 ) , Write(lowerCamelCase__ , run_time=1 ) , Create(lowerCamelCase__ , run_time=1 ) )
_UpperCAmelCase : List[str] = []
for i, rect in enumerate(lowerCamelCase__ ):
_UpperCAmelCase : Dict = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(lowerCamelCase__ , run_time=1.5 ) )
self.play(*lowerCamelCase__ )
self.play(FadeOut(lowerCamelCase__ ) )
_UpperCAmelCase : Any = MarkupText(F"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase__ , run_time=3 ) )
self.play(
FadeOut(lowerCamelCase__ , lowerCamelCase__ , *lowerCamelCase__ , *lowerCamelCase__ ) , )
self.wait()
| 322
|
'''simple docstring'''
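# Project Euler Problem 2: ``solution(n)`` sums the even-valued Fibonacci terms
# that do not exceed ``n`` (four million by default). For example, with n = 10
# the even terms are 2 and 8, so solution(10) returns 10.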
def solution(n = 4_000_000 ):
    even_fibs = []
    a , b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b )
        a , b = b, a + b
    return sum(even_fibs )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 322
| 1
|
'''simple docstring'''
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
    # (Sequential: building the model layer by layer)
    classifier = models.Sequential()
# Step 1 - Convolution
    # Here 64,64 is the length & breadth of the dataset images and 3 is for the RGB channels
    # (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation='relu')
    )
    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation='relu'))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation='relu'))
classifier.add(layers.Dense(units=1, activation='sigmoid'))
# Compiling the CNN
classifier.compile(
optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )
    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
    training_set = train_datagen.flow_from_directory(
        'dataset/training_set', target_size=(64, 64), batch_size=32, class_mode='binary'
    )
    test_set = test_datagen.flow_from_directory(
        'dataset/test_set', target_size=(64, 64), batch_size=32, class_mode='binary'
    )
    # `fit_generator` was removed in recent TensorFlow releases; `fit` accepts
    # generators directly.
    classifier.fit(
        training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
    )
classifier.save('cnn.h5')
# Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
        'dataset/single_prediction/image.png', target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
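    # Add a leading batch axis: the model expects input of shape (1, 64, 64, 3),
    # not a single (64, 64, 3) image.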
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices maps the class names to the 0/1 labels used below.
    # The sigmoid head outputs a probability in [0, 1], so threshold at 0.5 rather
    # than comparing for exact equality with 0 or 1.
    prediction = 'Abnormality detected' if result[0][0] >= 0.5 else 'Normal'
| 322
|
'''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class lowerCAmelCase__ ( unittest.TestCase ):
def __init__( self : Optional[Any] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : List[str]=13 , lowerCamelCase__ : Optional[Any]=7 , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : Any=True , lowerCamelCase__ : List[Any]=True , lowerCamelCase__ : Any=True , lowerCamelCase__ : int=99 , lowerCamelCase__ : int=32 , lowerCamelCase__ : List[str]=5 , lowerCamelCase__ : Optional[Any]=4 , lowerCamelCase__ : Optional[int]=37 , lowerCamelCase__ : Tuple="gelu" , lowerCamelCase__ : Any=0.1 , lowerCamelCase__ : Union[str, Any]=0.1 , lowerCamelCase__ : Optional[int]=5_12 , lowerCamelCase__ : Optional[int]=16 , lowerCamelCase__ : str=2 , lowerCamelCase__ : Union[str, Any]=0.0_2 , lowerCamelCase__ : Tuple=4 , ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = parent
_UpperCAmelCase : List[Any] = batch_size
_UpperCAmelCase : Optional[int] = seq_length
_UpperCAmelCase : int = is_training
_UpperCAmelCase : Dict = use_attention_mask
_UpperCAmelCase : Optional[Any] = use_token_type_ids
_UpperCAmelCase : int = use_labels
_UpperCAmelCase : Optional[int] = vocab_size
_UpperCAmelCase : Any = hidden_size
_UpperCAmelCase : Any = num_hidden_layers
_UpperCAmelCase : List[Any] = num_attention_heads
_UpperCAmelCase : Tuple = intermediate_size
_UpperCAmelCase : int = hidden_act
_UpperCAmelCase : int = hidden_dropout_prob
_UpperCAmelCase : Union[str, Any] = attention_probs_dropout_prob
_UpperCAmelCase : Union[str, Any] = max_position_embeddings
_UpperCAmelCase : Tuple = type_vocab_size
_UpperCAmelCase : List[Any] = type_sequence_label_size
_UpperCAmelCase : Optional[int] = initializer_range
_UpperCAmelCase : Dict = num_choices
def lowerCAmelCase__ ( self : List[Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase : Dict = None
if self.use_attention_mask:
_UpperCAmelCase : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase : Union[str, Any] = None
if self.use_token_type_ids:
_UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase : int = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCAmelCase__ ( self : Any ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Tuple = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : List[Any] = config_and_inputs
_UpperCAmelCase : str = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class lowerCAmelCase__ ( UpperCAmelCase__ , unittest.TestCase ):
lowerCAmelCase : Optional[int] = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
)
if is_flax_available()
else ()
)
def lowerCAmelCase__ ( self : Optional[int] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : int = FlaxAlbertModelTester(self )
@slow
def lowerCAmelCase__ ( self : Any ) ->List[str]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
_UpperCAmelCase : List[str] = model_class_name.from_pretrained("albert-base-v2" )
_UpperCAmelCase : Optional[int] = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCamelCase__ )
@require_flax
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def lowerCAmelCase__ ( self : Tuple ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : str = FlaxAlbertModel.from_pretrained("albert-base-v2" )
_UpperCAmelCase : List[Any] = np.array([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
_UpperCAmelCase : Optional[int] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_UpperCAmelCase : Dict = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ )[0]
_UpperCAmelCase : List[Any] = (1, 11, 7_68)
self.assertEqual(output.shape , lowerCamelCase__ )
_UpperCAmelCase : str = np.array(
[[[-0.6_5_1_3, 1.5_0_3_5, -0.2_7_6_6], [-0.6_5_1_5, 1.5_0_4_6, -0.2_7_8_0], [-0.6_5_1_2, 1.5_0_4_9, -0.2_7_8_4]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , lowerCamelCase__ , atol=1E-4 ) )
| 322
| 1
|
'''simple docstring'''
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
lowerCamelCase__ = 299_792_458
# Symbols
lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ = symbols('ct x y z')
def __lowerCAmelCase (__lowerCAmelCase ):
if velocity > c:
raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!" )
elif velocity < 1:
        # Speeds below 1 m/s are rejected; typical inputs are of the order of c
raise ValueError("Speed must be greater than or equal to 1!" )
return velocity / c
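# Lorentz factor: gamma = 1 / sqrt(1 - beta^2); it equals 1 at rest and diverges
# as the velocity approaches c.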
def __lowerCAmelCase (__lowerCAmelCase ):
return 1 / sqrt(1 - beta(__lowerCAmelCase ) ** 2 )
def __lowerCAmelCase (__lowerCAmelCase ):
return np.array(
[
[gamma(__lowerCAmelCase ), -gamma(__lowerCAmelCase ) * beta(__lowerCAmelCase ), 0, 0],
[-gamma(__lowerCAmelCase ) * beta(__lowerCAmelCase ), gamma(__lowerCAmelCase ), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
] )
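# The matrix above is a boost along the x-axis: ct' = gamma * (ct - beta * x) and
# x' = gamma * (x - beta * ct), while y and z are left unchanged.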
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase = None ):
    # Default to the symbolic four-vector (ct, x, y, z) when no event is supplied
if event is None:
_UpperCAmelCase : Union[str, Any] = np.array([ct, x, y, z] ) # Symbolic four vector
else:
event[0] *= c # x0 is ct (speed of light * time)
return transformation_matrix(__lowerCAmelCase ) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
lowerCamelCase__ = transform(29_979_245)
print('Example of four vector: ')
print(F'''ct\' = {four_vector[0]}''')
print(F'''x\' = {four_vector[1]}''')
print(F'''y\' = {four_vector[2]}''')
print(F'''z\' = {four_vector[3]}''')
# Substitute symbols with numerical values
lowerCamelCase__ = {ct: c, x: 1, y: 1, z: 1}
lowerCamelCase__ = [four_vector[i].subs(sub_dict) for i in range(4)]
print(F'''\n{numerical_vector}''')
| 322
|
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
lowerCamelCase__ = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-classification/requirements.txt')
lowerCamelCase__ = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
lowerCamelCase__ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def __lowerCAmelCase (__lowerCAmelCase ):
with open(__lowerCAmelCase , "rb" ) as f:
_UpperCAmelCase : List[str] = Image.open(__lowerCAmelCase )
return im.convert("RGB" )
@dataclass
class lowerCAmelCase__ :
lowerCAmelCase : Optional[str] = field(
default=UpperCAmelCase__ , metadata={
"help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."
} , )
lowerCAmelCase : Optional[str] = field(
default=UpperCAmelCase__ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
lowerCAmelCase : Optional[str] = field(default=UpperCAmelCase__ , metadata={"help": "A folder containing the training data."} )
lowerCAmelCase : Optional[str] = field(default=UpperCAmelCase__ , metadata={"help": "A folder containing the validation data."} )
lowerCAmelCase : Optional[float] = field(
default=0.15 , metadata={"help": "Percent to split off of train for validation."} )
lowerCAmelCase : Optional[int] = field(
default=UpperCAmelCase__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
lowerCAmelCase : Optional[int] = field(
default=UpperCAmelCase__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def lowerCAmelCase__ ( self : int ) ->List[str]:
'''simple docstring'''
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError(
"You must specify either a dataset name from the hub or a train and/or validation directory." )
@dataclass
class lowerCAmelCase__ :
lowerCAmelCase : str = field(
default="google/vit-base-patch16-224-in21k" , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} , )
lowerCAmelCase : Optional[str] = field(
default=UpperCAmelCase__ , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(UpperCAmelCase__ )} , )
lowerCAmelCase : Optional[str] = field(
default=UpperCAmelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
lowerCAmelCase : Optional[str] = field(
default=UpperCAmelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} )
lowerCAmelCase : str = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
lowerCAmelCase : str = field(default=UpperCAmelCase__ , metadata={"help": "Name or path of preprocessor config."} )
lowerCAmelCase : bool = field(
default=UpperCAmelCase__ , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
lowerCAmelCase : bool = field(
default=UpperCAmelCase__ , metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , )
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : str = torch.stack([example["pixel_values"] for example in examples] )
_UpperCAmelCase : Tuple = torch.tensor([example["labels"] for example in examples] )
return {"pixel_values": pixel_values, "labels": labels}
def __lowerCAmelCase ():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_UpperCAmelCase : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Dict = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_image_classification" , __lowerCAmelCase , __lowerCAmelCase )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_UpperCAmelCase : Optional[Any] = training_args.get_process_log_level()
logger.setLevel(__lowerCAmelCase )
transformers.utils.logging.set_verbosity(__lowerCAmelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, """
        + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
_UpperCAmelCase : List[Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_UpperCAmelCase : Dict = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
if data_args.dataset_name is not None:
_UpperCAmelCase : str = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task="image-classification" , use_auth_token=True if model_args.use_auth_token else None , )
else:
_UpperCAmelCase : List[Any] = {}
if data_args.train_dir is not None:
_UpperCAmelCase : str = os.path.join(data_args.train_dir , "**" )
if data_args.validation_dir is not None:
_UpperCAmelCase : Optional[Any] = os.path.join(data_args.validation_dir , "**" )
_UpperCAmelCase : Any = load_dataset(
"imagefolder" , data_files=__lowerCAmelCase , cache_dir=model_args.cache_dir , task="image-classification" , )
# If we don't have a validation split, split off a percentage of train as validation.
_UpperCAmelCase : int = None if "validation" in dataset.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , __lowerCAmelCase ) and data_args.train_val_split > 0.0:
_UpperCAmelCase : List[Any] = dataset["train"].train_test_split(data_args.train_val_split )
_UpperCAmelCase : List[str] = split["train"]
_UpperCAmelCase : Union[str, Any] = split["test"]
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
_UpperCAmelCase : Optional[int] = dataset["train"].features["labels"].names
_UpperCAmelCase , _UpperCAmelCase : int = {}, {}
for i, label in enumerate(__lowerCAmelCase ):
_UpperCAmelCase : int = str(__lowerCAmelCase )
_UpperCAmelCase : str = label
# Load the accuracy metric from the datasets package
_UpperCAmelCase : int = evaluate.load("accuracy" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(__lowerCAmelCase ):
return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids )
_UpperCAmelCase : Dict = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path , num_labels=len(__lowerCAmelCase ) , label2id=__lowerCAmelCase , id2label=__lowerCAmelCase , finetuning_task="image-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_UpperCAmelCase : List[str] = AutoModelForImageClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=__lowerCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
_UpperCAmelCase : Dict = AutoImageProcessor.from_pretrained(
model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
_UpperCAmelCase : int = image_processor.size["shortest_edge"]
else:
_UpperCAmelCase : int = (image_processor.size["height"], image_processor.size["width"])
_UpperCAmelCase : str = Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
_UpperCAmelCase : Optional[int] = Compose(
[
RandomResizedCrop(__lowerCAmelCase ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
_UpperCAmelCase : Union[str, Any] = Compose(
[
Resize(__lowerCAmelCase ),
CenterCrop(__lowerCAmelCase ),
ToTensor(),
normalize,
] )
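    # The training pipeline augments (random resized crop + horizontal flip) while
    # the validation pipeline is deterministic (resize + center crop), so evaluation
    # results stay reproducible.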
def train_transforms(__lowerCAmelCase ):
_UpperCAmelCase : Optional[Any] = [
_train_transforms(pil_img.convert("RGB" ) ) for pil_img in example_batch["image"]
]
return example_batch
def val_transforms(__lowerCAmelCase ):
_UpperCAmelCase : Union[str, Any] = [_val_transforms(pil_img.convert("RGB" ) ) for pil_img in example_batch["image"]]
return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError("--do_train requires a train dataset" )
if data_args.max_train_samples is not None:
_UpperCAmelCase : Dict = (
dataset["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
dataset["train"].set_transform(__lowerCAmelCase )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError("--do_eval requires a validation dataset" )
if data_args.max_eval_samples is not None:
_UpperCAmelCase : Optional[Any] = (
dataset["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
dataset["validation"].set_transform(__lowerCAmelCase )
    # Initialize our trainer
_UpperCAmelCase : Union[str, Any] = Trainer(
model=__lowerCAmelCase , args=__lowerCAmelCase , train_dataset=dataset["train"] if training_args.do_train else None , eval_dataset=dataset["validation"] if training_args.do_eval else None , compute_metrics=__lowerCAmelCase , tokenizer=__lowerCAmelCase , data_collator=__lowerCAmelCase , )
# Training
if training_args.do_train:
_UpperCAmelCase : Any = None
if training_args.resume_from_checkpoint is not None:
_UpperCAmelCase : List[str] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_UpperCAmelCase : int = last_checkpoint
_UpperCAmelCase : Dict = trainer.train(resume_from_checkpoint=__lowerCAmelCase )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
_UpperCAmelCase : Dict = trainer.evaluate()
trainer.log_metrics("eval" , __lowerCAmelCase )
trainer.save_metrics("eval" , __lowerCAmelCase )
# Write model card and (optionally) push to hub
_UpperCAmelCase : int = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "image-classification",
"dataset": data_args.dataset_name,
"tags": ["image-classification", "vision"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**__lowerCAmelCase )
else:
trainer.create_model_card(**__lowerCAmelCase )
if __name__ == "__main__":
main()
| 322
| 1
|
'''simple docstring'''
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class lowerCAmelCase__ ( unittest.TestCase ):
def __init__( self : int , lowerCamelCase__ : str , lowerCamelCase__ : Optional[Any]=13 , lowerCamelCase__ : List[str]=30 , lowerCamelCase__ : str=2 , lowerCamelCase__ : Union[str, Any]=3 , lowerCamelCase__ : Tuple=True , lowerCamelCase__ : Optional[int]=True , lowerCamelCase__ : Tuple=32 , lowerCamelCase__ : Any=5 , lowerCamelCase__ : Tuple=4 , lowerCamelCase__ : Dict=37 , lowerCamelCase__ : str="gelu" , lowerCamelCase__ : Any=0.1 , lowerCamelCase__ : Optional[Any]=0.1 , lowerCamelCase__ : str=10 , lowerCamelCase__ : List[Any]=0.0_2 , ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = parent
_UpperCAmelCase : List[Any] = batch_size
_UpperCAmelCase : Tuple = image_size
_UpperCAmelCase : str = patch_size
_UpperCAmelCase : Any = num_channels
_UpperCAmelCase : Optional[int] = is_training
_UpperCAmelCase : Union[str, Any] = use_labels
_UpperCAmelCase : List[str] = hidden_size
_UpperCAmelCase : List[str] = num_hidden_layers
_UpperCAmelCase : Dict = num_attention_heads
_UpperCAmelCase : Any = intermediate_size
_UpperCAmelCase : Any = hidden_act
_UpperCAmelCase : List[str] = hidden_dropout_prob
_UpperCAmelCase : Tuple = attention_probs_dropout_prob
_UpperCAmelCase : Dict = type_sequence_label_size
_UpperCAmelCase : Union[str, Any] = initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_UpperCAmelCase : Any = (image_size // patch_size) ** 2
_UpperCAmelCase : Optional[int] = num_patches + 1
def lowerCAmelCase__ ( self : List[Any] ) ->str:
'''simple docstring'''
_UpperCAmelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase : Any = ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , )
return config, pixel_values
def lowerCAmelCase__ ( self : Dict , lowerCamelCase__ : Tuple , lowerCamelCase__ : Tuple ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = FlaxViTModel(config=lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = model(lowerCamelCase__ )
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
_UpperCAmelCase : str = (self.image_size, self.image_size)
_UpperCAmelCase : Any = (self.patch_size, self.patch_size)
_UpperCAmelCase : str = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )
def lowerCAmelCase__ ( self : Dict , lowerCamelCase__ : str , lowerCamelCase__ : str ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Any = self.type_sequence_label_size
_UpperCAmelCase : Any = FlaxViTForImageClassification(config=lowerCamelCase__ )
_UpperCAmelCase : int = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_UpperCAmelCase : Optional[int] = 1
_UpperCAmelCase : str = FlaxViTForImageClassification(lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_UpperCAmelCase : Tuple = model(lowerCamelCase__ )
def lowerCAmelCase__ ( self : List[str] ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Tuple = self.prepare_config_and_inputs()
        _UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = config_and_inputs
_UpperCAmelCase : int = {"pixel_values": pixel_values}
return config, inputs_dict
@require_flax
class lowerCAmelCase__ ( UpperCAmelCase__ , unittest.TestCase ):
lowerCAmelCase : Union[str, Any] = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def lowerCAmelCase__ ( self : Any ) ->None:
'''simple docstring'''
_UpperCAmelCase : Any = FlaxViTModelTester(self )
_UpperCAmelCase : List[str] = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=37 )
def lowerCAmelCase__ ( self : Optional[int] ) ->List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self : str ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def lowerCAmelCase__ ( self : List[Any] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ )
def lowerCAmelCase__ ( self : List[str] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : Dict = model_class(lowerCamelCase__ )
_UpperCAmelCase : Tuple = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase : Tuple = [*signature.parameters.keys()]
_UpperCAmelCase : str = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def lowerCAmelCase__ ( self : int ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_UpperCAmelCase : Union[str, Any] = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
_UpperCAmelCase : List[str] = model_class(lowerCamelCase__ )
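                # Compile the forward pass with jax.jit and verify below that the jitted
                # and eagerly-executed outputs match shape-for-shape.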
@jax.jit
def model_jitted(lowerCamelCase__ : Optional[Any] , **lowerCamelCase__ : str ):
return model(pixel_values=lowerCamelCase__ , **lowerCamelCase__ )
with self.subTest("JIT Enabled" ):
_UpperCAmelCase : Union[str, Any] = model_jitted(**lowerCamelCase__ ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
_UpperCAmelCase : List[Any] = model_jitted(**lowerCamelCase__ ).to_tuple()
self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) )
for jitted_output, output in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def lowerCAmelCase__ ( self : Union[str, Any] ) ->int:
'''simple docstring'''
for model_class_name in self.all_model_classes:
_UpperCAmelCase : Optional[Any] = model_class_name.from_pretrained("google/vit-base-patch16-224" )
_UpperCAmelCase : Optional[Any] = model(np.ones((1, 3, 2_24, 2_24) ) )
self.assertIsNotNone(lowerCamelCase__ )
| 322
|
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
lowerCamelCase__ = logging.get_logger(__name__)
# General docstring
lowerCamelCase__ = 'RegNetConfig'
# Base docstring
lowerCamelCase__ = 'facebook/regnet-y-040'
lowerCamelCase__ = [1, 1_088, 7, 7]
# Image classification docstring
lowerCamelCase__ = 'facebook/regnet-y-040'
lowerCamelCase__ = 'tabby, tabby cat'
lowerCamelCase__ = [
'facebook/regnet-y-040',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class lowerCAmelCase__ ( tf.keras.layers.Layer ):
def __init__( self : int , lowerCamelCase__ : int , lowerCamelCase__ : int = 3 , lowerCamelCase__ : int = 1 , lowerCamelCase__ : int = 1 , lowerCamelCase__ : Optional[str] = "relu" , **lowerCamelCase__ : Tuple , ) ->Optional[Any]:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
_UpperCAmelCase : Optional[Any] = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
_UpperCAmelCase : Dict = tf.keras.layers.ConvaD(
filters=lowerCamelCase__ , kernel_size=lowerCamelCase__ , strides=lowerCamelCase__ , padding="VALID" , groups=lowerCamelCase__ , use_bias=lowerCamelCase__ , name="convolution" , )
_UpperCAmelCase : List[Any] = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" )
_UpperCAmelCase : int = ACTaFN[activation] if activation is not None else tf.identity
def lowerCAmelCase__ ( self : int , lowerCamelCase__ : Tuple ) ->Any:
'''simple docstring'''
_UpperCAmelCase : List[str] = self.convolution(self.padding(lowerCamelCase__ ) )
_UpperCAmelCase : Optional[Any] = self.normalization(lowerCamelCase__ )
_UpperCAmelCase : List[Any] = self.activation(lowerCamelCase__ )
return hidden_state
class lowerCAmelCase__ ( tf.keras.layers.Layer ):
def __init__( self : str , lowerCamelCase__ : RegNetConfig , **lowerCamelCase__ : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
_UpperCAmelCase : List[str] = config.num_channels
_UpperCAmelCase : Any = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="embedder" , )
def lowerCAmelCase__ ( self : Any , lowerCamelCase__ : Optional[Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : List[str] = shape_list(lowerCamelCase__ )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
_UpperCAmelCase : Optional[Any] = tf.transpose(lowerCamelCase__ , perm=(0, 2, 3, 1) )
_UpperCAmelCase : List[Any] = self.embedder(lowerCamelCase__ )
return hidden_state
class lowerCAmelCase__ ( tf.keras.layers.Layer ):
def __init__( self : int , lowerCamelCase__ : int , lowerCamelCase__ : int = 2 , **lowerCamelCase__ : int ) ->Union[str, Any]:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
_UpperCAmelCase : int = tf.keras.layers.ConvaD(
filters=lowerCamelCase__ , kernel_size=1 , strides=lowerCamelCase__ , use_bias=lowerCamelCase__ , name="convolution" )
_UpperCAmelCase : Any = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" )
def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : tf.Tensor , lowerCamelCase__ : bool = False ) ->tf.Tensor:
'''simple docstring'''
return self.normalization(self.convolution(lowerCamelCase__ ) , training=lowerCamelCase__ )
class lowerCAmelCase__ ( tf.keras.layers.Layer ):
def __init__( self : Any , lowerCamelCase__ : int , lowerCamelCase__ : int , **lowerCamelCase__ : Optional[int] ) ->Dict:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
_UpperCAmelCase : List[str] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowerCamelCase__ , name="pooler" )
_UpperCAmelCase : int = [
tf.keras.layers.ConvaD(filters=lowerCamelCase__ , kernel_size=1 , activation="relu" , name="attention.0" ),
tf.keras.layers.ConvaD(filters=lowerCamelCase__ , kernel_size=1 , activation="sigmoid" , name="attention.2" ),
]
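    # Squeeze-and-Excitation: the pooled global descriptor passes through a 1x1 ReLU
    # bottleneck and a 1x1 sigmoid gate, producing per-channel weights that rescale
    # the input feature map in the call below.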
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : Optional[int] ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.pooler(lowerCamelCase__ )
for layer_module in self.attention:
_UpperCAmelCase : str = layer_module(lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = hidden_state * pooled
return hidden_state
class lowerCAmelCase__ ( tf.keras.layers.Layer ):
def __init__( self : Dict , lowerCamelCase__ : RegNetConfig , lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : int = 1 , **lowerCamelCase__ : Any ) ->List[str]:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
_UpperCAmelCase : List[str] = in_channels != out_channels or stride != 1
_UpperCAmelCase : List[str] = max(1 , out_channels // config.groups_width )
_UpperCAmelCase : List[str] = (
TFRegNetShortCut(lowerCamelCase__ , stride=lowerCamelCase__ , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
_UpperCAmelCase : Optional[int] = [
TFRegNetConvLayer(lowerCamelCase__ , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
lowerCamelCase__ , stride=lowerCamelCase__ , groups=lowerCamelCase__ , activation=config.hidden_act , name="layer.1" ),
TFRegNetConvLayer(lowerCamelCase__ , kernel_size=1 , activation=lowerCamelCase__ , name="layer.2" ),
]
_UpperCAmelCase : Union[str, Any] = ACTaFN[config.hidden_act]
def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : Union[str, Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Any = hidden_state
for layer_module in self.layers:
_UpperCAmelCase : List[Any] = layer_module(lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = self.shortcut(lowerCamelCase__ )
hidden_state += residual
_UpperCAmelCase : List[Any] = self.activation(lowerCamelCase__ )
return hidden_state
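# TFRegNetYLayer below mirrors TFRegNetXLayer but inserts a Squeeze-and-Excitation
# block between the grouped convolution and the final 1x1 projection.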
class lowerCAmelCase__ ( tf.keras.layers.Layer ):
def __init__( self : List[Any] , lowerCamelCase__ : RegNetConfig , lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : int = 1 , **lowerCamelCase__ : str ) ->Optional[int]:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = in_channels != out_channels or stride != 1
_UpperCAmelCase : Optional[int] = max(1 , out_channels // config.groups_width )
_UpperCAmelCase : Union[str, Any] = (
TFRegNetShortCut(lowerCamelCase__ , stride=lowerCamelCase__ , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
_UpperCAmelCase : List[Any] = [
TFRegNetConvLayer(lowerCamelCase__ , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
lowerCamelCase__ , stride=lowerCamelCase__ , groups=lowerCamelCase__ , activation=config.hidden_act , name="layer.1" ),
TFRegNetSELayer(lowerCamelCase__ , reduced_channels=int(round(in_channels / 4 ) ) , name="layer.2" ),
TFRegNetConvLayer(lowerCamelCase__ , kernel_size=1 , activation=lowerCamelCase__ , name="layer.3" ),
]
_UpperCAmelCase : int = ACTaFN[config.hidden_act]
def lowerCAmelCase__ ( self : Any , lowerCamelCase__ : str ) ->Any:
'''simple docstring'''
_UpperCAmelCase : int = hidden_state
for layer_module in self.layers:
_UpperCAmelCase : Tuple = layer_module(lowerCamelCase__ )
_UpperCAmelCase : List[Any] = self.shortcut(lowerCamelCase__ )
hidden_state += residual
_UpperCAmelCase : Tuple = self.activation(lowerCamelCase__ )
return hidden_state
class lowerCAmelCase__ ( tf.keras.layers.Layer ):
def __init__( self : str , lowerCamelCase__ : RegNetConfig , lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : int = 2 , lowerCamelCase__ : int = 2 , **lowerCamelCase__ : Union[str, Any] ) ->Optional[int]:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
_UpperCAmelCase : str = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
_UpperCAmelCase : List[str] = [
# downsampling is done in the first layer with stride of 2
layer(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , stride=lowerCamelCase__ , name="layers.0" ),
*[layer(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , name=F"""layers.{i+1}""" ) for i in range(depth - 1 )],
]
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : List[str] ) ->List[str]:
'''simple docstring'''
for layer_module in self.layers:
_UpperCAmelCase : Optional[int] = layer_module(lowerCamelCase__ )
return hidden_state
class lowerCAmelCase__ ( tf.keras.layers.Layer ):
def __init__( self : Dict , lowerCamelCase__ : RegNetConfig , **lowerCamelCase__ : int ) ->Dict:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
lowerCamelCase__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="stages.0" , ) )
_UpperCAmelCase : Dict = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(lowerCamelCase__ , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , depth=lowerCamelCase__ , name=F"""stages.{i+1}""" ) )
def lowerCAmelCase__ ( self : str , lowerCamelCase__ : tf.Tensor , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = True ) ->TFBaseModelOutputWithNoAttention:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_UpperCAmelCase : Optional[Any] = hidden_states + (hidden_state,)
_UpperCAmelCase : Dict = stage_module(lowerCamelCase__ )
if output_hidden_states:
_UpperCAmelCase : Tuple = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=lowerCamelCase__ , hidden_states=lowerCamelCase__ )
@keras_serializable
class lowerCAmelCase__ ( tf.keras.layers.Layer ):
lowerCAmelCase : Optional[Any] = RegNetConfig
def __init__( self : Union[str, Any] , lowerCamelCase__ : Any , **lowerCamelCase__ : str ) ->int:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = config
_UpperCAmelCase : Union[str, Any] = TFRegNetEmbeddings(lowerCamelCase__ , name="embedder" )
_UpperCAmelCase : Union[str, Any] = TFRegNetEncoder(lowerCamelCase__ , name="encoder" )
_UpperCAmelCase : Union[str, Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowerCamelCase__ , name="pooler" )
@unpack_inputs
def lowerCAmelCase__ ( self : Any , lowerCamelCase__ : tf.Tensor , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : bool = False , ) ->TFBaseModelOutputWithPoolingAndNoAttention:
'''simple docstring'''
_UpperCAmelCase : Tuple = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCAmelCase : List[str] = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCAmelCase : Union[str, Any] = self.embedder(lowerCamelCase__ , training=lowerCamelCase__ )
_UpperCAmelCase : str = self.encoder(
lowerCamelCase__ , output_hidden_states=lowerCamelCase__ , return_dict=lowerCamelCase__ , training=lowerCamelCase__ )
_UpperCAmelCase : Dict = encoder_outputs[0]
_UpperCAmelCase : Dict = self.pooler(lowerCamelCase__ )
        # Change outputs back to NCHW format to keep uniformity across the modules
_UpperCAmelCase : Union[str, Any] = tf.transpose(lowerCamelCase__ , perm=(0, 3, 1, 2) )
_UpperCAmelCase : Tuple = tf.transpose(lowerCamelCase__ , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
_UpperCAmelCase : List[str] = tuple([tf.transpose(lowerCamelCase__ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowerCamelCase__ , pooler_output=lowerCamelCase__ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : Tuple = RegNetConfig
lowerCAmelCase : Tuple = "regnet"
lowerCAmelCase : Union[str, Any] = "pixel_values"
@property
def lowerCAmelCase__ ( self : Optional[Any] ) ->Optional[int]:
'''simple docstring'''
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_24, 2_24) , dtype=tf.floataa )}
lowerCamelCase__ = r'\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n'
lowerCamelCase__ = r'\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." , UpperCAmelCase__ , )
class lowerCAmelCase__ ( UpperCAmelCase__ ):
def __init__( self : Any , lowerCamelCase__ : RegNetConfig , *lowerCamelCase__ : Any , **lowerCamelCase__ : List[str] ) ->Optional[int]:
'''simple docstring'''
super().__init__(lowerCamelCase__ , *lowerCamelCase__ , **lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = TFRegNetMainLayer(lowerCamelCase__ , name="regnet" )
@unpack_inputs
@add_start_docstrings_to_model_forward(lowerCamelCase__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCamelCase__ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowerCAmelCase__ ( self : str , lowerCamelCase__ : tf.Tensor , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : Any=False , ) ->Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCAmelCase : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCAmelCase : Union[str, Any] = self.regnet(
pixel_values=lowerCamelCase__ , output_hidden_states=lowerCamelCase__ , return_dict=lowerCamelCase__ , training=lowerCamelCase__ , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , UpperCAmelCase__ , )
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ ):
def __init__( self : str , lowerCamelCase__ : RegNetConfig , *lowerCamelCase__ : List[Any] , **lowerCamelCase__ : Union[str, Any] ) ->Any:
'''simple docstring'''
super().__init__(lowerCamelCase__ , *lowerCamelCase__ , **lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = config.num_labels
_UpperCAmelCase : Dict = TFRegNetMainLayer(lowerCamelCase__ , name="regnet" )
# classification head
_UpperCAmelCase : str = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name="classifier.1" ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(lowerCamelCase__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCamelCase__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowerCAmelCase__ ( self : str , lowerCamelCase__ : tf.Tensor = None , lowerCamelCase__ : tf.Tensor = None , lowerCamelCase__ : bool = None , lowerCamelCase__ : bool = None , lowerCamelCase__ : Dict=False , ) ->Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
'''simple docstring'''
_UpperCAmelCase : str = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCAmelCase : str = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCAmelCase : Union[str, Any] = self.regnet(
lowerCamelCase__ , output_hidden_states=lowerCamelCase__ , return_dict=lowerCamelCase__ , training=lowerCamelCase__ )
_UpperCAmelCase : int = outputs.pooler_output if return_dict else outputs[1]
_UpperCAmelCase : Dict = self.classifier[0](lowerCamelCase__ )
_UpperCAmelCase : str = self.classifier[1](lowerCamelCase__ )
_UpperCAmelCase : Tuple = None if labels is None else self.hf_compute_loss(labels=lowerCamelCase__ , logits=lowerCamelCase__ )
if not return_dict:
_UpperCAmelCase : int = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=lowerCamelCase__ , logits=lowerCamelCase__ , hidden_states=outputs.hidden_states )
| 322
| 1
|
'''simple docstring'''
import gc
import threading
import time
import psutil
import torch
class lowerCAmelCase__ :
def __init__( self : str ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : int = psutil.Process()
_UpperCAmelCase : int = False
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Tuple = -1
while True:
_UpperCAmelCase : Optional[int] = max(self.process.memory_info().rss , self.cpu_memory_peak )
# can't sleep or will not catch the peak right (this comment is here on purpose)
if not self.peak_monitoring:
break
def lowerCAmelCase__ ( self : List[Any] ) ->int:
'''simple docstring'''
_UpperCAmelCase : Any = True
_UpperCAmelCase : Union[str, Any] = threading.Thread(target=self.peak_monitor )
_UpperCAmelCase : Any = True
self.thread.start()
def lowerCAmelCase__ ( self : Union[str, Any] ) ->int:
'''simple docstring'''
_UpperCAmelCase : Any = False
self.thread.join()
return self.cpu_memory_peak
lowerCamelCase__ = PeakCPUMemory()
def __lowerCAmelCase ():
# Time
_UpperCAmelCase : Dict = {"time": time.time()}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
_UpperCAmelCase : List[str] = psutil.Process().memory_info().rss
cpu_peak_tracker.start()
# GPU mem
for i in range(torch.cuda.device_count() ):
_UpperCAmelCase : Union[str, Any] = torch.cuda.memory_allocated(__lowerCAmelCase )
torch.cuda.reset_peak_memory_stats()
return measures
def __lowerCAmelCase (__lowerCAmelCase ):
# Time
_UpperCAmelCase : Union[str, Any] = {"time": time.time() - start_measures["time"]}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
_UpperCAmelCase : List[Any] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
_UpperCAmelCase : Union[str, Any] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20
# GPU mem
for i in range(torch.cuda.device_count() ):
_UpperCAmelCase : List[str] = (torch.cuda.memory_allocated(__lowerCAmelCase ) - start_measures[str(__lowerCAmelCase )]) / 2**20
_UpperCAmelCase : Tuple = (torch.cuda.max_memory_allocated(__lowerCAmelCase ) - start_measures[str(__lowerCAmelCase )]) / 2**20
return measures
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
print(F"""{description}:""" )
print(F"""- Time: {measures['time']:.2f}s""" )
for i in range(torch.cuda.device_count() ):
print(F"""- GPU {i} allocated: {measures[str(__lowerCAmelCase )]:.2f}MiB""" )
_UpperCAmelCase : Union[str, Any] = measures[F"""{i}-peak"""]
print(F"""- GPU {i} peak: {peak:.2f}MiB""" )
print(F"""- CPU RAM allocated: {measures['cpu']:.2f}MiB""" )
print(F"""- CPU RAM peak: {measures['cpu-peak']:.2f}MiB""" )
| 322
|
'''simple docstring'''
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def __lowerCAmelCase (__lowerCAmelCase ):
if is_torch_version("<" , "2.0.0" ) or not hasattr(__lowerCAmelCase , "_dynamo" ):
return False
return isinstance(__lowerCAmelCase , torch._dynamo.eval_frame.OptimizedModule )
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase = True ):
_UpperCAmelCase : Any = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
_UpperCAmelCase : Dict = is_compiled_module(__lowerCAmelCase )
if is_compiled:
_UpperCAmelCase : Optional[int] = model
_UpperCAmelCase : Any = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(__lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : Any = model.module
if not keep_fpaa_wrapper:
_UpperCAmelCase : List[Any] = getattr(__lowerCAmelCase , "forward" )
_UpperCAmelCase : Dict = model.__dict__.pop("_original_forward" , __lowerCAmelCase )
if original_forward is not None:
while hasattr(__lowerCAmelCase , "__wrapped__" ):
_UpperCAmelCase : Optional[int] = forward.__wrapped__
if forward == original_forward:
break
_UpperCAmelCase : Dict = forward
if getattr(__lowerCAmelCase , "_converted_to_transformer_engine" , __lowerCAmelCase ):
convert_model(__lowerCAmelCase , to_transformer_engine=__lowerCAmelCase )
if is_compiled:
_UpperCAmelCase : int = model
_UpperCAmelCase : str = compiled_model
return model
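# Illustrative effect of the unwrapping helper above (the de-obfuscated name
# extract_model_from_parallel is an assumption from the accelerate utils):
#
#   wrapped = torch.nn.DataParallel(torch.nn.Linear(2, 2))
#   inner = extract_model_from_parallel(wrapped)
#   assert isinstance(inner, torch.nn.Linear)   # DDP/DeepSpeed wrappers are peeled off too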
def __lowerCAmelCase ():
PartialState().wait_for_everyone()
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
if PartialState().distributed_type == DistributedType.TPU:
xm.save(__lowerCAmelCase , __lowerCAmelCase )
elif PartialState().local_process_index == 0:
torch.save(__lowerCAmelCase , __lowerCAmelCase )
@contextmanager
def __lowerCAmelCase (**__lowerCAmelCase ):
for key, value in kwargs.items():
_UpperCAmelCase : str = str(__lowerCAmelCase )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
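# Sketch of the context manager above (the name patch_environment is assumed
# from the accelerate utils): each kwarg is upper-cased into os.environ for the
# duration of the block and removed afterwards.
#
#   with patch_environment(cuda_visible_devices="0"):
#       ...  # os.environ["CUDA_VISIBLE_DEVICES"] == "0" inside the block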
def __lowerCAmelCase (__lowerCAmelCase ):
if not hasattr(__lowerCAmelCase , "__qualname__" ) and not hasattr(__lowerCAmelCase , "__name__" ):
_UpperCAmelCase : List[str] = getattr(__lowerCAmelCase , "__class__" , __lowerCAmelCase )
if hasattr(__lowerCAmelCase , "__qualname__" ):
return obj.__qualname__
if hasattr(__lowerCAmelCase , "__name__" ):
return obj.__name__
return str(__lowerCAmelCase )
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
for key, value in source.items():
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : Any = destination.setdefault(__lowerCAmelCase , {} )
merge_dicts(__lowerCAmelCase , __lowerCAmelCase )
else:
_UpperCAmelCase : Optional[int] = value
return destination
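# Worked example for the recursive merge above (the name merge_dicts appears in
# its own recursive call): nested dicts are merged key by key, and scalar values
# from the first argument overwrite the second.
#
#   merge_dicts({"a": {"b": 1}, "x": 0}, {"a": {"c": 2}})
#   # -> {"a": {"c": 2, "b": 1}, "x": 0}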
def __lowerCAmelCase (__lowerCAmelCase = None ):
if port is None:
_UpperCAmelCase : Tuple = 29_500
with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
return s.connect_ex(("localhost", port) ) == 0
| 322
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'shi-labs/nat-mini-in1k-224': 'https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ ):
lowerCAmelCase : Dict = "nat"
lowerCAmelCase : List[str] = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : Optional[int] , lowerCamelCase__ : Tuple=4 , lowerCamelCase__ : Optional[int]=3 , lowerCamelCase__ : Tuple=64 , lowerCamelCase__ : int=[3, 4, 6, 5] , lowerCamelCase__ : Tuple=[2, 4, 8, 16] , lowerCamelCase__ : List[Any]=7 , lowerCamelCase__ : str=3.0 , lowerCamelCase__ : Optional[Any]=True , lowerCamelCase__ : int=0.0 , lowerCamelCase__ : Optional[Any]=0.0 , lowerCamelCase__ : Union[str, Any]=0.1 , lowerCamelCase__ : Any="gelu" , lowerCamelCase__ : str=0.0_2 , lowerCamelCase__ : str=1E-5 , lowerCamelCase__ : Any=0.0 , lowerCamelCase__ : Any=None , lowerCamelCase__ : Any=None , **lowerCamelCase__ : Any , ) ->str:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = patch_size
_UpperCAmelCase : Union[str, Any] = num_channels
_UpperCAmelCase : str = embed_dim
_UpperCAmelCase : int = depths
_UpperCAmelCase : Tuple = len(lowerCamelCase__ )
_UpperCAmelCase : Any = num_heads
_UpperCAmelCase : Optional[int] = kernel_size
_UpperCAmelCase : Optional[Any] = mlp_ratio
_UpperCAmelCase : int = qkv_bias
_UpperCAmelCase : List[str] = hidden_dropout_prob
_UpperCAmelCase : Optional[Any] = attention_probs_dropout_prob
_UpperCAmelCase : Dict = drop_path_rate
_UpperCAmelCase : int = hidden_act
_UpperCAmelCase : str = layer_norm_eps
_UpperCAmelCase : Any = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_UpperCAmelCase : Optional[int] = int(embed_dim * 2 ** (len(lowerCamelCase__ ) - 1) )
_UpperCAmelCase : Tuple = layer_scale_init_value
_UpperCAmelCase : Tuple = ["stem"] + [F"""stage{idx}""" for idx in range(1 , len(lowerCamelCase__ ) + 1 )]
_UpperCAmelCase , _UpperCAmelCase : int = get_aligned_output_features_output_indices(
out_features=lowerCamelCase__ , out_indices=lowerCamelCase__ , stage_names=self.stage_names )
| 322
|
'''simple docstring'''
def __lowerCAmelCase (__lowerCAmelCase ):
if number < 0:
raise ValueError("number must not be negative" )
return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
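# Worked examples for the bit trick above (a power of two has a single set bit,
# so n & (n - 1) clears it to zero):
#
#   16 -> 0b10000 & 0b01111 == 0        -> True
#   12 -> 0b01100 & 0b01011 == 0b01000  -> False
#   Edge case: 0 & -1 == 0, so 0 is also reported as a power of two.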
| 322
| 1
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
def lowerCAmelCase__ ( self : str ) ->str:
'''simple docstring'''
_UpperCAmelCase : Dict = tempfile.mkdtemp()
# fmt: off
_UpperCAmelCase : Any = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
_UpperCAmelCase : Optional[int] = dict(zip(lowerCamelCase__ , range(len(lowerCamelCase__ ) ) ) )
_UpperCAmelCase : int = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
_UpperCAmelCase : List[str] = {"unk_token": "<unk>"}
_UpperCAmelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_UpperCAmelCase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowerCamelCase__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(lowerCamelCase__ ) )
_UpperCAmelCase : List[Any] = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
"image_std": [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
}
_UpperCAmelCase : Dict = os.path.join(self.tmpdirname , lowerCamelCase__ )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(lowerCamelCase__ , lowerCamelCase__ )
def lowerCAmelCase__ ( self : Optional[Any] , **lowerCamelCase__ : Optional[Any] ) ->List[Any]:
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token="!" , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : Optional[Any] , **lowerCamelCase__ : Optional[int] ) ->List[str]:
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token="!" , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : List[Any] , **lowerCamelCase__ : Optional[Any] ) ->int:
'''simple docstring'''
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : int ) ->Optional[int]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase__ ( self : Any ) ->int:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
_UpperCAmelCase : List[Any] = [Image.fromarray(np.moveaxis(lowerCamelCase__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowerCAmelCase__ ( self : Optional[int] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : str = self.get_tokenizer()
_UpperCAmelCase : int = self.get_rust_tokenizer()
_UpperCAmelCase : List[Any] = self.get_image_processor()
_UpperCAmelCase : Dict = OwlViTProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
processor_slow.save_pretrained(self.tmpdirname )
_UpperCAmelCase : Optional[Any] = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = OwlViTProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
processor_fast.save_pretrained(self.tmpdirname )
_UpperCAmelCase : Optional[int] = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , lowerCamelCase__ )
self.assertIsInstance(processor_fast.tokenizer , lowerCamelCase__ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , lowerCamelCase__ )
self.assertIsInstance(processor_fast.image_processor , lowerCamelCase__ )
def lowerCAmelCase__ ( self : Optional[int] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : List[str] = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_UpperCAmelCase : Union[str, Any] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
_UpperCAmelCase : int = self.get_image_processor(do_normalize=lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=lowerCamelCase__ )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCamelCase__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCamelCase__ )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : int = self.get_image_processor()
_UpperCAmelCase : List[str] = self.get_tokenizer()
_UpperCAmelCase : Union[str, Any] = OwlViTProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
_UpperCAmelCase : int = self.prepare_image_inputs()
_UpperCAmelCase : Optional[int] = image_processor(lowerCamelCase__ , return_tensors="np" )
_UpperCAmelCase : Union[str, Any] = processor(images=lowerCamelCase__ , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowerCAmelCase__ ( self : int ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.get_image_processor()
_UpperCAmelCase : Dict = self.get_tokenizer()
_UpperCAmelCase : Optional[int] = OwlViTProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
_UpperCAmelCase : Any = "lower newer"
_UpperCAmelCase : Dict = processor(text=lowerCamelCase__ , return_tensors="np" )
_UpperCAmelCase : List[Any] = tokenizer(lowerCamelCase__ , return_tensors="np" )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def lowerCAmelCase__ ( self : str ) ->int:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = self.get_image_processor()
_UpperCAmelCase : List[str] = self.get_tokenizer()
_UpperCAmelCase : int = OwlViTProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = "lower newer"
_UpperCAmelCase : Union[str, Any] = self.prepare_image_inputs()
_UpperCAmelCase : Optional[int] = processor(text=lowerCamelCase__ , images=lowerCamelCase__ )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase__ ):
processor()
def lowerCAmelCase__ ( self : List[Any] ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = "google/owlvit-base-patch32"
_UpperCAmelCase : Dict = OwlViTProcessor.from_pretrained(lowerCamelCase__ )
_UpperCAmelCase : Any = ["cat", "nasa badge"]
_UpperCAmelCase : List[Any] = processor(text=lowerCamelCase__ )
_UpperCAmelCase : Dict = 16
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase__ ):
processor()
def lowerCAmelCase__ ( self : List[str] ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase : Dict = "google/owlvit-base-patch32"
_UpperCAmelCase : Dict = OwlViTProcessor.from_pretrained(lowerCamelCase__ )
_UpperCAmelCase : Dict = [["cat", "nasa badge"], ["person"]]
_UpperCAmelCase : Tuple = processor(text=lowerCamelCase__ )
_UpperCAmelCase : int = 16
_UpperCAmelCase : Any = len(lowerCamelCase__ )
_UpperCAmelCase : Any = max([len(lowerCamelCase__ ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase__ ):
processor()
def lowerCAmelCase__ ( self : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : List[str] = "google/owlvit-base-patch32"
_UpperCAmelCase : Any = OwlViTProcessor.from_pretrained(lowerCamelCase__ )
_UpperCAmelCase : List[Any] = ["cat", "nasa badge"]
_UpperCAmelCase : Optional[Any] = processor(text=lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = 16
_UpperCAmelCase : str = inputs["input_ids"]
_UpperCAmelCase : List[Any] = [
[4_94_06, 23_68, 4_94_07, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[4_94_06, 68_41, 1_13_01, 4_94_07, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def lowerCAmelCase__ ( self : Dict ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Any = self.get_image_processor()
_UpperCAmelCase : str = self.get_tokenizer()
_UpperCAmelCase : Any = OwlViTProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
_UpperCAmelCase : int = self.prepare_image_inputs()
_UpperCAmelCase : Any = self.prepare_image_inputs()
_UpperCAmelCase : List[str] = processor(images=lowerCamelCase__ , query_images=lowerCamelCase__ )
self.assertListEqual(list(inputs.keys() ) , ["query_pixel_values", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase__ ):
processor()
def lowerCAmelCase__ ( self : List[Any] ) ->int:
'''simple docstring'''
_UpperCAmelCase : Dict = self.get_image_processor()
_UpperCAmelCase : List[Any] = self.get_tokenizer()
_UpperCAmelCase : List[Any] = OwlViTProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_UpperCAmelCase : str = processor.batch_decode(lowerCamelCase__ )
_UpperCAmelCase : List[str] = tokenizer.batch_decode(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
| 322
|
'''simple docstring'''
def __lowerCAmelCase (__lowerCAmelCase ):
return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print('Program to check whether a number is a Perfect number or not...')
lowerCamelCase__ = int(input('Enter number: ').strip())
print(F'''{number} is {"" if perfect(number) else "not "}a Perfect Number.''')
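# Worked example: 28 is perfect because its proper divisors 1 + 2 + 4 + 7 + 14
# sum to 28, while 12 is not (1 + 2 + 3 + 4 + 6 = 16 != 12).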
| 322
| 1
|
'''simple docstring'''
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : List[str] = len(__lowerCAmelCase )
_UpperCAmelCase : List[Any] = [[0] * n for i in range(__lowerCAmelCase )]
for i in range(__lowerCAmelCase ):
_UpperCAmelCase : List[Any] = y_points[i]
for i in range(2 , __lowerCAmelCase ):
for j in range(__lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : Dict = (
(xa - x_points[j - i + 1]) * q[j][i - 1]
- (xa - x_points[j]) * q[j - 1][i - 1]
) / (x_points[j] - x_points[j - i + 1])
return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
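# Illustrative call for the Neville interpolation above. The function name and
# the argument order (x_points, y_points, x0) are assumptions traced from the
# body; the first return value is the interpolated value, the second is the
# full table:
#
#   value, _ = neville_interpolate([1, 2, 3, 4], [1, 4, 9, 16], 2.5)
#   # the points lie on y = x**2, so value == 6.25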
| 322
|
'''simple docstring'''
from collections.abc import Sequence
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
return sum(c * (x**i) for i, c in enumerate(__lowerCAmelCase ) )
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : Dict = 0.0
for coeff in reversed(__lowerCAmelCase ):
_UpperCAmelCase : int = result * x + coeff
return result
if __name__ == "__main__":
lowerCamelCase__ = (0.0, 0.0, 5.0, 9.3, 7.0)
lowerCamelCase__ = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
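# Worked check for the driver above: the coefficients are in ascending powers,
# so both evaluate_poly and horner compute
#   5.0 * 10**2 + 9.3 * 10**3 + 7.0 * 10**4 = 79_800.0,
# but Horner's rule needs only one multiply-add per coefficient instead of
# forming each power of x separately.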
| 322
| 1
|
'''simple docstring'''
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
lowerCamelCase__ = getLogger(__name__)
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 8 , __lowerCAmelCase = 1_024 , __lowerCAmelCase="val" , __lowerCAmelCase=None , __lowerCAmelCase=False , __lowerCAmelCase="summarization" , __lowerCAmelCase=None , __lowerCAmelCase=1 , __lowerCAmelCase = None , __lowerCAmelCase="" , **__lowerCAmelCase , ):
_UpperCAmelCase : Optional[Any] = str(__lowerCAmelCase )
assert local_rank is not None
torch.distributed.init_process_group(backend="nccl" , rank=__lowerCAmelCase )
_UpperCAmelCase : Optional[int] = Path(__lowerCAmelCase )
_UpperCAmelCase : List[str] = save_dir.joinpath(F"""rank_{local_rank}_output.json""" )
torch.cuda.set_device(__lowerCAmelCase )
_UpperCAmelCase : str = AutoModelForSeqaSeqLM.from_pretrained(__lowerCAmelCase ).cuda()
if fpaa:
_UpperCAmelCase : str = model.half()
# determine if we need to increase num_beams
use_task_specific_params(__lowerCAmelCase , __lowerCAmelCase ) # update config with task specific params
_UpperCAmelCase : Any = generate_kwargs.pop("num_beams" , model.config.num_beams ) # AttributeError risk?
if num_return_sequences > num_beams:
_UpperCAmelCase : Tuple = num_return_sequences
_UpperCAmelCase : str = AutoTokenizer.from_pretrained(__lowerCAmelCase )
logger.info(F"""Inferred tokenizer type: {tokenizer.__class__}""" ) # if this is wrong, check config.model_type.
if max_source_length is None:
_UpperCAmelCase : str = tokenizer.model_max_length
if prefix is None:
_UpperCAmelCase : Optional[int] = prefix or getattr(model.config , "prefix" , "" ) or ""
_UpperCAmelCase : Dict = SeqaSeqDataset(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , max_target_length=1_024 , type_path=__lowerCAmelCase , n_obs=__lowerCAmelCase , prefix=__lowerCAmelCase , **__lowerCAmelCase , )
# I set shuffle=True for a more accurate progress bar.
# If all the longest samples are first, the prog bar estimate is too high at the beginning.
_UpperCAmelCase : Union[str, Any] = ds.make_sortish_sampler(__lowerCAmelCase , distributed=__lowerCAmelCase , add_extra_examples=__lowerCAmelCase , shuffle=__lowerCAmelCase )
_UpperCAmelCase : Union[str, Any] = DataLoader(__lowerCAmelCase , sampler=__lowerCAmelCase , batch_size=__lowerCAmelCase , collate_fn=ds.collate_fn )
_UpperCAmelCase : int = []
for batch in tqdm(__lowerCAmelCase ):
_UpperCAmelCase : Any = model.generate(
input_ids=batch["input_ids"].to(model.device ) , attention_mask=batch["attention_mask"].to(model.device ) , num_return_sequences=__lowerCAmelCase , num_beams=__lowerCAmelCase , **__lowerCAmelCase , )
_UpperCAmelCase : Dict = tokenizer.batch_decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase )
_UpperCAmelCase : int = batch["ids"]
if num_return_sequences > 1:
_UpperCAmelCase : int = chunks(__lowerCAmelCase , __lowerCAmelCase ) # batch size chunks, each of size num_return_seq
for i, pred in enumerate(__lowerCAmelCase ):
results.append({"pred": pred, "id": ids[i].item()} )
save_json(__lowerCAmelCase , __lowerCAmelCase )
return results, sampler.num_replicas
def __lowerCAmelCase ():
_UpperCAmelCase : int = argparse.ArgumentParser(
epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate" )
parser.add_argument("--data_dir" , type=__lowerCAmelCase , help="like cnn_dm/test.source" )
parser.add_argument(
"--model_name" , type=__lowerCAmelCase , help="like facebook/bart-large-cnn,t5-base, etc." , default="sshleifer/distilbart-xsum-12-3" , )
parser.add_argument("--save_dir" , type=__lowerCAmelCase , help="where to save" , default="tmp_gen" )
parser.add_argument("--max_source_length" , type=__lowerCAmelCase , default=__lowerCAmelCase )
parser.add_argument(
"--type_path" , type=__lowerCAmelCase , default="test" , help="which subset to evaluate typically train/val/test" )
parser.add_argument("--task" , type=__lowerCAmelCase , default="summarization" , help="used for task_specific_params + metrics" )
parser.add_argument("--bs" , type=__lowerCAmelCase , default=8 , required=__lowerCAmelCase , help="batch size" )
parser.add_argument(
"--local_rank" , type=__lowerCAmelCase , default=-1 , required=__lowerCAmelCase , help="should be passed by distributed.launch" )
parser.add_argument(
"--n_obs" , type=__lowerCAmelCase , default=__lowerCAmelCase , required=__lowerCAmelCase , help="How many observations. Defaults to all." )
parser.add_argument(
"--num_return_sequences" , type=__lowerCAmelCase , default=1 , required=__lowerCAmelCase , help="How many sequences to return" )
parser.add_argument(
"--sync_timeout" , type=__lowerCAmelCase , default=600 , required=__lowerCAmelCase , help="How long should master process wait for other processes to finish." , )
parser.add_argument("--src_lang" , type=__lowerCAmelCase , default=__lowerCAmelCase , required=__lowerCAmelCase )
parser.add_argument("--tgt_lang" , type=__lowerCAmelCase , default=__lowerCAmelCase , required=__lowerCAmelCase )
parser.add_argument(
"--prefix" , type=__lowerCAmelCase , required=__lowerCAmelCase , default=__lowerCAmelCase , help="will be added to the begininng of src examples" )
parser.add_argument("--fp16" , action="store_true" )
parser.add_argument("--debug" , action="store_true" )
_UpperCAmelCase : Optional[int] = time.time()
_UpperCAmelCase , _UpperCAmelCase : str = parser.parse_known_args()
_UpperCAmelCase : Optional[Any] = parse_numeric_n_bool_cl_kwargs(__lowerCAmelCase )
if generate_kwargs and args.local_rank <= 0:
print(F"""parsed the following generate kwargs: {generate_kwargs}""" )
_UpperCAmelCase : Optional[Any] = Path(args.save_dir + "_tmp" )
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase ) # this handles locking.
_UpperCAmelCase : Any = list(json_save_dir.glob("rank_*.json" ) )
if intermediate_files:
raise ValueError(F"""Found files at {json_save_dir} please move or remove them.""" )
# In theory, a node could finish and save before another node hits this. If this happens, we can address later.
_UpperCAmelCase : List[Any] = {}
if args.src_lang is not None:
_UpperCAmelCase : Union[str, Any] = args.src_lang
if args.tgt_lang is not None:
_UpperCAmelCase : Optional[Any] = args.tgt_lang
Path(args.save_dir ).mkdir(exist_ok=__lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = eval_data_dir(
args.data_dir , __lowerCAmelCase , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=__lowerCAmelCase , **__lowerCAmelCase , )
if args.local_rank <= 0:
_UpperCAmelCase : str = Path(args.save_dir )
save_dir.mkdir(exist_ok=__lowerCAmelCase )
_UpperCAmelCase : Optional[Any] = gather_results_from_each_node(__lowerCAmelCase , __lowerCAmelCase , args.sync_timeout )
_UpperCAmelCase : str = combine_partial_results(__lowerCAmelCase )
if args.num_return_sequences > 1:
_UpperCAmelCase : Optional[int] = save_dir.joinpath("pseudolabel_results.json" )
print(F"""Saving aggregated results at {save_path}, intermediate in {json_save_dir}/""" )
save_json(__lowerCAmelCase , __lowerCAmelCase )
return
_UpperCAmelCase : Dict = Path(args.data_dir ).joinpath(args.type_path + ".target" )
with open(__lowerCAmelCase ) as f:
_UpperCAmelCase : int = [x.rstrip() for x in f.readlines()][: len(__lowerCAmelCase )]
# Calculate metrics, save metrics, and save _generations.txt
_UpperCAmelCase : str = "translation" in args.task
_UpperCAmelCase : Optional[int] = calculate_bleu if calc_bleu else calculate_rouge
_UpperCAmelCase : Optional[int] = "bleu" if calc_bleu else "rouge"
_UpperCAmelCase : Dict = score_fn(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase : Optional[Any] = len(__lowerCAmelCase )
_UpperCAmelCase : List[str] = time.time() - start_time
_UpperCAmelCase : str = round(runtime / metrics["n_obs"] , 4 )
_UpperCAmelCase : int = num_replicas
# TODO(@stas00): add whatever metadata to metrics
_UpperCAmelCase : Optional[Any] = save_dir.joinpath(F"""{args.type_path}_{metric_name}.json""" )
save_json(__lowerCAmelCase , __lowerCAmelCase , indent=__lowerCAmelCase )
print(__lowerCAmelCase )
write_txt_file(__lowerCAmelCase , save_dir.joinpath(F"""{args.type_path}_generations.txt""" ) )
if args.debug:
write_txt_file(__lowerCAmelCase , save_dir.joinpath(F"""{args.type_path}.target""" ) )
else:
shutil.rmtree(__lowerCAmelCase )
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : Tuple = []
for partial_result in partial_results:
records.extend(__lowerCAmelCase )
_UpperCAmelCase : Optional[int] = sorted(__lowerCAmelCase , key=lambda __lowerCAmelCase : x["id"] )
_UpperCAmelCase : Dict = [x["pred"] for x in records]
return preds
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
# WAIT FOR lots of .json files
_UpperCAmelCase : List[str] = time.time()
logger.info("waiting for all nodes to finish" )
_UpperCAmelCase : Optional[Any] = None
while (time.time() - start_wait) < timeout:
_UpperCAmelCase : Optional[int] = list(save_dir.glob("rank_*.json" ) )
if len(__lowerCAmelCase ) < num_replicas:
continue
try:
# make sure all json files are fully saved
_UpperCAmelCase : Union[str, Any] = lmap(__lowerCAmelCase , __lowerCAmelCase )
return json_data
except JSONDecodeError:
continue
else:
raise TimeoutError("Rank 0 gave up on waiting for other processes" )
# Unreachable
if __name__ == "__main__":
# Usage for MT:
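    # Illustrative multi-GPU launch (the script name, model and paths are
    # placeholders, not taken from this file):
    #
    #   python -m torch.distributed.launch --nproc_per_node=2 run_distributed_eval.py \
    #       --model_name facebook/wmt19-en-de --data_dir wmt_en_de \
    #       --save_dir tmp_gen --task translation --bs 16 --fp16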
run_generate()
| 322
|
'''simple docstring'''
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : List[Any] = len(__lowerCAmelCase )
_UpperCAmelCase : Tuple = sum(__lowerCAmelCase )
_UpperCAmelCase : List[Any] = [[False for x in range(s + 1 )] for y in range(n + 1 )]
for i in range(1 , n + 1 ):
_UpperCAmelCase : Any = True
for i in range(1 , s + 1 ):
_UpperCAmelCase : List[Any] = False
for i in range(1 , n + 1 ):
for j in range(1 , s + 1 ):
_UpperCAmelCase : Optional[int] = dp[i][j - 1]
if arr[i - 1] <= j:
_UpperCAmelCase : Any = dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2 ) , -1 , -1 ):
if dp[n][j] is True:
_UpperCAmelCase : List[Any] = s - 2 * j
break
return diff
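# Worked example for the subset-sum DP above: for [1, 6, 11, 5] the total is 23
# and the best achievable sum not exceeding half of it is j = 11 (the subset
# {11}, leaving {1, 5, 6} with sum 12), so the minimum difference returned is
# 23 - 2 * 11 = 1.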
| 322
| 1
|
'''simple docstring'''
import baseaa
def __lowerCAmelCase (__lowerCAmelCase ):
return baseaa.baaencode(string.encode("utf-8" ) )
def __lowerCAmelCase (__lowerCAmelCase ):
return baseaa.baadecode(__lowerCAmelCase ).decode("utf-8" )
if __name__ == "__main__":
lowerCamelCase__ = 'Hello World!'
lowerCamelCase__ = baseaa_encode(test)
print(encoded)
lowerCamelCase__ = baseaa_decode(encoded)
print(decoded)
| 322
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'microsoft/resnet-50': 'https://huggingface.co/microsoft/resnet-50/blob/main/config.json',
}
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ ):
lowerCAmelCase : int = "resnet"
lowerCAmelCase : Union[str, Any] = ["basic", "bottleneck"]
def __init__( self : Dict , lowerCamelCase__ : Tuple=3 , lowerCamelCase__ : Any=64 , lowerCamelCase__ : Optional[int]=[2_56, 5_12, 10_24, 20_48] , lowerCamelCase__ : int=[3, 4, 6, 3] , lowerCamelCase__ : Dict="bottleneck" , lowerCamelCase__ : Dict="relu" , lowerCamelCase__ : List[Any]=False , lowerCamelCase__ : Any=None , lowerCamelCase__ : int=None , **lowerCamelCase__ : Tuple , ) ->List[str]:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
if layer_type not in self.layer_types:
raise ValueError(F"""layer_type={layer_type} is not one of {','.join(self.layer_types )}""" )
_UpperCAmelCase : str = num_channels
_UpperCAmelCase : List[str] = embedding_size
_UpperCAmelCase : Tuple = hidden_sizes
_UpperCAmelCase : Dict = depths
_UpperCAmelCase : List[Any] = layer_type
_UpperCAmelCase : Optional[int] = hidden_act
_UpperCAmelCase : Tuple = downsample_in_first_stage
_UpperCAmelCase : str = ["stem"] + [F"""stage{idx}""" for idx in range(1 , len(lowerCamelCase__ ) + 1 )]
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = get_aligned_output_features_output_indices(
out_features=lowerCamelCase__ , out_indices=lowerCamelCase__ , stage_names=self.stage_names )
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : Optional[Any] = version.parse("1.11" )
@property
def lowerCAmelCase__ ( self : Optional[Any] ) ->Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def lowerCAmelCase__ ( self : str ) ->float:
'''simple docstring'''
return 1E-3
| 322
| 1
|
'''simple docstring'''
def __lowerCAmelCase (__lowerCAmelCase ):
return credit_card_number.startswith(("34", "35", "37", "4", "5", "6") )
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : List[str] = credit_card_number
_UpperCAmelCase : Optional[Any] = 0
_UpperCAmelCase : str = len(__lowerCAmelCase ) - 2
for i in range(__lowerCAmelCase , -1 , -2 ):
# double the value of every second digit
_UpperCAmelCase : Tuple = int(cc_number[i] )
digit *= 2
        # If doubling a digit results in a two-digit number,
        # i.e. greater than 9 (e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6)
        # to get a single-digit number.
if digit > 9:
digit %= 10
digit += 1
_UpperCAmelCase : Tuple = cc_number[:i] + str(__lowerCAmelCase ) + cc_number[i + 1 :]
total += digit
# Sum up the remaining digits
for i in range(len(__lowerCAmelCase ) - 1 , -1 , -2 ):
total += int(cc_number[i] )
return total % 10 == 0
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : Optional[int] = F"""{credit_card_number} is an invalid credit card number because"""
if not credit_card_number.isdigit():
print(F"""{error_message} it has nonnumerical characters.""" )
return False
if not 13 <= len(__lowerCAmelCase ) <= 16:
print(F"""{error_message} of its length.""" )
return False
if not validate_initial_digits(__lowerCAmelCase ):
print(F"""{error_message} of its first two digits.""" )
return False
if not luhn_validation(__lowerCAmelCase ):
print(F"""{error_message} it fails the Luhn check.""" )
return False
print(F"""{credit_card_number} is a valid credit card number.""" )
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number('4111111111111111')
validate_credit_card_number('32323')
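# Worked Luhn pass for 4111111111111111: the doubled positions hold one 4 and
# seven 1s, contributing 8 + 7 * 2 = 22; the untouched positions hold eight 1s,
# contributing 8. The total of 30 is divisible by 10, so the number validates,
# while '32323' fails the length check before the Luhn step is even reached.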
| 322
|
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
lowerCamelCase__ = transforms.Compose(
[
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def __lowerCAmelCase (__lowerCAmelCase ):
if isinstance(__lowerCAmelCase , torch.Tensor ):
return image
elif isinstance(__lowerCAmelCase , PIL.Image.Image ):
_UpperCAmelCase : int = [image]
_UpperCAmelCase : str = [trans(img.convert("RGB" ) ) for img in image]
_UpperCAmelCase : Optional[Any] = torch.stack(__lowerCAmelCase )
return image
class lowerCAmelCase__ ( UpperCAmelCase__ ):
def __init__( self : Tuple , lowerCamelCase__ : int , lowerCamelCase__ : int ) ->int:
'''simple docstring'''
super().__init__()
# make sure scheduler can always be converted to DDIM
_UpperCAmelCase : Tuple = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=lowerCamelCase__ , scheduler=lowerCamelCase__ )
def lowerCAmelCase__ ( self : Dict , lowerCamelCase__ : str ) ->Union[str, Any]:
'''simple docstring'''
if strength < 0 or strength > 1:
raise ValueError(F"""The value of strength should in [0.0, 1.0] but is {strength}""" )
def lowerCAmelCase__ ( self : Dict , lowerCamelCase__ : Dict , lowerCamelCase__ : List[str] , lowerCamelCase__ : int ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : str = min(int(num_inference_steps * strength ) , lowerCamelCase__ )
_UpperCAmelCase : str = max(num_inference_steps - init_timestep , 0 )
_UpperCAmelCase : List[str] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : List[str] , lowerCamelCase__ : Any , lowerCamelCase__ : str , lowerCamelCase__ : str , lowerCamelCase__ : Dict , lowerCamelCase__ : Optional[Any]=None ) ->str:
'''simple docstring'''
if not isinstance(lowerCamelCase__ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(lowerCamelCase__ )}""" )
_UpperCAmelCase : Union[str, Any] = image.to(device=lowerCamelCase__ , dtype=lowerCamelCase__ )
if isinstance(lowerCamelCase__ , lowerCamelCase__ ) and len(lowerCamelCase__ ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(lowerCamelCase__ )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
_UpperCAmelCase : List[str] = init_latents.shape
_UpperCAmelCase : Optional[int] = randn_tensor(lowerCamelCase__ , generator=lowerCamelCase__ , device=lowerCamelCase__ , dtype=lowerCamelCase__ )
# get latents
print("add noise to latents at timestep" , lowerCamelCase__ )
_UpperCAmelCase : List[Any] = self.scheduler.add_noise(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
_UpperCAmelCase : List[Any] = init_latents
return latents
@torch.no_grad()
def __call__( self : Any , lowerCamelCase__ : Union[torch.FloatTensor, PIL.Image.Image] = None , lowerCamelCase__ : float = 0.8 , lowerCamelCase__ : int = 1 , lowerCamelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCamelCase__ : float = 0.0 , lowerCamelCase__ : int = 50 , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : Optional[str] = "pil" , lowerCamelCase__ : bool = True , ) ->Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
self.check_inputs(lowerCamelCase__ )
# 2. Preprocess image
_UpperCAmelCase : Dict = preprocess(lowerCamelCase__ )
# 3. set timesteps
self.scheduler.set_timesteps(lowerCamelCase__ , device=self.device )
_UpperCAmelCase , _UpperCAmelCase : Any = self.get_timesteps(lowerCamelCase__ , lowerCamelCase__ , self.device )
_UpperCAmelCase : List[Any] = timesteps[:1].repeat(lowerCamelCase__ )
# 4. Prepare latent variables
_UpperCAmelCase : Optional[int] = self.prepare_latents(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , self.unet.dtype , self.device , lowerCamelCase__ )
_UpperCAmelCase : Any = latents
# 5. Denoising loop
for t in self.progress_bar(lowerCamelCase__ ):
# 1. predict noise model_output
_UpperCAmelCase : Union[str, Any] = self.unet(lowerCamelCase__ , lowerCamelCase__ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in the DDIM paper and should be in [0, 1]
            # do x_t -> x_t-1
_UpperCAmelCase : int = self.scheduler.step(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , eta=lowerCamelCase__ , use_clipped_model_output=lowerCamelCase__ , generator=lowerCamelCase__ , ).prev_sample
_UpperCAmelCase : Dict = (image / 2 + 0.5).clamp(0 , 1 )
_UpperCAmelCase : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_UpperCAmelCase : str = self.numpy_to_pil(lowerCamelCase__ )
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=lowerCamelCase__ )
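# Hypothetical usage sketch: the pipeline class name and the unet/scheduler
# construction below are assumptions, not defined in this file; only the call
# signature (image, strength, eta, num_inference_steps) comes from the code above:
#
#   pipe = ImageToImagePipeline(unet=unet, scheduler=ddim_scheduler)
#   out = pipe(image=pil_image, strength=0.75, num_inference_steps=50, eta=0.0)
#   out.images[0].save("edited.png")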
| 322
| 1
|
'''simple docstring'''
def __lowerCAmelCase (__lowerCAmelCase ):
if not all(x.isalpha() for x in string ):
raise ValueError("String must only contain alphabetic characters." )
_UpperCAmelCase : List[Any] = sorted(string.lower() )
return len(__lowerCAmelCase ) == len(set(__lowerCAmelCase ) )
if __name__ == "__main__":
lowerCamelCase__ = input('Enter a string ').strip()
lowerCamelCase__ = is_isogram(input_str)
print(F'''{input_str} is {"an" if isogram else "not an"} isogram.''')
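# Example: 'Uncopyrightable' is an isogram (fifteen distinct letters, checked
# case-insensitively), while 'letter' is not; a string such as 'a b' raises
# ValueError because of the non-alphabetic space.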
| 322
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
lowerCamelCase__ = list[list[float | int]]
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : int = len(__lowerCAmelCase )
_UpperCAmelCase : Matrix = [[0 for _ in range(size + 1 )] for _ in range(__lowerCAmelCase )]
_UpperCAmelCase : int
_UpperCAmelCase : int
_UpperCAmelCase : int
_UpperCAmelCase : int
_UpperCAmelCase : int
_UpperCAmelCase : float
for row in range(__lowerCAmelCase ):
for col in range(__lowerCAmelCase ):
_UpperCAmelCase : Optional[Any] = matrix[row][col]
_UpperCAmelCase : Optional[int] = vector[row][0]
_UpperCAmelCase : int = 0
_UpperCAmelCase : Union[str, Any] = 0
while row < size and col < size:
# pivoting
_UpperCAmelCase : Optional[Any] = max((abs(augmented[rowa][col] ), rowa) for rowa in range(__lowerCAmelCase , __lowerCAmelCase ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
_UpperCAmelCase , _UpperCAmelCase : str = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , __lowerCAmelCase ):
_UpperCAmelCase : Dict = augmented[rowa][col] / augmented[row][col]
_UpperCAmelCase : Optional[Any] = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , __lowerCAmelCase ):
for row in range(__lowerCAmelCase ):
_UpperCAmelCase : Dict = augmented[row][col] / augmented[col][col]
for cola in range(__lowerCAmelCase , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(__lowerCAmelCase )
]
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : int = len(__lowerCAmelCase )
_UpperCAmelCase : Matrix = [[0 for _ in range(__lowerCAmelCase )] for _ in range(__lowerCAmelCase )]
_UpperCAmelCase : Matrix = [[0] for _ in range(__lowerCAmelCase )]
_UpperCAmelCase : Matrix
_UpperCAmelCase : int
_UpperCAmelCase : int
_UpperCAmelCase : int
for x_val, y_val in enumerate(__lowerCAmelCase ):
for col in range(__lowerCAmelCase ):
_UpperCAmelCase : Dict = (x_val + 1) ** (size - col - 1)
_UpperCAmelCase : int = y_val
_UpperCAmelCase : List[str] = solve(__lowerCAmelCase , __lowerCAmelCase )
def interpolated_func(__lowerCAmelCase ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(__lowerCAmelCase ) )
return interpolated_func
def __lowerCAmelCase (__lowerCAmelCase ):
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def __lowerCAmelCase (__lowerCAmelCase = question_function , __lowerCAmelCase = 10 ):
_UpperCAmelCase : list[int] = [func(__lowerCAmelCase ) for x_val in range(1 , order + 1 )]
_UpperCAmelCase : list[Callable[[int], int]] = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
_UpperCAmelCase : int = 0
_UpperCAmelCase : Callable[[int], int]
_UpperCAmelCase : int
for poly in polynomials:
_UpperCAmelCase : int = 1
while func(__lowerCAmelCase ) == poly(__lowerCAmelCase ):
x_val += 1
ret += poly(__lowerCAmelCase )
return ret
if __name__ == "__main__":
print(F'''{solution() = }''')
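# Worked instance of the first-incorrect-term idea (Project Euler 101): for
# u(n) = n**3, the degree-1 fit through (1, 1) and (2, 8) is 7n - 6, which
# predicts 15 at n = 3 while the true value is 27; that 15 is the first
# incorrect term (FIT) accumulated for the two-point fit before the solution
# moves on to fits through more points.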
| 322
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json',
'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json',
}
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : Dict = "falcon"
lowerCAmelCase : Union[str, Any] = ["past_key_values"]
def __init__( self : Optional[Any] , lowerCamelCase__ : Any=6_50_24 , lowerCamelCase__ : Optional[Any]=45_44 , lowerCamelCase__ : Union[str, Any]=32 , lowerCamelCase__ : List[Any]=71 , lowerCamelCase__ : Any=1E-5 , lowerCamelCase__ : Tuple=0.0_2 , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : List[str]=0.0 , lowerCamelCase__ : Union[str, Any]=0.0 , lowerCamelCase__ : Optional[int]=None , lowerCamelCase__ : Dict=False , lowerCamelCase__ : Dict=False , lowerCamelCase__ : Union[str, Any]=True , lowerCamelCase__ : int=True , lowerCamelCase__ : List[Any]=False , lowerCamelCase__ : str=11 , lowerCamelCase__ : List[Any]=11 , **lowerCamelCase__ : List[str] , ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = vocab_size
# Backward compatibility with n_embed kwarg
_UpperCAmelCase : Any = kwargs.pop("n_embed" , lowerCamelCase__ )
_UpperCAmelCase : Dict = hidden_size if n_embed is None else n_embed
_UpperCAmelCase : List[Any] = num_hidden_layers
_UpperCAmelCase : Union[str, Any] = num_attention_heads
_UpperCAmelCase : Tuple = layer_norm_epsilon
_UpperCAmelCase : Any = initializer_range
_UpperCAmelCase : int = use_cache
_UpperCAmelCase : List[str] = hidden_dropout
_UpperCAmelCase : int = attention_dropout
_UpperCAmelCase : List[str] = bos_token_id
_UpperCAmelCase : Tuple = eos_token_id
_UpperCAmelCase : List[Any] = num_attention_heads if num_kv_heads is None else num_kv_heads
_UpperCAmelCase : Union[str, Any] = alibi
_UpperCAmelCase : str = new_decoder_architecture
_UpperCAmelCase : int = multi_query # Ignored when new_decoder_architecture is True
_UpperCAmelCase : Any = parallel_attn
_UpperCAmelCase : Tuple = bias
super().__init__(bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ , **lowerCamelCase__ )
@property
def lowerCAmelCase__ ( self : Any ) ->str:
'''simple docstring'''
return self.hidden_size // self.num_attention_heads
@property
def lowerCAmelCase__ ( self : str ) ->str:
'''simple docstring'''
return not self.alibi
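# The two properties above correspond to head_dim and rotary in the
# de-obfuscated config (names inferred from FalconConfig): the per-head width
# is hidden_size // num_attention_heads, and rotary position embeddings are
# used exactly when ALiBi positional biases are disabled.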
| 322
|
'''simple docstring'''
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 322
| 1
|
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def __lowerCAmelCase (__lowerCAmelCase ):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
    # like all of the other languages.
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)  #
        or (cp >= 0x20000 and cp <= 0x2A6DF)  #
        or (cp >= 0x2A700 and cp <= 0x2B73F)  #
        or (cp >= 0x2B740 and cp <= 0x2B81F)  #
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
    ):  #
return True
return False
def __lowerCAmelCase (__lowerCAmelCase ):
    # word like '180' or '身高' ("height") or '神' ("god")
for char in word:
_UpperCAmelCase : Union[str, Any] = ord(__lowerCAmelCase )
if not _is_chinese_char(__lowerCAmelCase ):
return 0
return 1
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : Tuple = set()
for token in tokens:
_UpperCAmelCase : Union[str, Any] = len(__lowerCAmelCase ) > 1 and is_chinese(__lowerCAmelCase )
if chinese_word:
word_set.add(__lowerCAmelCase )
_UpperCAmelCase : str = list(__lowerCAmelCase )
return word_list
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
if not chinese_word_set:
return bert_tokens
_UpperCAmelCase : int = max([len(__lowerCAmelCase ) for w in chinese_word_set] )
_UpperCAmelCase : Optional[int] = bert_tokens
_UpperCAmelCase , _UpperCAmelCase : Dict = 0, len(__lowerCAmelCase )
while start < end:
_UpperCAmelCase : List[Any] = True
if is_chinese(bert_word[start] ):
_UpperCAmelCase : Optional[Any] = min(end - start , __lowerCAmelCase )
for i in range(__lowerCAmelCase , 1 , -1 ):
_UpperCAmelCase : Tuple = "".join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
_UpperCAmelCase : Any = "##" + bert_word[j]
_UpperCAmelCase : List[Any] = start + i
_UpperCAmelCase : Dict = False
break
if single_word:
start += 1
return bert_word
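# Illustrative behaviour of the rewrite above: given BERT tokens
# ['身', '高', '180'] and a segmenter word set containing '身高', the second
# token is rewritten to '##高', marking it as the continuation of a whole word
# so that whole-word masking treats '身高' as one unit; '180' is left alone
# because it contains no CJK characters.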
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : Union[str, Any] = []
for i in range(0 , len(__lowerCAmelCase ) , 100 ):
_UpperCAmelCase : Union[str, Any] = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=["cws"] ).cws
_UpperCAmelCase : Union[str, Any] = [get_chinese_word(__lowerCAmelCase ) for r in res]
ltp_res.extend(__lowerCAmelCase )
assert len(__lowerCAmelCase ) == len(__lowerCAmelCase )
_UpperCAmelCase : Union[str, Any] = []
for i in range(0 , len(__lowerCAmelCase ) , 100 ):
_UpperCAmelCase : List[Any] = bert_tokenizer(lines[i : i + 100] , add_special_tokens=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=512 )
bert_res.extend(res["input_ids"] )
assert len(__lowerCAmelCase ) == len(__lowerCAmelCase )
_UpperCAmelCase : Optional[int] = []
for input_ids, chinese_word in zip(__lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : Optional[Any] = []
for id in input_ids:
_UpperCAmelCase : Optional[Any] = bert_tokenizer._convert_id_to_token(__lowerCAmelCase )
input_tokens.append(__lowerCAmelCase )
_UpperCAmelCase : Optional[int] = add_sub_symbol(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase : Dict = []
        # We only save the positions of Chinese subwords starting with "##", which means they are part of a whole word.
for i, token in enumerate(__lowerCAmelCase ):
if token[:2] == "##":
_UpperCAmelCase : Optional[Any] = token[2:]
# save chinese tokens' pos
if len(__lowerCAmelCase ) == 1 and _is_chinese_char(ord(__lowerCAmelCase ) ):
ref_id.append(__lowerCAmelCase )
ref_ids.append(__lowerCAmelCase )
assert len(__lowerCAmelCase ) == len(__lowerCAmelCase )
return ref_ids
def __lowerCAmelCase (__lowerCAmelCase ):
    # For Chinese (Ro)BERT, the best result comes from RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these models, we have to use the same tokenizer: LTP (https://github.com/HIT-SCIR/ltp)
with open(args.file_name , "r" , encoding="utf-8" ) as f:
_UpperCAmelCase : Optional[int] = f.readlines()
_UpperCAmelCase : Union[str, Any] = [line.strip() for line in data if len(__lowerCAmelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
    _UpperCAmelCase : str = LTP(args.ltp ) # faster on a GPU
_UpperCAmelCase : List[Any] = BertTokenizer.from_pretrained(args.bert )
_UpperCAmelCase : Any = prepare_ref(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
with open(args.save_path , "w" , encoding="utf-8" ) as f:
_UpperCAmelCase : Optional[int] = [json.dumps(__lowerCAmelCase ) + "\n" for ref in ref_ids]
f.writelines(__lowerCAmelCase )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
required=False,
type=str,
default='./resources/chinese-demo.txt',
    help='file to process; same as the training data for the LM',
)
parser.add_argument(
'--ltp',
required=False,
type=str,
default='./resources/ltp',
help='resources for LTP tokenizer, usually a path',
)
parser.add_argument(
'--bert',
required=False,
type=str,
default='./resources/robert',
help='resources for Bert tokenizer',
)
parser.add_argument(
'--save_path',
required=False,
type=str,
default='./resources/ref.txt',
    help='path to save the results',
)
lowerCamelCase__ = parser.parse_args()
main(args)
| 322
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
lowerCamelCase__ = TypeVar('T')
class lowerCAmelCase__ ( Generic[T] ):
def __init__( self : Union[str, Any] , lowerCamelCase__ : T ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase : Dict = data
_UpperCAmelCase : Node[T] | None = None
def __str__( self : Any ) ->str:
'''simple docstring'''
return F"""{self.data}"""
class lowerCAmelCase__ ( Generic[T] ):
def __init__( self : Tuple ) ->None:
'''simple docstring'''
_UpperCAmelCase : Node[T] | None = None
def __iter__( self : List[str] ) ->Iterator[T]:
'''simple docstring'''
_UpperCAmelCase : Any = self.top
while node:
yield node.data
_UpperCAmelCase : Dict = node.next
def __str__( self : Dict ) ->str:
'''simple docstring'''
return "->".join([str(lowerCamelCase__ ) for item in self] )
def __len__( self : Optional[int] ) ->int:
'''simple docstring'''
return len(tuple(iter(self ) ) )
def lowerCAmelCase__ ( self : List[Any] ) ->bool:
'''simple docstring'''
return self.top is None
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : T ) ->None:
'''simple docstring'''
_UpperCAmelCase : List[Any] = Node(lowerCamelCase__ )
if not self.is_empty():
_UpperCAmelCase : Tuple = self.top
_UpperCAmelCase : List[str] = node
def lowerCAmelCase__ ( self : Union[str, Any] ) ->T:
'''simple docstring'''
if self.is_empty():
raise IndexError("pop from empty stack" )
        assert isinstance(self.top , Node )
_UpperCAmelCase : Optional[Any] = self.top
_UpperCAmelCase : Optional[Any] = self.top.next
return pop_node.data
def lowerCAmelCase__ ( self : Union[str, Any] ) ->T:
'''simple docstring'''
if self.is_empty():
raise IndexError("peek from empty stack" )
assert self.top is not None
return self.top.data
def lowerCAmelCase__ ( self : List[Any] ) ->None:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = None
if __name__ == "__main__":
from doctest import testmod
testmod()
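    # Doctest-style usage of the generic stack above (the de-obfuscated class
    # name Stack is an assumption):
    #
    #   >>> s = Stack()
    #   >>> s.push(1)
    #   >>> s.push(2)
    #   >>> s.pop()
    #   2
    #   >>> s.peek()
    #   1
    #   >>> len(s)
    #   1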
| 322
| 1
|
'''simple docstring'''
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class lowerCAmelCase__ ( unittest.TestCase ):
def lowerCAmelCase__ ( self : Optional[Any] ) ->int:
'''simple docstring'''
debug_launcher(test_script.main )
def lowerCAmelCase__ ( self : Optional[int] ) ->str:
'''simple docstring'''
debug_launcher(test_ops.main )
| 322
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json'
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : int = "speech_to_text_2"
lowerCAmelCase : str = ["past_key_values"]
lowerCAmelCase : int = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : Optional[Any] , lowerCamelCase__ : Tuple=1_00_00 , lowerCamelCase__ : Any=6 , lowerCamelCase__ : Tuple=20_48 , lowerCamelCase__ : List[Any]=4 , lowerCamelCase__ : Tuple=0.0 , lowerCamelCase__ : List[Any]=True , lowerCamelCase__ : Tuple="relu" , lowerCamelCase__ : Dict=2_56 , lowerCamelCase__ : List[Any]=0.1 , lowerCamelCase__ : List[Any]=0.0 , lowerCamelCase__ : Optional[int]=0.0 , lowerCamelCase__ : List[Any]=0.0_2 , lowerCamelCase__ : Tuple=2 , lowerCamelCase__ : Optional[int]=True , lowerCamelCase__ : Any=1 , lowerCamelCase__ : int=0 , lowerCamelCase__ : str=2 , lowerCamelCase__ : List[Any]=10_24 , **lowerCamelCase__ : str , ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Any = vocab_size
_UpperCAmelCase : Optional[int] = d_model
_UpperCAmelCase : List[Any] = decoder_ffn_dim
_UpperCAmelCase : Any = decoder_layers
_UpperCAmelCase : int = decoder_attention_heads
_UpperCAmelCase : Any = dropout
_UpperCAmelCase : List[Any] = attention_dropout
_UpperCAmelCase : Optional[int] = activation_dropout
_UpperCAmelCase : List[Any] = activation_function
_UpperCAmelCase : int = init_std
_UpperCAmelCase : Dict = decoder_layerdrop
_UpperCAmelCase : str = use_cache
_UpperCAmelCase : Union[str, Any] = decoder_layers
_UpperCAmelCase : Optional[Any] = scale_embedding # scale factor will be sqrt(d_model) if True
_UpperCAmelCase : Any = max_target_positions
super().__init__(
pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ , decoder_start_token_id=lowerCamelCase__ , **lowerCamelCase__ , )
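# A minimal usage sketch against the upstream class this config mirrors
# (transformers' Speech2Text2Config); the values below are illustrative.
if __name__ == "__main__":
    from transformers import Speech2Text2Config

    cfg = Speech2Text2Config(d_model=2_56, decoder_layers=4, decoder_attention_heads=4)
    # attribute_map forwards the generic names to the decoder-specific ones
    assert cfg.num_attention_heads == cfg.decoder_attention_heads == 4
    assert cfg.hidden_size == cfg.d_model == 2_56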
| 322
| 1
|
'''simple docstring'''
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
lowerCamelCase__ = ['text', 'image', 'audio']
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : Optional[int] = []
for input_type in input_types:
if input_type == "text":
inputs.append("Text input" )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png" ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3_000 ) )
elif isinstance(__lowerCAmelCase , __lowerCAmelCase ):
inputs.append(create_inputs(__lowerCAmelCase ) )
else:
raise ValueError(F"""Invalid type requested: {input_type}""" )
return inputs
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : Any = []
for output in outputs:
if isinstance(__lowerCAmelCase , (str, AgentText) ):
output_types.append("text" )
elif isinstance(__lowerCAmelCase , (Image.Image, AgentImage) ):
output_types.append("image" )
elif isinstance(__lowerCAmelCase , (torch.Tensor, AgentAudio) ):
output_types.append("audio" )
else:
raise ValueError(F"""Invalid output: {output}""" )
return output_types
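# For example, the mapper above turns ["Text input", torch.ones(3_000)] into ["text", "audio"].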
@is_tool_test
class lowerCAmelCase__ :
def lowerCAmelCase__ ( self : Optional[Any] ) ->Tuple:
'''simple docstring'''
self.assertTrue(hasattr(self.tool , "inputs" ) )
self.assertTrue(hasattr(self.tool , "outputs" ) )
_UpperCAmelCase : int = self.tool.inputs
for _input in inputs:
if isinstance(_input , lowerCamelCase__ ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
_UpperCAmelCase : int = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : Tuple = create_inputs(self.tool.inputs )
_UpperCAmelCase : Tuple = self.tool(*lowerCamelCase__ )
# There is a single output
if len(self.tool.outputs ) == 1:
_UpperCAmelCase : Union[str, Any] = [outputs]
self.assertListEqual(output_types(lowerCamelCase__ ) , self.tool.outputs )
def lowerCAmelCase__ ( self : Dict ) ->Tuple:
'''simple docstring'''
self.assertTrue(hasattr(self.tool , "description" ) )
self.assertTrue(hasattr(self.tool , "default_checkpoint" ) )
self.assertTrue(self.tool.description.startswith("This is a tool that" ) )
def lowerCAmelCase__ ( self : int ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = create_inputs(self.tool.inputs )
_UpperCAmelCase : str = self.tool(*lowerCamelCase__ )
if not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
_UpperCAmelCase : int = [outputs]
self.assertEqual(len(lowerCamelCase__ ) , len(self.tool.outputs ) )
for output, output_type in zip(lowerCamelCase__ , self.tool.outputs ):
_UpperCAmelCase : List[Any] = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(lowerCamelCase__ , lowerCamelCase__ ) )
def lowerCAmelCase__ ( self : List[Any] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = create_inputs(self.tool.inputs )
_UpperCAmelCase : List[Any] = []
for _input, input_type in zip(lowerCamelCase__ , self.tool.inputs ):
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
_UpperCAmelCase : List[Any] = self.tool(*lowerCamelCase__ )
if not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
_UpperCAmelCase : List[Any] = [outputs]
self.assertEqual(len(lowerCamelCase__ ) , len(self.tool.outputs ) )
| 322
|
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
lowerCamelCase__ = logging.getLogger(__name__)
torch.set_grad_enabled(False)
lowerCamelCase__ = 'cuda' if torch.cuda.is_available() else 'cpu'
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase=100 , __lowerCAmelCase=" " ):
_UpperCAmelCase : Any = text.split(__lowerCAmelCase )
return [character.join(text[i : i + n] ).strip() for i in range(0 , len(__lowerCAmelCase ) , __lowerCAmelCase )]
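# Worked example of the chunking above (n=3 for brevity): the text is split on the
# separator and re-joined in fixed-size groups, e.g. "a b c d e" -> ["a b c", "d e"].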
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase , _UpperCAmelCase : Dict = [], []
for title, text in zip(documents["title"] , documents["text"] ):
if text is not None:
for passage in split_text(__lowerCAmelCase ):
titles.append(title if title is not None else "" )
texts.append(__lowerCAmelCase )
return {"title": titles, "text": texts}
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : str = ctx_tokenizer(
documents["title"] , documents["text"] , truncation=__lowerCAmelCase , padding="longest" , return_tensors="pt" )["input_ids"]
_UpperCAmelCase : str = ctx_encoder(input_ids.to(device=__lowerCAmelCase ) , return_dict=__lowerCAmelCase ).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ):
######################################
logger.info("Step 1 - Create the dataset" )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
_UpperCAmelCase : Optional[int] = load_dataset(
"csv" , data_files=[rag_example_args.csv_path] , split="train" , delimiter="\t" , column_names=["title", "text"] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
_UpperCAmelCase : Optional[int] = dataset.map(__lowerCAmelCase , batched=__lowerCAmelCase , num_proc=processing_args.num_proc )
# And compute the embeddings
_UpperCAmelCase : Union[str, Any] = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=__lowerCAmelCase )
_UpperCAmelCase : Optional[int] = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
_UpperCAmelCase : Dict = Features(
{"text": Value("string" ), "title": Value("string" ), "embeddings": Sequence(Value("float32" ) )} ) # optional, save as float32 instead of float64 to save space
_UpperCAmelCase : int = dataset.map(
partial(__lowerCAmelCase , ctx_encoder=__lowerCAmelCase , ctx_tokenizer=__lowerCAmelCase ) , batched=__lowerCAmelCase , batch_size=processing_args.batch_size , features=__lowerCAmelCase , )
# And finally save your dataset
_UpperCAmelCase : List[Any] = os.path.join(rag_example_args.output_dir , "my_knowledge_dataset" )
dataset.save_to_disk(__lowerCAmelCase )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info("Step 2 - Index the dataset" )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
_UpperCAmelCase : Any = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index("embeddings" , custom_index=__lowerCAmelCase )
# And save the index
_UpperCAmelCase : List[str] = os.path.join(rag_example_args.output_dir , "my_knowledge_dataset_hnsw_index.faiss" )
dataset.get_index("embeddings" ).save(__lowerCAmelCase )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
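    # A hedged sketch of querying the index later (assumes a matching DPR question encoder):
    # q_emb = q_encoder(**q_tokenizer(question, return_tensors="pt")).pooler_output[0].numpy()
    # scores, passages = dataset.get_nearest_examples("embeddings", q_emb, k=5)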
@dataclass
class lowerCAmelCase__ :
lowerCAmelCase : str = field(
default=str(Path(UpperCAmelCase__ ).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv" ) , metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"} , )
lowerCAmelCase : Optional[str] = field(
default=UpperCAmelCase__ , metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."} , )
lowerCAmelCase : str = field(
default="facebook/rag-sequence-nq" , metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"} , )
lowerCAmelCase : str = field(
default="facebook/dpr-ctx_encoder-multiset-base" , metadata={
"help": (
"The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
" 'facebook/dpr-ctx_encoder-multiset-base'"
)
} , )
lowerCAmelCase : Optional[str] = field(
default=str(Path(UpperCAmelCase__ ).parent / "test_run" / "dummy-kb" ) , metadata={"help": "Path to a directory where the dataset passages and the index will be saved"} , )
@dataclass
class lowerCAmelCase__ :
lowerCAmelCase : Optional[int] = field(
default=UpperCAmelCase__ , metadata={
"help": "The number of processes to use to split the documents into passages. Default is single process."
} , )
lowerCAmelCase : int = field(
default=16 , metadata={
"help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
} , )
@dataclass
class lowerCAmelCase__ :
lowerCAmelCase : int = field(
default=768 , metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."} , )
lowerCAmelCase : int = field(
default=128 , metadata={
"help": (
"The number of bi-directional links created for every new element during the HNSW index construction."
)
} , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
lowerCamelCase__ = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
lowerCamelCase__ = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
| 322
| 1
|
'''simple docstring'''
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
set_seed(770)
lowerCamelCase__ = {
'c_attn': 'att_proj',
'c_proj': 'out_proj',
'c_fc': 'in_proj',
'transformer.': '',
'h.': 'layers.',
'ln_1': 'layernorm_1',
'ln_2': 'layernorm_2',
'ln_f': 'layernorm_final',
'wpe': 'position_embeds_layer',
'wte': 'input_embeds_layer',
}
lowerCamelCase__ = {
'text_small': {
'repo_id': 'suno/bark',
'file_name': 'text.pt',
},
'coarse_small': {
'repo_id': 'suno/bark',
'file_name': 'coarse.pt',
},
'fine_small': {
'repo_id': 'suno/bark',
'file_name': 'fine.pt',
},
'text': {
'repo_id': 'suno/bark',
'file_name': 'text_2.pt',
},
'coarse': {
'repo_id': 'suno/bark',
'file_name': 'coarse_2.pt',
},
'fine': {
'repo_id': 'suno/bark',
'file_name': 'fine_2.pt',
},
}
lowerCamelCase__ = os.path.dirname(os.path.abspath(__file__))
lowerCamelCase__ = os.path.join(os.path.expanduser('~'), '.cache')
lowerCamelCase__ = os.path.join(os.getenv('XDG_CACHE_HOME', default_cache_dir), 'suno', 'bark_v0')
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase=False ):
_UpperCAmelCase : Any = model_type
if use_small:
key += "_small"
return os.path.join(__lowerCAmelCase , REMOTE_MODEL_PATHS[key]["file_name"] )
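# e.g. model_type "text" with use_small=True maps to the cached file "text.pt" (see REMOTE_MODEL_PATHS)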
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
hf_hub_download(repo_id=__lowerCAmelCase , filename=__lowerCAmelCase , local_dir=__lowerCAmelCase )
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=False , __lowerCAmelCase="text" ):
if model_type == "text":
_UpperCAmelCase : Union[str, Any] = BarkSemanticModel
_UpperCAmelCase : Optional[int] = BarkSemanticConfig
_UpperCAmelCase : List[Any] = BarkSemanticGenerationConfig
elif model_type == "coarse":
_UpperCAmelCase : Any = BarkCoarseModel
_UpperCAmelCase : Dict = BarkCoarseConfig
_UpperCAmelCase : Union[str, Any] = BarkCoarseGenerationConfig
elif model_type == "fine":
_UpperCAmelCase : List[Any] = BarkFineModel
_UpperCAmelCase : List[Any] = BarkFineConfig
_UpperCAmelCase : List[Any] = BarkFineGenerationConfig
else:
raise NotImplementedError()
_UpperCAmelCase : Optional[Any] = F"""{model_type}_small""" if use_small else model_type
_UpperCAmelCase : Optional[int] = REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(__lowerCAmelCase ):
logger.info(F"""{model_type} model not found, downloading into `{CACHE_DIR}`.""" )
_download(model_info["repo_id"] , model_info["file_name"] )
_UpperCAmelCase : int = torch.load(__lowerCAmelCase , map_location=__lowerCAmelCase )
# this is a hack
_UpperCAmelCase : Optional[int] = checkpoint["model_args"]
if "input_vocab_size" not in model_args:
_UpperCAmelCase : Optional[int] = model_args["vocab_size"]
_UpperCAmelCase : str = model_args["vocab_size"]
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
_UpperCAmelCase : Optional[Any] = model_args.pop("n_head" )
_UpperCAmelCase : Tuple = model_args.pop("n_embd" )
_UpperCAmelCase : Tuple = model_args.pop("n_layer" )
_UpperCAmelCase : Optional[Any] = ConfigClass(**checkpoint["model_args"] )
_UpperCAmelCase : str = ModelClass(config=__lowerCAmelCase )
_UpperCAmelCase : Union[str, Any] = GenerationConfigClass()
_UpperCAmelCase : List[Any] = model_generation_config
_UpperCAmelCase : Any = checkpoint["model"]
# fixup checkpoint
_UpperCAmelCase : List[str] = "_orig_mod."
for k, v in list(state_dict.items() ):
if k.startswith(__lowerCAmelCase ):
            # replace part of the key with the corresponding layer name in the HF implementation
_UpperCAmelCase : Optional[Any] = k[len(__lowerCAmelCase ) :]
for old_layer_name in new_layer_name_dict:
_UpperCAmelCase : int = new_k.replace(__lowerCAmelCase , new_layer_name_dict[old_layer_name] )
_UpperCAmelCase : Optional[int] = state_dict.pop(__lowerCAmelCase )
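            # e.g. "_orig_mod.transformer.h.0.attn.c_attn.weight" becomes "layers.0.attn.att_proj.weight"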
_UpperCAmelCase : List[Any] = set(state_dict.keys() ) - set(model.state_dict().keys() )
_UpperCAmelCase : List[Any] = {k for k in extra_keys if not k.endswith(".attn.bias" )}
_UpperCAmelCase : Union[str, Any] = set(model.state_dict().keys() ) - set(state_dict.keys() )
_UpperCAmelCase : Tuple = {k for k in missing_keys if not k.endswith(".attn.bias" )}
if len(__lowerCAmelCase ) != 0:
raise ValueError(F"""extra keys found: {extra_keys}""" )
if len(__lowerCAmelCase ) != 0:
raise ValueError(F"""missing keys: {missing_keys}""" )
model.load_state_dict(__lowerCAmelCase , strict=__lowerCAmelCase )
_UpperCAmelCase : Dict = model.num_parameters(exclude_embeddings=__lowerCAmelCase )
_UpperCAmelCase : str = checkpoint["best_val_loss"].item()
logger.info(F"""model loaded: {round(n_params/1e6 , 1 )}M params, {round(__lowerCAmelCase , 3 )} loss""" )
model.eval()
model.to(__lowerCAmelCase )
del checkpoint, state_dict
return model
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase=False , __lowerCAmelCase="text" ):
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
_UpperCAmelCase : Any = "cpu" # do conversion on cpu
_UpperCAmelCase : Optional[Any] = _get_ckpt_path(__lowerCAmelCase , use_small=__lowerCAmelCase )
_UpperCAmelCase : Tuple = _load_model(__lowerCAmelCase , __lowerCAmelCase , model_type=__lowerCAmelCase , use_small=__lowerCAmelCase )
# load bark initial model
_UpperCAmelCase : int = _bark_load_model(__lowerCAmelCase , "cpu" , model_type=__lowerCAmelCase , use_small=__lowerCAmelCase )
if model_type == "text":
_UpperCAmelCase : Union[str, Any] = bark_model["model"]
if model.num_parameters(exclude_embeddings=__lowerCAmelCase ) != bark_model.get_num_params():
raise ValueError("initial and new models don't have the same number of parameters" )
    # check that the new model produces the same output as the bark model
_UpperCAmelCase : Any = 5
_UpperCAmelCase : List[str] = 10
if model_type in ["text", "coarse"]:
_UpperCAmelCase : List[str] = torch.randint(256 , (batch_size, sequence_length) , dtype=torch.int )
_UpperCAmelCase : Any = bark_model(__lowerCAmelCase )[0]
_UpperCAmelCase : Optional[Any] = model(__lowerCAmelCase )
# take last logits
_UpperCAmelCase : Optional[int] = output_new_model_total.logits[:, [-1], :]
else:
_UpperCAmelCase : Union[str, Any] = 3
_UpperCAmelCase : List[str] = 8
_UpperCAmelCase : Union[str, Any] = torch.randint(256 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
_UpperCAmelCase : str = model(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase : Any = bark_model(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase : List[Any] = output_new_model_total.logits
    # any output difference should come from differences in the self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError("initial and new outputs don't have the same shape" )
if (output_new_model - output_old_model).abs().max().item() > 1e-3:
raise ValueError("initial and new outputs are not equal" )
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
model.save_pretrained(__lowerCAmelCase )
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ):
_UpperCAmelCase : Union[str, Any] = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase : Union[str, Any] = BarkSemanticConfig.from_pretrained(os.path.join(__lowerCAmelCase , "config.json" ) )
_UpperCAmelCase : Union[str, Any] = BarkCoarseConfig.from_pretrained(os.path.join(__lowerCAmelCase , "config.json" ) )
_UpperCAmelCase : Union[str, Any] = BarkFineConfig.from_pretrained(os.path.join(__lowerCAmelCase , "config.json" ) )
_UpperCAmelCase : List[str] = EncodecConfig.from_pretrained("facebook/encodec_24khz" )
_UpperCAmelCase : Any = BarkSemanticModel.from_pretrained(__lowerCAmelCase )
_UpperCAmelCase : Dict = BarkCoarseModel.from_pretrained(__lowerCAmelCase )
_UpperCAmelCase : Tuple = BarkFineModel.from_pretrained(__lowerCAmelCase )
_UpperCAmelCase : Any = EncodecModel.from_pretrained("facebook/encodec_24khz" )
_UpperCAmelCase : Tuple = BarkConfig.from_sub_model_configs(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase : Any = BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
_UpperCAmelCase : int = BarkModel(__lowerCAmelCase )
_UpperCAmelCase : int = semantic
_UpperCAmelCase : List[Any] = coarseAcoustic
_UpperCAmelCase : str = fineAcoustic
_UpperCAmelCase : Union[str, Any] = codec
_UpperCAmelCase : List[Any] = bark_generation_config
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
bark.save_pretrained(__lowerCAmelCase , repo_id=__lowerCAmelCase , push_to_hub=__lowerCAmelCase )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('model_type', type=str, help='text, coarse or fine.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--is_small', action='store_true', help='convert the small version instead of the large.')
lowerCamelCase__ = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
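    # Example invocation (hypothetical script and output names):
    #   python convert_bark_checkpoint.py text ./bark-text-hf --is_small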
| 322
|
'''simple docstring'''
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
lowerCamelCase__ = {
'return_dict': False,
'output_hidden_states': True,
'output_attentions': True,
'torchscript': True,
'torch_dtype': 'float16',
'use_bfloat16': True,
'tf_legacy_loss': True,
'pruned_heads': {'a': 1},
'tie_word_embeddings': False,
'is_decoder': True,
'cross_attention_hidden_size': 128,
'add_cross_attention': True,
'tie_encoder_decoder': True,
'max_length': 50,
'min_length': 3,
'do_sample': True,
'early_stopping': True,
'num_beams': 3,
'num_beam_groups': 3,
'diversity_penalty': 0.5,
'temperature': 2.0,
'top_k': 10,
'top_p': 0.7,
'typical_p': 0.2,
'repetition_penalty': 0.8,
'length_penalty': 0.8,
'no_repeat_ngram_size': 5,
'encoder_no_repeat_ngram_size': 5,
'bad_words_ids': [1, 2, 3],
'num_return_sequences': 3,
'chunk_size_feed_forward': 5,
'output_scores': True,
'return_dict_in_generate': True,
'forced_bos_token_id': 2,
'forced_eos_token_id': 3,
'remove_invalid_values': True,
'architectures': ['BertModel'],
'finetuning_task': 'translation',
'id2label': {0: 'label'},
'label2id': {'label': '0'},
'tokenizer_class': 'BertTokenizerFast',
'prefix': 'prefix',
'bos_token_id': 6,
'pad_token_id': 7,
'eos_token_id': 8,
'sep_token_id': 9,
'decoder_start_token_id': 10,
'exponential_decay_length_penalty': (5, 1.01),
'suppress_tokens': [0, 1],
'begin_suppress_tokens': 2,
'task_specific_params': {'translation': 'some_params'},
'problem_type': 'regression',
}
@is_staging_test
class lowerCAmelCase__ ( unittest.TestCase ):
@classmethod
def lowerCAmelCase__ ( cls : List[str] ) ->str:
'''simple docstring'''
_UpperCAmelCase : Tuple = TOKEN
HfFolder.save_token(lowerCamelCase__ )
@classmethod
def lowerCAmelCase__ ( cls : Union[str, Any] ) ->int:
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id="test-config" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-config-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-config" )
except HTTPError:
pass
def lowerCAmelCase__ ( self : int ) ->Any:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub("test-config" , use_auth_token=self._token )
_UpperCAmelCase : List[str] = BertConfig.from_pretrained(F"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase__ , getattr(lowerCamelCase__ , lowerCamelCase__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-config" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCamelCase__ , repo_id="test-config" , push_to_hub=lowerCamelCase__ , use_auth_token=self._token )
_UpperCAmelCase : Dict = BertConfig.from_pretrained(F"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase__ , getattr(lowerCamelCase__ , lowerCamelCase__ ) )
def lowerCAmelCase__ ( self : Optional[Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : str = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub("valid_org/test-config-org" , use_auth_token=self._token )
_UpperCAmelCase : List[str] = BertConfig.from_pretrained("valid_org/test-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase__ , getattr(lowerCamelCase__ , lowerCamelCase__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-config-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowerCamelCase__ , repo_id="valid_org/test-config-org" , push_to_hub=lowerCamelCase__ , use_auth_token=self._token )
_UpperCAmelCase : int = BertConfig.from_pretrained("valid_org/test-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase__ , getattr(lowerCamelCase__ , lowerCamelCase__ ) )
def lowerCAmelCase__ ( self : List[str] ) ->Any:
'''simple docstring'''
CustomConfig.register_for_auto_class()
_UpperCAmelCase : int = CustomConfig(attribute=42 )
config.push_to_hub("test-dynamic-config" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {"AutoConfig": "custom_configuration.CustomConfig"} )
_UpperCAmelCase : str = AutoConfig.from_pretrained(F"""{USER}/test-dynamic-config""" , trust_remote_code=lowerCamelCase__ )
        # Can't make an isinstance check because the new_config is from the CustomConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , "CustomConfig" )
self.assertEqual(new_config.attribute , 42 )
class lowerCAmelCase__ ( unittest.TestCase ):
def lowerCAmelCase__ ( self : List[str] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Tuple = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
_UpperCAmelCase : Any = c.n_embd + 1 # int
_UpperCAmelCase : List[Any] = c.resid_pdrop + 1.0 # float
_UpperCAmelCase : Tuple = not c.scale_attn_weights # bool
_UpperCAmelCase : List[Any] = c.summary_type + "foo" # str
c.update_from_string(
F"""n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}""" )
self.assertEqual(lowerCamelCase__ , c.n_embd , "mismatch for key: n_embd" )
self.assertEqual(lowerCamelCase__ , c.resid_pdrop , "mismatch for key: resid_pdrop" )
self.assertEqual(lowerCamelCase__ , c.scale_attn_weights , "mismatch for key: scale_attn_weights" )
self.assertEqual(lowerCamelCase__ , c.summary_type , "mismatch for key: summary_type" )
def lowerCAmelCase__ ( self : Optional[Any] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Any = PretrainedConfig()
_UpperCAmelCase : Tuple = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
lowerCamelCase__ , ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"] )
_UpperCAmelCase : List[str] = [key for key, value in config_common_kwargs.items() if value == getattr(lowerCamelCase__ , lowerCamelCase__ )]
if len(lowerCamelCase__ ) > 0:
raise ValueError(
"The following keys are set with the default values in"
" `test_configuration_common.config_common_kwargs` pick another value for them:"
F""" {', '.join(lowerCamelCase__ )}.""" )
def lowerCAmelCase__ ( self : Optional[int] ) ->int:
'''simple docstring'''
with self.assertRaises(lowerCamelCase__ ):
# config is in subfolder, the following should not work without specifying the subfolder
_UpperCAmelCase : Any = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" )
_UpperCAmelCase : Any = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" , subfolder="bert" )
self.assertIsNotNone(lowerCamelCase__ )
def lowerCAmelCase__ ( self : Optional[int] ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = mock.Mock()
_UpperCAmelCase : List[str] = 5_00
_UpperCAmelCase : Dict = {}
_UpperCAmelCase : Tuple = HTTPError
_UpperCAmelCase : Any = {}
# Download this model to make sure it's in the cache.
_UpperCAmelCase : int = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=lowerCamelCase__ ) as mock_head:
_UpperCAmelCase : Union[str, Any] = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
        # This checks that we did call the fake head request
mock_head.assert_called()
def lowerCAmelCase__ ( self : Optional[int] ) ->Any:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = BertConfig.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json" )
def lowerCAmelCase__ ( self : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : int = AutoConfig.from_pretrained("bert-base-cased" )
_UpperCAmelCase : str = ["config.4.0.0.json"]
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(lowerCamelCase__ )
_UpperCAmelCase : Dict = 2
json.dump(configuration.to_dict() , open(os.path.join(lowerCamelCase__ , "config.4.0.0.json" ) , "w" ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
_UpperCAmelCase : Optional[int] = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
_UpperCAmelCase : Dict = ["config.42.0.0.json"]
_UpperCAmelCase : Union[str, Any] = 7_68
configuration.save_pretrained(lowerCamelCase__ )
shutil.move(os.path.join(lowerCamelCase__ , "config.4.0.0.json" ) , os.path.join(lowerCamelCase__ , "config.42.0.0.json" ) )
_UpperCAmelCase : Dict = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertEqual(new_configuration.hidden_size , 7_68 )
def lowerCAmelCase__ ( self : List[str] ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = "hf-internal-testing/test-two-configs"
import transformers as new_transformers
_UpperCAmelCase : Any = "v4.0.0"
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = new_transformers.models.auto.AutoConfig.from_pretrained(
lowerCamelCase__ , return_unused_kwargs=lowerCamelCase__ )
self.assertEqual(new_configuration.hidden_size , 2 )
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(lowerCamelCase__ , {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
_UpperCAmelCase : List[Any] = "v3.0.0"
_UpperCAmelCase : int = old_transformers.models.auto.AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertEqual(old_configuration.hidden_size , 7_68 )
| 322
| 1
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
lowerCamelCase__ = logging.get_logger(__name__)
if is_vision_available():
import PIL
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : List[Any] = ["pixel_values"]
def __init__( self : Optional[int] , lowerCamelCase__ : bool = True , lowerCamelCase__ : Dict[str, int] = None , lowerCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , lowerCamelCase__ : bool = True , lowerCamelCase__ : Dict[str, int] = None , lowerCamelCase__ : bool = True , lowerCamelCase__ : Union[int, float] = 1 / 2_55 , lowerCamelCase__ : bool = True , lowerCamelCase__ : Optional[Union[float, List[float]]] = None , lowerCamelCase__ : Optional[Union[float, List[float]]] = None , lowerCamelCase__ : bool = True , **lowerCamelCase__ : Dict , ) ->None:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
_UpperCAmelCase : List[str] = size if size is not None else {"shortest_edge": 2_24}
_UpperCAmelCase : List[Any] = get_size_dict(lowerCamelCase__ , default_to_square=lowerCamelCase__ )
_UpperCAmelCase : List[Any] = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
_UpperCAmelCase : Union[str, Any] = get_size_dict(lowerCamelCase__ , default_to_square=lowerCamelCase__ , param_name="crop_size" )
_UpperCAmelCase : int = do_resize
_UpperCAmelCase : Tuple = size
_UpperCAmelCase : str = resample
_UpperCAmelCase : List[str] = do_center_crop
_UpperCAmelCase : int = crop_size
_UpperCAmelCase : Dict = do_rescale
_UpperCAmelCase : Any = rescale_factor
_UpperCAmelCase : Optional[int] = do_normalize
_UpperCAmelCase : Tuple = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
_UpperCAmelCase : str = image_std if image_std is not None else OPENAI_CLIP_STD
_UpperCAmelCase : Tuple = do_convert_rgb
def lowerCAmelCase__ ( self : str , lowerCamelCase__ : np.ndarray , lowerCamelCase__ : Dict[str, int] , lowerCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase__ : List[Any] , ) ->np.ndarray:
'''simple docstring'''
_UpperCAmelCase : List[str] = get_size_dict(lowerCamelCase__ , default_to_square=lowerCamelCase__ )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
_UpperCAmelCase : List[str] = get_resize_output_image_size(lowerCamelCase__ , size=size["shortest_edge"] , default_to_square=lowerCamelCase__ )
return resize(lowerCamelCase__ , size=lowerCamelCase__ , resample=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : Optional[Any] , lowerCamelCase__ : np.ndarray , lowerCamelCase__ : Dict[str, int] , lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase__ : str , ) ->np.ndarray:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = get_size_dict(lowerCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
return center_crop(lowerCamelCase__ , size=(size["height"], size["width"]) , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : Optional[Any] , lowerCamelCase__ : np.ndarray , lowerCamelCase__ : Union[int, float] , lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase__ : int , ) ->Any:
'''simple docstring'''
return rescale(lowerCamelCase__ , scale=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : str , lowerCamelCase__ : np.ndarray , lowerCamelCase__ : Union[float, List[float]] , lowerCamelCase__ : Union[float, List[float]] , lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase__ : Optional[Any] , ) ->np.ndarray:
'''simple docstring'''
return normalize(lowerCamelCase__ , mean=lowerCamelCase__ , std=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : Optional[Any] , lowerCamelCase__ : ImageInput , lowerCamelCase__ : bool = None , lowerCamelCase__ : Dict[str, int] = None , lowerCamelCase__ : PILImageResampling = None , lowerCamelCase__ : bool = None , lowerCamelCase__ : int = None , lowerCamelCase__ : bool = None , lowerCamelCase__ : float = None , lowerCamelCase__ : bool = None , lowerCamelCase__ : Optional[Union[float, List[float]]] = None , lowerCamelCase__ : Optional[Union[float, List[float]]] = None , lowerCamelCase__ : bool = None , lowerCamelCase__ : Optional[Union[str, TensorType]] = None , lowerCamelCase__ : Optional[ChannelDimension] = ChannelDimension.FIRST , **lowerCamelCase__ : List[str] , ) ->PIL.Image.Image:
'''simple docstring'''
_UpperCAmelCase : List[Any] = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase : Optional[int] = size if size is not None else self.size
_UpperCAmelCase : Optional[Any] = get_size_dict(lowerCamelCase__ , param_name="size" , default_to_square=lowerCamelCase__ )
_UpperCAmelCase : Tuple = resample if resample is not None else self.resample
_UpperCAmelCase : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCAmelCase : str = crop_size if crop_size is not None else self.crop_size
_UpperCAmelCase : int = get_size_dict(lowerCamelCase__ , param_name="crop_size" , default_to_square=lowerCamelCase__ )
_UpperCAmelCase : Dict = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase : Optional[int] = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase : List[Any] = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase : Optional[Any] = image_mean if image_mean is not None else self.image_mean
_UpperCAmelCase : Dict = image_std if image_std is not None else self.image_std
_UpperCAmelCase : Optional[Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
_UpperCAmelCase : Tuple = make_list_of_images(lowerCamelCase__ )
if not valid_images(lowerCamelCase__ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
_UpperCAmelCase : Optional[int] = [convert_to_rgb(lowerCamelCase__ ) for image in images]
# All transformations expect numpy arrays.
_UpperCAmelCase : Tuple = [to_numpy_array(lowerCamelCase__ ) for image in images]
if do_resize:
_UpperCAmelCase : Tuple = [self.resize(image=lowerCamelCase__ , size=lowerCamelCase__ , resample=lowerCamelCase__ ) for image in images]
if do_center_crop:
_UpperCAmelCase : List[Any] = [self.center_crop(image=lowerCamelCase__ , size=lowerCamelCase__ ) for image in images]
if do_rescale:
_UpperCAmelCase : int = [self.rescale(image=lowerCamelCase__ , scale=lowerCamelCase__ ) for image in images]
if do_normalize:
_UpperCAmelCase : Any = [self.normalize(image=lowerCamelCase__ , mean=lowerCamelCase__ , std=lowerCamelCase__ ) for image in images]
_UpperCAmelCase : str = [to_channel_dimension_format(lowerCamelCase__ , lowerCamelCase__ ) for image in images]
_UpperCAmelCase : str = {"pixel_values": images}
return BatchFeature(data=lowerCamelCase__ , tensor_type=lowerCamelCase__ )
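# A minimal usage sketch against the upstream processor this class mirrors
# (transformers' CLIPImageProcessor); the input image is a synthetic placeholder.
if __name__ == "__main__":
    from PIL import Image
    from transformers import CLIPImageProcessor

    processor = CLIPImageProcessor(size={"shortest_edge": 2_24}, crop_size={"height": 2_24, "width": 2_24})
    batch = processor(images=Image.new("RGB", (6_40, 4_80)), return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 3, 224, 224)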
| 322
|
'''simple docstring'''
from manim import *
class lowerCAmelCase__ ( UpperCAmelCase__ ):
def lowerCAmelCase__ ( self : List[Any] ) ->str:
'''simple docstring'''
_UpperCAmelCase : Dict = Rectangle(height=0.5 , width=0.5 )
_UpperCAmelCase : Optional[Any] = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
_UpperCAmelCase : Optional[Any] = [mem.copy() for i in range(6 )]
_UpperCAmelCase : Optional[Any] = [mem.copy() for i in range(6 )]
_UpperCAmelCase : Dict = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
_UpperCAmelCase : str = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
_UpperCAmelCase : Optional[Any] = VGroup(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
_UpperCAmelCase : int = Text("CPU" , font_size=24 )
_UpperCAmelCase : Any = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = [mem.copy() for i in range(1 )]
_UpperCAmelCase : str = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
_UpperCAmelCase : int = Text("GPU" , font_size=24 )
_UpperCAmelCase : str = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ )
gpu.align_to(lowerCamelCase__ , lowerCamelCase__ )
gpu.set_x(gpu.get_x() - 1 )
self.add(lowerCamelCase__ )
_UpperCAmelCase : List[str] = [mem.copy() for i in range(6 )]
_UpperCAmelCase : Any = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
_UpperCAmelCase : Optional[int] = Text("Model" , font_size=24 )
_UpperCAmelCase : Tuple = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ )
model.move_to([3, -1.0, 0] )
self.play(
Create(lowerCamelCase__ , run_time=1 ) , Create(lowerCamelCase__ , run_time=1 ) , Create(lowerCamelCase__ , run_time=1 ) , )
_UpperCAmelCase : int = MarkupText(
F"""First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.""" , font_size=24 , )
_UpperCAmelCase : Any = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_UpperCAmelCase : Union[str, Any] = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase__ , run_time=2.5 ) , Write(lowerCamelCase__ ) , Write(lowerCamelCase__ ) )
self.add(lowerCamelCase__ )
_UpperCAmelCase : int = []
_UpperCAmelCase : List[str] = []
_UpperCAmelCase : Dict = []
for i, rect in enumerate(lowerCamelCase__ ):
_UpperCAmelCase : int = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0.0 ).set_fill(lowerCamelCase__ , opacity=0.7 )
cpu_target.move_to(lowerCamelCase__ )
cpu_target.generate_target()
_UpperCAmelCase : Dict = 0.4_6 / 4
_UpperCAmelCase : Any = 0.4_6 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=lowerCamelCase__ )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=lowerCamelCase__ , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=lowerCamelCase__ , buff=0.0 )
cpu_targs.append(lowerCamelCase__ )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(lowerCamelCase__ ) )
second_animations.append(MoveToTarget(lowerCamelCase__ , run_time=1.5 ) )
self.play(*lowerCamelCase__ )
self.play(*lowerCamelCase__ )
self.wait()
| 322
| 1
|
'''simple docstring'''
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
lowerCamelCase__ = logging.getLogger()
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : Tuple = "\n".join(__lowerCAmelCase )
Path(__lowerCAmelCase ).open("w" ).writelines(__lowerCAmelCase )
lowerCamelCase__ = 'patrickvonplaten/t5-tiny-random'
lowerCamelCase__ = 'sshleifer/bart-tiny-random'
lowerCamelCase__ = 'sshleifer/tiny-mbart'
lowerCamelCase__ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class lowerCAmelCase__ ( UpperCAmelCase__ ):
def lowerCAmelCase__ ( self : Optional[Any] , lowerCamelCase__ : Any ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase : List[str] = Path(self.get_auto_remove_tmp_dir() ) / "utest_input.source"
_UpperCAmelCase : Dict = input_file_name.parent / "utest_output.txt"
assert not output_file_name.exists()
_UpperCAmelCase : List[str] = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
_dump_articles(lowerCamelCase__ , lowerCamelCase__ )
_UpperCAmelCase : Dict = str(Path(self.get_auto_remove_tmp_dir() ) / "scores.json" )
_UpperCAmelCase : Optional[Any] = "translation_en_to_de" if model == T5_TINY else "summarization"
_UpperCAmelCase : Any = F"""
run_eval_search.py
{model}
{input_file_name}
{output_file_name}
--score_path {score_path}
--task {task}
--num_beams 2
--length_penalty 2.0
""".split()
with patch.object(lowerCamelCase__ , "argv" , lowerCamelCase__ ):
run_generate()
assert Path(lowerCamelCase__ ).exists()
# os.remove(Path(output_file_name))
def lowerCAmelCase__ ( self : List[str] ) ->Optional[Any]:
'''simple docstring'''
self.run_eval_tester(lowerCamelCase__ )
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
def lowerCAmelCase__ ( self : int , lowerCamelCase__ : List[Any] ) ->Dict:
'''simple docstring'''
self.run_eval_tester(lowerCamelCase__ )
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : List[str] ) ->Any:
'''simple docstring'''
_UpperCAmelCase : List[Any] = Path(self.get_auto_remove_tmp_dir() ) / "utest_input.source"
_UpperCAmelCase : Optional[int] = input_file_name.parent / "utest_output.txt"
assert not output_file_name.exists()
_UpperCAmelCase : int = {
"en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"],
"de": [
"Maschinelles Lernen ist großartig, oder?",
"Ich esse gerne Bananen",
"Morgen ist wieder ein toller Tag!",
],
}
_UpperCAmelCase : Tuple = Path(self.get_auto_remove_tmp_dir() )
_UpperCAmelCase : Any = str(tmp_dir / "scores.json" )
_UpperCAmelCase : List[Any] = str(tmp_dir / "val.target" )
_dump_articles(lowerCamelCase__ , text["en"] )
_dump_articles(lowerCamelCase__ , text["de"] )
_UpperCAmelCase : List[str] = "translation_en_to_de" if model == T5_TINY else "summarization"
_UpperCAmelCase : Optional[int] = F"""
run_eval_search.py
{model}
{str(lowerCamelCase__ )}
{str(lowerCamelCase__ )}
--score_path {score_path}
--reference_path {reference_path}
--task {task}
""".split()
testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"] )
with patch.object(lowerCamelCase__ , "argv" , lowerCamelCase__ ):
with CaptureStdout() as cs:
run_search()
_UpperCAmelCase : Dict = [" num_beams | length_penalty", model, "Best score args"]
_UpperCAmelCase : int = ["Info"]
if "translation" in task:
expected_strings.append("bleu" )
else:
expected_strings.extend(lowerCamelCase__ )
for w in expected_strings:
assert w in cs.out
for w in un_expected_strings:
assert w not in cs.out
assert Path(lowerCamelCase__ ).exists()
os.remove(Path(lowerCamelCase__ ) )
| 322
|
'''simple docstring'''
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=1_024 , __lowerCAmelCase=1_024 , __lowerCAmelCase=False , **__lowerCAmelCase ):
_UpperCAmelCase : Any = AutoTokenizer.from_pretrained(__lowerCAmelCase )
_UpperCAmelCase : List[str] = SeqaSeqDataset(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , type_path="train" , **__lowerCAmelCase )
_UpperCAmelCase : Dict = tok.pad_token_id
def get_lens(__lowerCAmelCase ):
_UpperCAmelCase : Union[str, Any] = tqdm(
DataLoader(__lowerCAmelCase , batch_size=512 , num_workers=8 , shuffle=__lowerCAmelCase , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
_UpperCAmelCase : List[str] = []
for batch in dl:
_UpperCAmelCase : Any = batch["input_ids"].ne(__lowerCAmelCase ).sum(1 ).tolist()
_UpperCAmelCase : Tuple = batch["labels"].ne(__lowerCAmelCase ).sum(1 ).tolist()
if consider_target:
for src, tgt in zip(__lowerCAmelCase , __lowerCAmelCase ):
max_lens.append(max(__lowerCAmelCase , __lowerCAmelCase ) )
else:
max_lens.extend(__lowerCAmelCase )
return max_lens
_UpperCAmelCase : Dict = get_lens(__lowerCAmelCase )
_UpperCAmelCase : Optional[Any] = SeqaSeqDataset(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , type_path="val" , **__lowerCAmelCase )
_UpperCAmelCase : Union[str, Any] = get_lens(__lowerCAmelCase )
pickle_save(__lowerCAmelCase , train_ds.len_file )
pickle_save(__lowerCAmelCase , val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
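    # Example invocation (assuming the two positional arguments are a tokenizer name
    # and a data directory, as in the seq2seq utilities this script builds on):
    #   python save_len_file.py sshleifer/bart-tiny-random path/to/data_dir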
| 322
| 1
|
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
lowerCamelCase__ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
lowerCamelCase__ = ' def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n'
class lowerCAmelCase__ ( unittest.TestCase ):
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase : List[Any] = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir , "models/bert/" ) )
_UpperCAmelCase : Optional[int] = self.transformer_dir
shutil.copy(
os.path.join(lowerCamelCase__ , "src/transformers/models/bert/modeling_bert.py" ) , os.path.join(self.transformer_dir , "models/bert/modeling_bert.py" ) , )
def lowerCAmelCase__ ( self : List[Any] ) ->int:
'''simple docstring'''
_UpperCAmelCase : Dict = "src/transformers"
shutil.rmtree(self.transformer_dir )
def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Dict , lowerCamelCase__ : List[str] , lowerCamelCase__ : List[str]=None ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : str = comment + F"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
_UpperCAmelCase : Optional[Any] = comment + F"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
_UpperCAmelCase : Tuple = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19 )
_UpperCAmelCase : Optional[int] = black.format_str(lowerCamelCase__ , mode=lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = os.path.join(self.transformer_dir , "new_code.py" )
with open(lowerCamelCase__ , "w" , newline="\n" ) as f:
f.write(lowerCamelCase__ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(lowerCamelCase__ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=lowerCamelCase__ )
with open(lowerCamelCase__ , "r" ) as f:
self.assertTrue(f.read() , lowerCamelCase__ )
def lowerCAmelCase__ ( self : int ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead" )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def lowerCAmelCase__ ( self : Optional[Any] ) ->Optional[int]:
'''simple docstring'''
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead" , "BertLMPredictionHead" , REFERENCE_CODE + "\n" , )
# With no empty line at the end
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead" , "BertLMPredictionHead" , lowerCamelCase__ , )
# Copy consistency with rename
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel" , "TestModelLMPredictionHead" , re.sub("Bert" , "TestModel" , lowerCamelCase__ ) , )
# Copy consistency with a really long name
_UpperCAmelCase : str = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
self.check_copy_consistency(
F"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}""" , F"""{long_class_name}LMPredictionHead""" , re.sub("Bert" , lowerCamelCase__ , lowerCamelCase__ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel" , "TestModelLMPredictionHead" , lowerCamelCase__ , overwrite_result=re.sub("Bert" , "TestModel" , lowerCamelCase__ ) , )
def lowerCAmelCase__ ( self : Tuple ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : int = check_copies.LOCALIZED_READMES["README_zh-hans.md"]
_UpperCAmelCase : Dict = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."
" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"
" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"
" method has been applied to compress GPT2 into"
" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
" Multilingual BERT into"
" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"
" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"
" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"
" Luong, Quoc V. Le, Christopher D. Manning."
)
_UpperCAmelCase : Dict = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
_UpperCAmelCase : str = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."
" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"
" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"
" method has been applied to compress GPT2 into"
" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
" Multilingual BERT into"
" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"
" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"
" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"
" Christopher D. Manning 发布。\n"
)
_UpperCAmelCase , _UpperCAmelCase : Dict = check_copies.convert_to_localized_md(
lowerCamelCase__ , lowerCamelCase__ , localized_readme["format_model_list"] )
self.assertFalse(lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = check_copies.convert_to_localized_md(
lowerCamelCase__ , lowerCamelCase__ , localized_readme["format_model_list"] )
        # Check whether the number of models matches the one in the English README.md after conversion.
self.assertTrue(lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."
)
_UpperCAmelCase : Tuple = (
"1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"
" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
_UpperCAmelCase : Optional[Any] = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
_UpperCAmelCase , _UpperCAmelCase : List[str] = check_copies.convert_to_localized_md(
lowerCamelCase__ , lowerCamelCase__ , localized_readme["format_model_list"] )
# Check if the model link is synchronized.
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
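# A minimal illustrative sketch of the consistency property the test above
# exercises: the localized model list should keep the same number of entries as
# the English one. This is not the real `check_copies` implementation; the
# helper name and the entry regex are assumptions for illustration only.
import re

def _model_lists_in_sync(english_md: str, localized_md: str) -> bool:
    def count_entries(md: str) -> int:
        # every model entry in these READMEs starts a line with "1. **[Name](url)**"
        return len(re.findall(r"^1\. \*\*\[", md, flags=re.MULTILINE))
    return count_entries(english_md) == count_entries(localized_md)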
| 322
|
'''simple docstring'''
import pytest
lowerCamelCase__ = '__dummy_dataset1__'
lowerCamelCase__ = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n "tokens": datasets.Sequence(datasets.Value("string")),\n "ner_tags": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n "O",\n "B-PER",\n "I-PER",\n "B-ORG",\n "I-ORG",\n "B-LOC",\n "I-LOC",\n ]\n )\n ),\n "langs": datasets.Sequence(datasets.Value("string")),\n "spans": datasets.Sequence(datasets.Value("string")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, "r", encoding="utf-8") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n'
@pytest.fixture
def __lowerCAmelCase ():
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def __lowerCAmelCase ():
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : Optional[Any] = dataset_loading_script_name
_UpperCAmelCase : Any = tmp_path / "datasets" / script_name
script_dir.mkdir(parents=__lowerCAmelCase )
_UpperCAmelCase : Optional[Any] = script_dir / F"""{script_name}.py"""
with open(__lowerCAmelCase , "w" ) as f:
f.write(__lowerCAmelCase )
return str(__lowerCAmelCase )
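# A minimal usage sketch for the fixture above (hedged: loading a local dataset
# script by path worked in older `datasets` releases, but script loading has
# since been removed, so treat this as illustrative rather than guaranteed):
#
#   import datasets
#   ds = datasets.load_dataset(dataset_loading_script_dir, split="train")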
| 322
| 1
|
'''simple docstring'''
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase , _UpperCAmelCase : Tuple = analyze_text(__lowerCAmelCase )
_UpperCAmelCase : Union[str, Any] = list(" " + ascii_lowercase )
    # total count of all single characters (the denominator for each probability).
    _UpperCAmelCase : Optional[int] = sum(single_char_strings.values() )
    # entropy over one-character strings
    _UpperCAmelCase : Union[str, Any] = 0
    # for each alphabet character, look it up in the counts and accumulate its entropy term
for ch in my_alphas:
if ch in single_char_strings:
_UpperCAmelCase : Tuple = single_char_strings[ch]
_UpperCAmelCase : List[str] = my_str / all_sum
my_fir_sum += prob * math.loga(__lowerCAmelCase ) # entropy formula.
# print entropy
print(F"""{round(-1 * my_fir_sum ):.1f}""" )
# two len string
_UpperCAmelCase : Tuple = sum(two_char_strings.values() )
_UpperCAmelCase : List[str] = 0
    # for each two-character sequence, accumulate its entropy term.
    for cha in my_alphas:
        for chb in my_alphas:
            _UpperCAmelCase : str = cha + chb
if sequence in two_char_strings:
_UpperCAmelCase : List[str] = two_char_strings[sequence]
_UpperCAmelCase : Any = int(__lowerCAmelCase ) / all_sum
my_sec_sum += prob * math.loga(__lowerCAmelCase )
# print second entropy
print(F"""{round(-1 * my_sec_sum ):.1f}""" )
# print the difference between them
print(F"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}""" )
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : Tuple = Counter() # type: ignore
_UpperCAmelCase : Union[str, Any] = Counter() # type: ignore
    single_char_strings[text[-1]] += 1  # the loop below stops one short, so count the last character here
    # first pair: treat the text as if preceded by a space.
    two_char_strings[" " + text[0]] += 1
for i in range(0 , len(__lowerCAmelCase ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def __lowerCAmelCase ():
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
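# A minimal standalone sketch of the first-order Shannon entropy computed above,
# H = -sum(p * log2(p)); the helper name and the sample string are illustrative.
def _first_order_entropy_sketch(text: str) -> float:
    counts = Counter(text)  # Counter and math are already imported at the top of this file
    total = sum(counts.values())
    return -sum((c / total) * math.log2(c / total) for c in counts.values())

# e.g. _first_order_entropy_sketch("aabb") == 1.0 (two equally likely symbols)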
| 322
|
'''simple docstring'''
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
lowerCamelCase__ = version.parse(importlib_metadata.version('nltk'))
if NLTK_VERSION >= version.Version('3.6.4'):
from nltk import word_tokenize
lowerCamelCase__ = '\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n'
lowerCamelCase__ = '\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n'
lowerCamelCase__ = '\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n \'meteor\': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric(\'meteor\')\n >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]\n >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results["meteor"], 4))\n 0.6944\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase__ ( datasets.Metric ):
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Optional[int]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"] , reference_urls=[
"https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
"https://en.wikipedia.org/wiki/METEOR",
] , )
def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : List[str] ) ->int:
'''simple docstring'''
import nltk
nltk.download("wordnet" )
if NLTK_VERSION >= version.Version("3.6.5" ):
nltk.download("punkt" )
if NLTK_VERSION >= version.Version("3.6.6" ):
nltk.download("omw-1.4" )
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : List[str] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : int=0.9 , lowerCamelCase__ : Dict=3 , lowerCamelCase__ : Dict=0.5 ) ->Any:
'''simple docstring'''
if NLTK_VERSION >= version.Version("3.6.5" ):
_UpperCAmelCase : Dict = [
meteor_score.single_meteor_score(
word_tokenize(lowerCamelCase__ ) , word_tokenize(lowerCamelCase__ ) , alpha=lowerCamelCase__ , beta=lowerCamelCase__ , gamma=lowerCamelCase__ )
for ref, pred in zip(lowerCamelCase__ , lowerCamelCase__ )
]
else:
_UpperCAmelCase : Optional[int] = [
meteor_score.single_meteor_score(lowerCamelCase__ , lowerCamelCase__ , alpha=lowerCamelCase__ , beta=lowerCamelCase__ , gamma=lowerCamelCase__ )
for ref, pred in zip(lowerCamelCase__ , lowerCamelCase__ )
]
return {"meteor": np.mean(lowerCamelCase__ )}
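# A minimal sketch of calling the underlying nltk scorer directly, mirroring the
# NLTK >= 3.6.5 branch above. The sentences and the helper name are illustrative,
# and the wordnet/punkt downloads triggered above are assumed to have run.
def _meteor_sketch(reference: str, prediction: str) -> float:
    from nltk import word_tokenize
    from nltk.translate import meteor_score
    return meteor_score.single_meteor_score(
        word_tokenize(reference), word_tokenize(prediction), alpha=0.9, beta=3, gamma=0.5
    )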
| 322
| 1
|
'''simple docstring'''
def __lowerCAmelCase (__lowerCAmelCase ):
return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print('Program to check whether a number is a Perfect number or not...')
lowerCamelCase__ = int(input('Enter number: ').strip())
print(F'''{number} is {"" if perfect(number) else "not "}a Perfect Number.''')
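# A minimal self-contained sketch of the same check (the name _is_perfect is
# illustrative): a number is perfect when its proper divisors sum back to it,
# e.g. 6 = 1 + 2 + 3.
def _is_perfect(n: int) -> bool:
    return n > 1 and sum(i for i in range(1, n // 2 + 1) if n % i == 0) == n

# _is_perfect(6) and _is_perfect(28) are True; _is_perfect(12) is False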
| 322
|
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
lowerCamelCase__ = logging.get_logger(__name__)
class lowerCAmelCase__ ( UpperCAmelCase__ ):
def lowerCAmelCase__ ( self : Optional[Any] , lowerCamelCase__ : int ) ->str:
'''simple docstring'''
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
_UpperCAmelCase : Union[str, Any] = [label.strip() for label in labels.split("," ) if label.strip()]
return labels
def __call__( self : Union[str, Any] , lowerCamelCase__ : Any , lowerCamelCase__ : Any , lowerCamelCase__ : List[Any] ) ->str:
'''simple docstring'''
if len(lowerCamelCase__ ) == 0 or len(lowerCamelCase__ ) == 0:
raise ValueError("You must include at least one label and at least one sequence." )
if hypothesis_template.format(labels[0] ) == hypothesis_template:
raise ValueError(
(
"The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. "
"Make sure the passed template includes formatting syntax such as {{}} where the label should go."
).format(lowerCamelCase__ ) )
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
_UpperCAmelCase : Optional[Any] = [sequences]
_UpperCAmelCase : int = []
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(lowerCamelCase__ )] for label in labels] )
return sequence_pairs, sequences
@add_end_docstrings(UpperCAmelCase__ )
class lowerCAmelCase__ ( UpperCAmelCase__ ):
def __init__( self : Union[str, Any] , lowerCamelCase__ : Optional[Any]=ZeroShotClassificationArgumentHandler() , *lowerCamelCase__ : List[str] , **lowerCamelCase__ : Any ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : Tuple = args_parser
super().__init__(*lowerCamelCase__ , **lowerCamelCase__ )
if self.entailment_id == -1:
logger.warning(
"Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
"-1. Define a descriptive label2id mapping in the model config to ensure correct outputs." )
@property
def lowerCAmelCase__ ( self : Any ) ->Union[str, Any]:
'''simple docstring'''
for label, ind in self.model.config.labelaid.items():
if label.lower().startswith("entail" ):
return ind
return -1
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : Tuple , lowerCamelCase__ : int=True , lowerCamelCase__ : Optional[int]=True , lowerCamelCase__ : str=TruncationStrategy.ONLY_FIRST , **lowerCamelCase__ : List[Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase : int = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
            logger.error(
                "Tokenizer does not support padding, which is necessary for zero-shot "
                "classification; falling back to `pad_token=eos_token`." )
_UpperCAmelCase : Optional[Any] = self.tokenizer.eos_token
try:
_UpperCAmelCase : List[str] = self.tokenizer(
lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , return_tensors=lowerCamelCase__ , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , )
except Exception as e:
if "too short" in str(lowerCamelCase__ ):
                # The tokenizer may raise when asked to truncate to a length the
                # input never reaches; in that case we retry without truncation.
                # Catching the exception message is the only practical way to
                # detect this.
_UpperCAmelCase : List[Any] = self.tokenizer(
lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , return_tensors=lowerCamelCase__ , padding=lowerCamelCase__ , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def lowerCAmelCase__ ( self : int , **lowerCamelCase__ : Union[str, Any] ) ->Tuple:
'''simple docstring'''
if kwargs.get("multi_class" , lowerCamelCase__ ) is not None:
_UpperCAmelCase : int = kwargs["multi_class"]
logger.warning(
"The `multi_class` argument has been deprecated and renamed to `multi_label`. "
"`multi_class` will be removed in a future version of Transformers." )
_UpperCAmelCase : Dict = {}
if "candidate_labels" in kwargs:
_UpperCAmelCase : List[Any] = self._args_parser._parse_labels(kwargs["candidate_labels"] )
if "hypothesis_template" in kwargs:
_UpperCAmelCase : Dict = kwargs["hypothesis_template"]
_UpperCAmelCase : List[str] = {}
if "multi_label" in kwargs:
_UpperCAmelCase : Optional[Any] = kwargs["multi_label"]
return preprocess_params, {}, postprocess_params
def __call__( self : int , lowerCamelCase__ : Union[str, List[str]] , *lowerCamelCase__ : str , **lowerCamelCase__ : Optional[Any] , ) ->Optional[int]:
'''simple docstring'''
if len(lowerCamelCase__ ) == 0:
pass
elif len(lowerCamelCase__ ) == 1 and "candidate_labels" not in kwargs:
_UpperCAmelCase : int = args[0]
else:
raise ValueError(F"""Unable to understand extra arguments {args}""" )
return super().__call__(lowerCamelCase__ , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Any=None , lowerCamelCase__ : str="This example is {}." ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = self._args_parser(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
for i, (candidate_label, sequence_pair) in enumerate(zip(lowerCamelCase__ , lowerCamelCase__ ) ):
_UpperCAmelCase : Optional[int] = self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(lowerCamelCase__ ) - 1,
**model_input,
}
def lowerCAmelCase__ ( self : Optional[Any] , lowerCamelCase__ : Optional[int] ) ->int:
'''simple docstring'''
_UpperCAmelCase : Dict = inputs["candidate_label"]
_UpperCAmelCase : Optional[int] = inputs["sequence"]
_UpperCAmelCase : Dict = {k: inputs[k] for k in self.tokenizer.model_input_names}
_UpperCAmelCase : List[Any] = self.model(**lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = {
"candidate_label": candidate_label,
"sequence": sequence,
"is_last": inputs["is_last"],
**outputs,
}
return model_outputs
def lowerCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : int , lowerCamelCase__ : Tuple=False ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Any = [outputs["candidate_label"] for outputs in model_outputs]
_UpperCAmelCase : Any = [outputs["sequence"] for outputs in model_outputs]
_UpperCAmelCase : Optional[int] = np.concatenate([output["logits"].numpy() for output in model_outputs] )
_UpperCAmelCase : Optional[Any] = logits.shape[0]
_UpperCAmelCase : Any = len(lowerCamelCase__ )
_UpperCAmelCase : str = N // n
_UpperCAmelCase : str = logits.reshape((num_sequences, n, -1) )
if multi_label or len(lowerCamelCase__ ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
_UpperCAmelCase : int = self.entailment_id
_UpperCAmelCase : List[Any] = -1 if entailment_id == 0 else 0
_UpperCAmelCase : str = reshaped_outputs[..., [contradiction_id, entailment_id]]
_UpperCAmelCase : Union[str, Any] = np.exp(lowerCamelCase__ ) / np.exp(lowerCamelCase__ ).sum(-1 , keepdims=lowerCamelCase__ )
_UpperCAmelCase : str = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
_UpperCAmelCase : int = reshaped_outputs[..., self.entailment_id]
_UpperCAmelCase : Union[str, Any] = np.exp(lowerCamelCase__ ) / np.exp(lowerCamelCase__ ).sum(-1 , keepdims=lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
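# A minimal numpy sketch of the two scoring modes implemented in the postprocess
# method above. The logits shape (num_labels, 3) and the entailment/contradiction
# ids follow the common MNLI label order and are assumptions; a real model config
# may order its labels differently.
def _zero_shot_scores_sketch(logits, multi_label):
    entailment_id, contradiction_id = 2, 0  # assumed MNLI-style ordering
    if multi_label:
        # independent softmax over (contradiction, entailment) per candidate label
        pair = logits[..., [contradiction_id, entailment_id]]
        probs = np.exp(pair) / np.exp(pair).sum(-1, keepdims=True)
        return probs[..., 1]
    # single softmax of the entailment logit across all candidate labels
    entail_logits = logits[..., entailment_id]
    return np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)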
| 322
| 1
|
'''simple docstring'''
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 322
|
'''simple docstring'''
def __lowerCAmelCase (__lowerCAmelCase = 4_000_000 ):
_UpperCAmelCase : List[Any] = []
_UpperCAmelCase , _UpperCAmelCase : Dict = 0, 1
while b <= n:
if b % 2 == 0:
even_fibs.append(__lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase : Any = b, a + b
return sum(__lowerCAmelCase )
if __name__ == "__main__":
print(F'''{solution() = }''')
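# A brute-force cross-check sketch for the solution above (the helper name is
# illustrative): sum the even Fibonacci terms up to the limit.
def _even_fib_sum_check(limit: int = 4_000_000) -> int:
    total, a, b = 0, 0, 1
    while b <= limit:
        if b % 2 == 0:
            total += b
        a, b = b, a + b
    return total  # 4613732 for the default limit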
| 322
| 1
|
'''simple docstring'''
import math
import qiskit
def __lowerCAmelCase (__lowerCAmelCase = 1 , __lowerCAmelCase = 1 , __lowerCAmelCase = 1 ):
if (
isinstance(__lowerCAmelCase , __lowerCAmelCase )
or isinstance(__lowerCAmelCase , __lowerCAmelCase )
or isinstance(__lowerCAmelCase , __lowerCAmelCase )
):
raise TypeError("inputs must be integers." )
if (input_a < 0) or (input_a < 0) or (carry_in < 0):
raise ValueError("inputs must be positive." )
if (
(math.floor(__lowerCAmelCase ) != input_a)
or (math.floor(__lowerCAmelCase ) != input_a)
or (math.floor(__lowerCAmelCase ) != carry_in)
):
raise ValueError("inputs must be exact integers." )
if (input_a > 2) or (input_a > 2) or (carry_in > 2):
raise ValueError("inputs must be less or equal to 2." )
# build registers
_UpperCAmelCase : Dict = qiskit.QuantumRegister(4 , "qr" )
_UpperCAmelCase : Tuple = qiskit.ClassicalRegister(2 , "cr" )
# list the entries
_UpperCAmelCase : Any = [input_a, input_a, carry_in]
_UpperCAmelCase : List[Any] = qiskit.QuantumCircuit(__lowerCAmelCase , __lowerCAmelCase )
for i in range(0 , 3 ):
if entry[i] == 2:
quantum_circuit.h(__lowerCAmelCase ) # for hadamard entries
elif entry[i] == 1:
quantum_circuit.x(__lowerCAmelCase ) # for 1 entries
elif entry[i] == 0:
quantum_circuit.i(__lowerCAmelCase ) # for 0 entries
# build the circuit
    quantum_circuit.ccx(0 , 1 , 3 )  # ccx = Toffoli gate
quantum_circuit.cx(0 , 1 )
quantum_circuit.ccx(1 , 2 , 3 )
quantum_circuit.cx(1 , 2 )
quantum_circuit.cx(0 , 1 )
    quantum_circuit.measure([2, 3] , __lowerCAmelCase )  # measure the last two qubits
_UpperCAmelCase : List[str] = qiskit.Aer.get_backend("aer_simulator" )
_UpperCAmelCase : Union[str, Any] = qiskit.execute(__lowerCAmelCase , __lowerCAmelCase , shots=1_000 )
return job.result().get_counts(__lowerCAmelCase )
if __name__ == "__main__":
print(F'''Total sum count for state is: {quantum_full_adder(1, 1, 1)}''')
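# A classical reference sketch (independent of qiskit) for the expected
# (carry_out, sum) bits the circuit above measures for definite 0/1 inputs.
# Note that qiskit count strings are little-endian, so how a key like "11"
# maps onto (sum, carry) is an assumption worth verifying against the output.
def _classical_full_adder(bit_a: int, bit_b: int, carry_in: int):
    total = bit_a + bit_b + carry_in
    return total // 2, total % 2  # (carry_out, sum)

# e.g. _classical_full_adder(1, 1, 1) == (1, 1)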
| 322
|
'''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class lowerCAmelCase__ ( unittest.TestCase ):
def __init__( self : Optional[Any] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : List[str]=13 , lowerCamelCase__ : Optional[Any]=7 , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : Any=True , lowerCamelCase__ : List[Any]=True , lowerCamelCase__ : Any=True , lowerCamelCase__ : int=99 , lowerCamelCase__ : int=32 , lowerCamelCase__ : List[str]=5 , lowerCamelCase__ : Optional[Any]=4 , lowerCamelCase__ : Optional[int]=37 , lowerCamelCase__ : Tuple="gelu" , lowerCamelCase__ : Any=0.1 , lowerCamelCase__ : Union[str, Any]=0.1 , lowerCamelCase__ : Optional[int]=5_12 , lowerCamelCase__ : Optional[int]=16 , lowerCamelCase__ : str=2 , lowerCamelCase__ : Union[str, Any]=0.0_2 , lowerCamelCase__ : Tuple=4 , ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = parent
_UpperCAmelCase : List[Any] = batch_size
_UpperCAmelCase : Optional[int] = seq_length
_UpperCAmelCase : int = is_training
_UpperCAmelCase : Dict = use_attention_mask
_UpperCAmelCase : Optional[Any] = use_token_type_ids
_UpperCAmelCase : int = use_labels
_UpperCAmelCase : Optional[int] = vocab_size
_UpperCAmelCase : Any = hidden_size
_UpperCAmelCase : Any = num_hidden_layers
_UpperCAmelCase : List[Any] = num_attention_heads
_UpperCAmelCase : Tuple = intermediate_size
_UpperCAmelCase : int = hidden_act
_UpperCAmelCase : int = hidden_dropout_prob
_UpperCAmelCase : Union[str, Any] = attention_probs_dropout_prob
_UpperCAmelCase : Union[str, Any] = max_position_embeddings
_UpperCAmelCase : Tuple = type_vocab_size
_UpperCAmelCase : List[Any] = type_sequence_label_size
_UpperCAmelCase : Optional[int] = initializer_range
_UpperCAmelCase : Dict = num_choices
def lowerCAmelCase__ ( self : List[Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase : Dict = None
if self.use_attention_mask:
_UpperCAmelCase : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase : Union[str, Any] = None
if self.use_token_type_ids:
_UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase : int = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCAmelCase__ ( self : Any ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Tuple = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : List[Any] = config_and_inputs
_UpperCAmelCase : str = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class lowerCAmelCase__ ( UpperCAmelCase__ , unittest.TestCase ):
lowerCAmelCase : Optional[int] = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
)
if is_flax_available()
else ()
)
def lowerCAmelCase__ ( self : Optional[int] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : int = FlaxAlbertModelTester(self )
@slow
def lowerCAmelCase__ ( self : Any ) ->List[str]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
_UpperCAmelCase : List[str] = model_class_name.from_pretrained("albert-base-v2" )
_UpperCAmelCase : Optional[int] = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCamelCase__ )
@require_flax
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def lowerCAmelCase__ ( self : Tuple ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : str = FlaxAlbertModel.from_pretrained("albert-base-v2" )
_UpperCAmelCase : List[Any] = np.array([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
_UpperCAmelCase : Optional[int] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_UpperCAmelCase : Dict = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ )[0]
_UpperCAmelCase : List[Any] = (1, 11, 7_68)
self.assertEqual(output.shape , lowerCamelCase__ )
_UpperCAmelCase : str = np.array(
[[[-0.6_5_1_3, 1.5_0_3_5, -0.2_7_6_6], [-0.6_5_1_5, 1.5_0_4_6, -0.2_7_8_0], [-0.6_5_1_2, 1.5_0_4_9, -0.2_7_8_4]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , lowerCamelCase__ , atol=1E-4 ) )
| 322
| 1
|
'''simple docstring'''
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=os.environ.get('LOGLEVEL', 'INFO').upper(),
stream=sys.stdout,
)
lowerCamelCase__ = logging.getLogger(__name__)
lowerCamelCase__ = {'facebook/bart-base': BartForConditionalGeneration}
lowerCamelCase__ = {'facebook/bart-base': BartTokenizer}
def __lowerCAmelCase ():
_UpperCAmelCase : List[Any] = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph." )
parser.add_argument(
"--validation_file" , type=__lowerCAmelCase , default=__lowerCAmelCase , help="A csv or a json file containing the validation data." )
parser.add_argument(
"--max_length" , type=__lowerCAmelCase , default=5 , help="The maximum total input sequence length after tokenization." , )
parser.add_argument(
"--num_beams" , type=__lowerCAmelCase , default=__lowerCAmelCase , help=(
"Number of beams to use for evaluation. This argument will be "
"passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
) , )
parser.add_argument(
"--model_name_or_path" , type=__lowerCAmelCase , help="Path to pretrained model or model identifier from huggingface.co/models." , required=__lowerCAmelCase , )
parser.add_argument(
"--config_name" , type=__lowerCAmelCase , default=__lowerCAmelCase , help="Pretrained config name or path if not the same as model_name" , )
parser.add_argument(
"--device" , type=__lowerCAmelCase , default="cpu" , help="Device where the model will be run" , )
parser.add_argument("--output_file_path" , type=__lowerCAmelCase , default=__lowerCAmelCase , help="Where to store the final ONNX file." )
_UpperCAmelCase : Tuple = parser.parse_args()
return args
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase="cpu" ):
_UpperCAmelCase : List[str] = model_dict[model_name].from_pretrained(__lowerCAmelCase ).to(__lowerCAmelCase )
_UpperCAmelCase : Any = tokenizer_dict[model_name].from_pretrained(__lowerCAmelCase )
if model_name in ["facebook/bart-base"]:
_UpperCAmelCase : Optional[Any] = 0
_UpperCAmelCase : List[str] = None
_UpperCAmelCase : List[Any] = 0
return huggingface_model, tokenizer
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
model.eval()
_UpperCAmelCase : Optional[Any] = None
_UpperCAmelCase : Optional[Any] = torch.jit.script(BARTBeamSearchGenerator(__lowerCAmelCase ) )
with torch.no_grad():
_UpperCAmelCase : int = "My friends are cool but they eat too many carbs."
_UpperCAmelCase : List[Any] = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1_024 , return_tensors="pt" ).to(model.device )
_UpperCAmelCase : int = model.generate(
inputs["input_ids"] , attention_mask=inputs["attention_mask"] , num_beams=__lowerCAmelCase , max_length=__lowerCAmelCase , early_stopping=__lowerCAmelCase , decoder_start_token_id=model.config.decoder_start_token_id , )
torch.onnx.export(
__lowerCAmelCase , (
inputs["input_ids"],
inputs["attention_mask"],
num_beams,
max_length,
model.config.decoder_start_token_id,
) , __lowerCAmelCase , opset_version=14 , input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"] , output_names=["output_ids"] , dynamic_axes={
"input_ids": {0: "batch", 1: "seq"},
"output_ids": {0: "batch", 1: "seq_out"},
} , example_outputs=__lowerCAmelCase , )
logger.info("Model exported to {}".format(__lowerCAmelCase ) )
_UpperCAmelCase : Tuple = remove_dup_initializers(os.path.abspath(__lowerCAmelCase ) )
logger.info("Deduplicated and optimized model written to {}".format(__lowerCAmelCase ) )
_UpperCAmelCase : Optional[Any] = onnxruntime.InferenceSession(__lowerCAmelCase )
_UpperCAmelCase : Tuple = ort_sess.run(
__lowerCAmelCase , {
"input_ids": inputs["input_ids"].cpu().numpy(),
"attention_mask": inputs["attention_mask"].cpu().numpy(),
"num_beams": np.array(__lowerCAmelCase ),
"max_length": np.array(__lowerCAmelCase ),
"decoder_start_token_id": np.array(model.config.decoder_start_token_id ),
} , )
np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1e-3 , atol=1e-3 )
logger.info("Model outputs from torch and ONNX Runtime are similar." )
logger.info("Success." )
def __lowerCAmelCase ():
_UpperCAmelCase : List[str] = parse_args()
_UpperCAmelCase : List[Any] = 5
_UpperCAmelCase : Optional[int] = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
_UpperCAmelCase : List[Any] = torch.device(args.device )
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = load_model_tokenizer(args.model_name_or_path , __lowerCAmelCase )
if model.config.decoder_start_token_id is None:
raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined" )
model.to(__lowerCAmelCase )
if args.max_length:
_UpperCAmelCase : Tuple = args.max_length
if args.num_beams:
_UpperCAmelCase : Optional[Any] = args.num_beams
if args.output_file_path:
_UpperCAmelCase : Optional[Any] = args.output_file_path
else:
_UpperCAmelCase : Optional[Any] = "BART.onnx"
logger.info("Exporting model to ONNX" )
export_and_validate_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if __name__ == "__main__":
main()
| 322
|
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
lowerCamelCase__ = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risk.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-classification/requirements.txt')
lowerCamelCase__ = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
lowerCamelCase__ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def __lowerCAmelCase (__lowerCAmelCase ):
with open(__lowerCAmelCase , "rb" ) as f:
_UpperCAmelCase : List[str] = Image.open(__lowerCAmelCase )
return im.convert("RGB" )
@dataclass
class lowerCAmelCase__ :
lowerCAmelCase : Optional[str] = field(
default=UpperCAmelCase__ , metadata={
"help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."
} , )
lowerCAmelCase : Optional[str] = field(
default=UpperCAmelCase__ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
lowerCAmelCase : Optional[str] = field(default=UpperCAmelCase__ , metadata={"help": "A folder containing the training data."} )
lowerCAmelCase : Optional[str] = field(default=UpperCAmelCase__ , metadata={"help": "A folder containing the validation data."} )
lowerCAmelCase : Optional[float] = field(
default=0.15 , metadata={"help": "Percent to split off of train for validation."} )
lowerCAmelCase : Optional[int] = field(
default=UpperCAmelCase__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
lowerCAmelCase : Optional[int] = field(
default=UpperCAmelCase__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def lowerCAmelCase__ ( self : int ) ->List[str]:
'''simple docstring'''
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError(
"You must specify either a dataset name from the hub or a train and/or validation directory." )
@dataclass
class lowerCAmelCase__ :
lowerCAmelCase : str = field(
default="google/vit-base-patch16-224-in21k" , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} , )
lowerCAmelCase : Optional[str] = field(
default=UpperCAmelCase__ , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(UpperCAmelCase__ )} , )
lowerCAmelCase : Optional[str] = field(
default=UpperCAmelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
lowerCAmelCase : Optional[str] = field(
default=UpperCAmelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} )
lowerCAmelCase : str = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
lowerCAmelCase : str = field(default=UpperCAmelCase__ , metadata={"help": "Name or path of preprocessor config."} )
lowerCAmelCase : bool = field(
default=UpperCAmelCase__ , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
lowerCAmelCase : bool = field(
default=UpperCAmelCase__ , metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , )
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : str = torch.stack([example["pixel_values"] for example in examples] )
_UpperCAmelCase : Tuple = torch.tensor([example["labels"] for example in examples] )
return {"pixel_values": pixel_values, "labels": labels}
def __lowerCAmelCase ():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_UpperCAmelCase : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Dict = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_image_classification" , __lowerCAmelCase , __lowerCAmelCase )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_UpperCAmelCase : Optional[Any] = training_args.get_process_log_level()
logger.setLevel(__lowerCAmelCase )
transformers.utils.logging.set_verbosity(__lowerCAmelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
_UpperCAmelCase : List[Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_UpperCAmelCase : Dict = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
if data_args.dataset_name is not None:
_UpperCAmelCase : str = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task="image-classification" , use_auth_token=True if model_args.use_auth_token else None , )
else:
_UpperCAmelCase : List[Any] = {}
if data_args.train_dir is not None:
_UpperCAmelCase : str = os.path.join(data_args.train_dir , "**" )
if data_args.validation_dir is not None:
_UpperCAmelCase : Optional[Any] = os.path.join(data_args.validation_dir , "**" )
_UpperCAmelCase : Any = load_dataset(
"imagefolder" , data_files=__lowerCAmelCase , cache_dir=model_args.cache_dir , task="image-classification" , )
# If we don't have a validation split, split off a percentage of train as validation.
_UpperCAmelCase : int = None if "validation" in dataset.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , __lowerCAmelCase ) and data_args.train_val_split > 0.0:
_UpperCAmelCase : List[Any] = dataset["train"].train_test_split(data_args.train_val_split )
_UpperCAmelCase : List[str] = split["train"]
_UpperCAmelCase : Union[str, Any] = split["test"]
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
_UpperCAmelCase : Optional[int] = dataset["train"].features["labels"].names
_UpperCAmelCase , _UpperCAmelCase : int = {}, {}
for i, label in enumerate(__lowerCAmelCase ):
_UpperCAmelCase : int = str(__lowerCAmelCase )
_UpperCAmelCase : str = label
# Load the accuracy metric from the datasets package
_UpperCAmelCase : int = evaluate.load("accuracy" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(__lowerCAmelCase ):
return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids )
_UpperCAmelCase : Dict = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(__lowerCAmelCase ) , labelaid=__lowerCAmelCase , idalabel=__lowerCAmelCase , finetuning_task="image-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_UpperCAmelCase : List[str] = AutoModelForImageClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=__lowerCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
_UpperCAmelCase : Dict = AutoImageProcessor.from_pretrained(
model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
_UpperCAmelCase : int = image_processor.size["shortest_edge"]
else:
_UpperCAmelCase : int = (image_processor.size["height"], image_processor.size["width"])
_UpperCAmelCase : str = Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
_UpperCAmelCase : Optional[int] = Compose(
[
RandomResizedCrop(__lowerCAmelCase ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
_UpperCAmelCase : Union[str, Any] = Compose(
[
Resize(__lowerCAmelCase ),
CenterCrop(__lowerCAmelCase ),
ToTensor(),
normalize,
] )
def train_transforms(__lowerCAmelCase ):
_UpperCAmelCase : Optional[Any] = [
_train_transforms(pil_img.convert("RGB" ) ) for pil_img in example_batch["image"]
]
return example_batch
def val_transforms(__lowerCAmelCase ):
_UpperCAmelCase : Union[str, Any] = [_val_transforms(pil_img.convert("RGB" ) ) for pil_img in example_batch["image"]]
return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError("--do_train requires a train dataset" )
if data_args.max_train_samples is not None:
_UpperCAmelCase : Dict = (
dataset["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
dataset["train"].set_transform(__lowerCAmelCase )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError("--do_eval requires a validation dataset" )
if data_args.max_eval_samples is not None:
_UpperCAmelCase : Optional[Any] = (
dataset["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
dataset["validation"].set_transform(__lowerCAmelCase )
    # Initialize our trainer
_UpperCAmelCase : Union[str, Any] = Trainer(
model=__lowerCAmelCase , args=__lowerCAmelCase , train_dataset=dataset["train"] if training_args.do_train else None , eval_dataset=dataset["validation"] if training_args.do_eval else None , compute_metrics=__lowerCAmelCase , tokenizer=__lowerCAmelCase , data_collator=__lowerCAmelCase , )
# Training
if training_args.do_train:
_UpperCAmelCase : Any = None
if training_args.resume_from_checkpoint is not None:
_UpperCAmelCase : List[str] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_UpperCAmelCase : int = last_checkpoint
_UpperCAmelCase : Dict = trainer.train(resume_from_checkpoint=__lowerCAmelCase )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
_UpperCAmelCase : Dict = trainer.evaluate()
trainer.log_metrics("eval" , __lowerCAmelCase )
trainer.save_metrics("eval" , __lowerCAmelCase )
# Write model card and (optionally) push to hub
_UpperCAmelCase : int = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "image-classification",
"dataset": data_args.dataset_name,
"tags": ["image-classification", "vision"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**__lowerCAmelCase )
else:
trainer.create_model_card(**__lowerCAmelCase )
if __name__ == "__main__":
main()
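# A minimal self-contained sketch of the torchvision validation pipeline built
# above; the 224 size and the 0.5 mean/std are illustrative stand-ins for the
# values a real run reads from the model's image processor.
def _val_pipeline_sketch(image_path: str):
    sketch = Compose(
        [
            Resize(224),
            CenterCrop(224),
            ToTensor(),
            Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
        ]
    )
    return sketch(Image.open(image_path).convert("RGB"))  # tensor of shape (3, 224, 224)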
| 322
| 1
|
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
lowerCamelCase__ = logging.getLogger(__name__)
torch.set_grad_enabled(False)
lowerCamelCase__ = 'cuda' if torch.cuda.is_available() else 'cpu'
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase=100 , __lowerCAmelCase=" " ):
_UpperCAmelCase : Any = text.split(__lowerCAmelCase )
return [character.join(text[i : i + n] ).strip() for i in range(0 , len(__lowerCAmelCase ) , __lowerCAmelCase )]
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase , _UpperCAmelCase : Dict = [], []
for title, text in zip(documents["title"] , documents["text"] ):
if text is not None:
for passage in split_text(__lowerCAmelCase ):
titles.append(title if title is not None else "" )
texts.append(__lowerCAmelCase )
return {"title": titles, "text": texts}
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : str = ctx_tokenizer(
documents["title"] , documents["text"] , truncation=__lowerCAmelCase , padding="longest" , return_tensors="pt" )["input_ids"]
_UpperCAmelCase : str = ctx_encoder(input_ids.to(device=__lowerCAmelCase ) , return_dict=__lowerCAmelCase ).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ):
######################################
logger.info("Step 1 - Create the dataset" )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
_UpperCAmelCase : Optional[int] = load_dataset(
"csv" , data_files=[rag_example_args.csv_path] , split="train" , delimiter="\t" , column_names=["title", "text"] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
_UpperCAmelCase : Optional[int] = dataset.map(__lowerCAmelCase , batched=__lowerCAmelCase , num_proc=processing_args.num_proc )
# And compute the embeddings
_UpperCAmelCase : Union[str, Any] = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=__lowerCAmelCase )
_UpperCAmelCase : Optional[int] = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
_UpperCAmelCase : Dict = Features(
{"text": Value("string" ), "title": Value("string" ), "embeddings": Sequence(Value("float32" ) )} ) # optional, save as float32 instead of float64 to save space
_UpperCAmelCase : int = dataset.map(
partial(__lowerCAmelCase , ctx_encoder=__lowerCAmelCase , ctx_tokenizer=__lowerCAmelCase ) , batched=__lowerCAmelCase , batch_size=processing_args.batch_size , features=__lowerCAmelCase , )
# And finally save your dataset
_UpperCAmelCase : List[Any] = os.path.join(rag_example_args.output_dir , "my_knowledge_dataset" )
dataset.save_to_disk(__lowerCAmelCase )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info("Step 2 - Index the dataset" )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
_UpperCAmelCase : Any = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index("embeddings" , custom_index=__lowerCAmelCase )
# And save the index
_UpperCAmelCase : List[str] = os.path.join(rag_example_args.output_dir , "my_knowledge_dataset_hnsw_index.faiss" )
dataset.get_index("embeddings" ).save(__lowerCAmelCase )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class lowerCAmelCase__ :
lowerCAmelCase : str = field(
default=str(Path(UpperCAmelCase__ ).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv" ) , metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"} , )
lowerCAmelCase : Optional[str] = field(
default=UpperCAmelCase__ , metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."} , )
lowerCAmelCase : str = field(
default="facebook/rag-sequence-nq" , metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"} , )
lowerCAmelCase : str = field(
default="facebook/dpr-ctx_encoder-multiset-base" , metadata={
"help": (
"The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
" 'facebook/dpr-ctx_encoder-multiset-base'"
)
} , )
lowerCAmelCase : Optional[str] = field(
default=str(Path(UpperCAmelCase__ ).parent / "test_run" / "dummy-kb" ) , metadata={"help": "Path to a directory where the dataset passages and the index will be saved"} , )
@dataclass
class lowerCAmelCase__ :
lowerCAmelCase : Optional[int] = field(
default=UpperCAmelCase__ , metadata={
"help": "The number of processes to use to split the documents into passages. Default is single process."
} , )
lowerCAmelCase : int = field(
default=16 , metadata={
"help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
} , )
@dataclass
class lowerCAmelCase__ :
lowerCAmelCase : int = field(
default=768 , metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."} , )
lowerCAmelCase : int = field(
default=128 , metadata={
"help": (
"The number of bi-directional links created for every new element during the HNSW index construction."
)
} , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
lowerCamelCase__ = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
lowerCamelCase__ = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
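# A follow-up sketch (not part of the original script) showing how the saved
# dataset and index could be queried with the matching DPR question encoder.
# The model name, k, and the helper name are illustrative assumptions.
def _query_sketch(question: str, indexed_dataset, k: int = 5):
    from transformers import DPRQuestionEncoder, DPRQuestionEncoderTokenizerFast
    name = "facebook/dpr-question_encoder-multiset-base"  # pairs with the multiset ctx encoder
    q_encoder = DPRQuestionEncoder.from_pretrained(name)
    q_tokenizer = DPRQuestionEncoderTokenizerFast.from_pretrained(name)
    inputs = q_tokenizer(question, return_tensors="pt")
    question_embedding = q_encoder(**inputs).pooler_output[0].detach().numpy()
    # retrieve the k nearest passages from the faiss index built in step 2
    return indexed_dataset.get_nearest_examples("embeddings", question_embedding, k=k)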
| 322
|
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
lowerCamelCase__ = logging.get_logger(__name__)
# General docstring
lowerCamelCase__ = 'RegNetConfig'
# Base docstring
lowerCamelCase__ = 'facebook/regnet-y-040'
lowerCamelCase__ = [1, 1_088, 7, 7]
# Image classification docstring
lowerCamelCase__ = 'facebook/regnet-y-040'
lowerCamelCase__ = 'tabby, tabby cat'
lowerCamelCase__ = [
'facebook/regnet-y-040',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class lowerCAmelCase__ ( tf.keras.layers.Layer ):
def __init__( self : int , lowerCamelCase__ : int , lowerCamelCase__ : int = 3 , lowerCamelCase__ : int = 1 , lowerCamelCase__ : int = 1 , lowerCamelCase__ : Optional[str] = "relu" , **lowerCamelCase__ : Tuple , ) ->Optional[Any]:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
_UpperCAmelCase : Optional[Any] = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
_UpperCAmelCase : Dict = tf.keras.layers.ConvaD(
filters=lowerCamelCase__ , kernel_size=lowerCamelCase__ , strides=lowerCamelCase__ , padding="VALID" , groups=lowerCamelCase__ , use_bias=lowerCamelCase__ , name="convolution" , )
_UpperCAmelCase : List[Any] = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" )
_UpperCAmelCase : int = ACTaFN[activation] if activation is not None else tf.identity
def lowerCAmelCase__ ( self : int , lowerCamelCase__ : Tuple ) ->Any:
'''simple docstring'''
_UpperCAmelCase : List[str] = self.convolution(self.padding(lowerCamelCase__ ) )
_UpperCAmelCase : Optional[Any] = self.normalization(lowerCamelCase__ )
_UpperCAmelCase : List[Any] = self.activation(lowerCamelCase__ )
return hidden_state
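# A minimal sketch (kept as a comment) of the pad -> conv -> batch-norm ->
# activation pattern the layer above implements; the filter count and the input
# shape are illustrative, channels-last NHWC as the embedder's transpose produces:
#
#   x = tf.ones((1, 224, 224, 3))
#   x = tf.keras.layers.ZeroPadding2D(padding=3 // 2)(x)
#   x = tf.keras.layers.Conv2D(32, kernel_size=3, strides=2, padding="VALID", use_bias=False)(x)
#   x = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9)(x)
#   x = tf.keras.activations.relu(x)  # x.shape == (1, 112, 112, 32)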
class lowerCAmelCase__ ( tf.keras.layers.Layer ):
def __init__( self : str , lowerCamelCase__ : RegNetConfig , **lowerCamelCase__ : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
_UpperCAmelCase : List[str] = config.num_channels
_UpperCAmelCase : Any = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="embedder" , )
def lowerCAmelCase__ ( self : Any , lowerCamelCase__ : Optional[Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : List[str] = shape_list(lowerCamelCase__ )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
_UpperCAmelCase : Optional[Any] = tf.transpose(lowerCamelCase__ , perm=(0, 2, 3, 1) )
_UpperCAmelCase : List[Any] = self.embedder(lowerCamelCase__ )
return hidden_state
class lowerCAmelCase__ ( tf.keras.layers.Layer ):
def __init__( self : int , lowerCamelCase__ : int , lowerCamelCase__ : int = 2 , **lowerCamelCase__ : int ) ->Union[str, Any]:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
_UpperCAmelCase : int = tf.keras.layers.ConvaD(
filters=lowerCamelCase__ , kernel_size=1 , strides=lowerCamelCase__ , use_bias=lowerCamelCase__ , name="convolution" )
_UpperCAmelCase : Any = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" )
def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : tf.Tensor , lowerCamelCase__ : bool = False ) ->tf.Tensor:
'''simple docstring'''
return self.normalization(self.convolution(lowerCamelCase__ ) , training=lowerCamelCase__ )
class lowerCAmelCase__ ( tf.keras.layers.Layer ):
def __init__( self : Any , lowerCamelCase__ : int , lowerCamelCase__ : int , **lowerCamelCase__ : Optional[int] ) ->Dict:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
_UpperCAmelCase : List[str] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowerCamelCase__ , name="pooler" )
_UpperCAmelCase : int = [
tf.keras.layers.ConvaD(filters=lowerCamelCase__ , kernel_size=1 , activation="relu" , name="attention.0" ),
tf.keras.layers.ConvaD(filters=lowerCamelCase__ , kernel_size=1 , activation="sigmoid" , name="attention.2" ),
]
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : Optional[int] ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.pooler(lowerCamelCase__ )
for layer_module in self.attention:
_UpperCAmelCase : str = layer_module(lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = hidden_state * pooled
return hidden_state
class lowerCAmelCase__ ( tf.keras.layers.Layer ):
def __init__( self : Dict , lowerCamelCase__ : RegNetConfig , lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : int = 1 , **lowerCamelCase__ : Any ) ->List[str]:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
_UpperCAmelCase : List[str] = in_channels != out_channels or stride != 1
_UpperCAmelCase : List[str] = max(1 , out_channels // config.groups_width )
_UpperCAmelCase : List[str] = (
TFRegNetShortCut(lowerCamelCase__ , stride=lowerCamelCase__ , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
_UpperCAmelCase : Optional[int] = [
TFRegNetConvLayer(lowerCamelCase__ , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
lowerCamelCase__ , stride=lowerCamelCase__ , groups=lowerCamelCase__ , activation=config.hidden_act , name="layer.1" ),
TFRegNetConvLayer(lowerCamelCase__ , kernel_size=1 , activation=lowerCamelCase__ , name="layer.2" ),
]
_UpperCAmelCase : Union[str, Any] = ACTaFN[config.hidden_act]
def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : Union[str, Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Any = hidden_state
for layer_module in self.layers:
_UpperCAmelCase : List[Any] = layer_module(lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = self.shortcut(lowerCamelCase__ )
hidden_state += residual
_UpperCAmelCase : List[Any] = self.activation(lowerCamelCase__ )
return hidden_state
class lowerCAmelCase__ ( tf.keras.layers.Layer ):
def __init__( self : List[Any] , lowerCamelCase__ : RegNetConfig , lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : int = 1 , **lowerCamelCase__ : str ) ->Optional[int]:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = in_channels != out_channels or stride != 1
_UpperCAmelCase : Optional[int] = max(1 , out_channels // config.groups_width )
_UpperCAmelCase : Union[str, Any] = (
TFRegNetShortCut(lowerCamelCase__ , stride=lowerCamelCase__ , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
_UpperCAmelCase : List[Any] = [
TFRegNetConvLayer(lowerCamelCase__ , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
lowerCamelCase__ , stride=lowerCamelCase__ , groups=lowerCamelCase__ , activation=config.hidden_act , name="layer.1" ),
TFRegNetSELayer(lowerCamelCase__ , reduced_channels=int(round(in_channels / 4 ) ) , name="layer.2" ),
TFRegNetConvLayer(lowerCamelCase__ , kernel_size=1 , activation=lowerCamelCase__ , name="layer.3" ),
]
_UpperCAmelCase : int = ACTaFN[config.hidden_act]
def lowerCAmelCase__ ( self : Any , lowerCamelCase__ : str ) ->Any:
'''simple docstring'''
_UpperCAmelCase : int = hidden_state
for layer_module in self.layers:
_UpperCAmelCase : Tuple = layer_module(lowerCamelCase__ )
_UpperCAmelCase : List[Any] = self.shortcut(lowerCamelCase__ )
hidden_state += residual
_UpperCAmelCase : Tuple = self.activation(lowerCamelCase__ )
return hidden_state
class lowerCAmelCase__ ( tf.keras.layers.Layer ):
def __init__( self : str , lowerCamelCase__ : RegNetConfig , lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : int = 2 , lowerCamelCase__ : int = 2 , **lowerCamelCase__ : Union[str, Any] ) ->Optional[int]:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
_UpperCAmelCase : str = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
_UpperCAmelCase : List[str] = [
# downsampling is done in the first layer with stride of 2
layer(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , stride=lowerCamelCase__ , name="layers.0" ),
*[layer(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , name=F"""layers.{i+1}""" ) for i in range(depth - 1 )],
]
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : List[str] ) ->List[str]:
'''simple docstring'''
for layer_module in self.layers:
_UpperCAmelCase : Optional[int] = layer_module(lowerCamelCase__ )
return hidden_state
class lowerCAmelCase__ ( tf.keras.layers.Layer ):
def __init__( self : Dict , lowerCamelCase__ : RegNetConfig , **lowerCamelCase__ : int ) ->Dict:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
lowerCamelCase__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="stages.0" , ) )
_UpperCAmelCase : Dict = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(lowerCamelCase__ , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , depth=lowerCamelCase__ , name=F"""stages.{i+1}""" ) )
def lowerCAmelCase__ ( self : str , lowerCamelCase__ : tf.Tensor , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = True ) ->TFBaseModelOutputWithNoAttention:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_UpperCAmelCase : Optional[Any] = hidden_states + (hidden_state,)
_UpperCAmelCase : Dict = stage_module(lowerCamelCase__ )
if output_hidden_states:
_UpperCAmelCase : Tuple = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=lowerCamelCase__ , hidden_states=lowerCamelCase__ )
@keras_serializable
class lowerCAmelCase__ ( tf.keras.layers.Layer ):
    config_class = RegNetConfig
def __init__( self : Union[str, Any] , lowerCamelCase__ : Any , **lowerCamelCase__ : str ) ->int:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = config
_UpperCAmelCase : Union[str, Any] = TFRegNetEmbeddings(lowerCamelCase__ , name="embedder" )
_UpperCAmelCase : Union[str, Any] = TFRegNetEncoder(lowerCamelCase__ , name="encoder" )
_UpperCAmelCase : Union[str, Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowerCamelCase__ , name="pooler" )
@unpack_inputs
def lowerCAmelCase__ ( self : Any , lowerCamelCase__ : tf.Tensor , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : bool = False , ) ->TFBaseModelOutputWithPoolingAndNoAttention:
'''simple docstring'''
_UpperCAmelCase : Tuple = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCAmelCase : List[str] = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCAmelCase : Union[str, Any] = self.embedder(lowerCamelCase__ , training=lowerCamelCase__ )
_UpperCAmelCase : str = self.encoder(
lowerCamelCase__ , output_hidden_states=lowerCamelCase__ , return_dict=lowerCamelCase__ , training=lowerCamelCase__ )
_UpperCAmelCase : Dict = encoder_outputs[0]
_UpperCAmelCase : Dict = self.pooler(lowerCamelCase__ )
        # Change to NCHW output format to have uniformity in the modules
_UpperCAmelCase : Union[str, Any] = tf.transpose(lowerCamelCase__ , perm=(0, 3, 1, 2) )
_UpperCAmelCase : Tuple = tf.transpose(lowerCamelCase__ , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
_UpperCAmelCase : List[str] = tuple([tf.transpose(lowerCamelCase__ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowerCamelCase__ , pooler_output=lowerCamelCase__ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class lowerCAmelCase__ ( UpperCAmelCase__ ):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
@property
def lowerCAmelCase__ ( self : Optional[Any] ) ->Optional[int]:
'''simple docstring'''
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_24, 2_24) , dtype=tf.floataa )}
lowerCamelCase__ = r'\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n'
lowerCamelCase__ = r'\n    Args:\n        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`ConvNextImageProcessor.__call__`] for details.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." , UpperCAmelCase__ , )
class lowerCAmelCase__ ( UpperCAmelCase__ ):
def __init__( self : Any , lowerCamelCase__ : RegNetConfig , *lowerCamelCase__ : Any , **lowerCamelCase__ : List[str] ) ->Optional[int]:
'''simple docstring'''
super().__init__(lowerCamelCase__ , *lowerCamelCase__ , **lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = TFRegNetMainLayer(lowerCamelCase__ , name="regnet" )
@unpack_inputs
@add_start_docstrings_to_model_forward(lowerCamelCase__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCamelCase__ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowerCAmelCase__ ( self : str , lowerCamelCase__ : tf.Tensor , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : Any=False , ) ->Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCAmelCase : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCAmelCase : Union[str, Any] = self.regnet(
pixel_values=lowerCamelCase__ , output_hidden_states=lowerCamelCase__ , return_dict=lowerCamelCase__ , training=lowerCamelCase__ , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , UpperCAmelCase__ , )
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ ):
def __init__( self : str , lowerCamelCase__ : RegNetConfig , *lowerCamelCase__ : List[Any] , **lowerCamelCase__ : Union[str, Any] ) ->Any:
'''simple docstring'''
super().__init__(lowerCamelCase__ , *lowerCamelCase__ , **lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = config.num_labels
_UpperCAmelCase : Dict = TFRegNetMainLayer(lowerCamelCase__ , name="regnet" )
# classification head
_UpperCAmelCase : str = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name="classifier.1" ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(lowerCamelCase__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCamelCase__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowerCAmelCase__ ( self : str , lowerCamelCase__ : tf.Tensor = None , lowerCamelCase__ : tf.Tensor = None , lowerCamelCase__ : bool = None , lowerCamelCase__ : bool = None , lowerCamelCase__ : Dict=False , ) ->Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
'''simple docstring'''
_UpperCAmelCase : str = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCAmelCase : str = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCAmelCase : Union[str, Any] = self.regnet(
lowerCamelCase__ , output_hidden_states=lowerCamelCase__ , return_dict=lowerCamelCase__ , training=lowerCamelCase__ )
_UpperCAmelCase : int = outputs.pooler_output if return_dict else outputs[1]
_UpperCAmelCase : Dict = self.classifier[0](lowerCamelCase__ )
_UpperCAmelCase : str = self.classifier[1](lowerCamelCase__ )
_UpperCAmelCase : Tuple = None if labels is None else self.hf_compute_loss(labels=lowerCamelCase__ , logits=lowerCamelCase__ )
if not return_dict:
_UpperCAmelCase : int = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=lowerCamelCase__ , logits=lowerCamelCase__ , hidden_states=outputs.hidden_states )
| 322
| 1
|
'''simple docstring'''
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)
    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()
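# Note (editorial): because comments and blank lines are stripped before hashing,
# purely cosmetic edits to a packaged builder module do not change its hash, so
# caches keyed on this value stay valid.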
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data file extensions
_EXTENSION_TO_MODULE = {
'.csv': ('csv', {}),
'.tsv': ('csv', {'sep': '\t'}),
'.json': ('json', {}),
'.jsonl': ('json', {}),
'.parquet': ('parquet', {}),
'.arrow': ('arrow', {}),
'.txt': ('text', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {'imagefolder', 'audiofolder'}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
| 322
|
'''simple docstring'''
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module) -> bool:
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)
def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod
    if is_deepspeed_available():
        options += (DeepSpeedEngine,)
    while isinstance(model, options):
        model = model.module
    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)
    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model
    return model
def wait_for_everyone():
    PartialState().wait_for_everyone()
def save(obj, f):
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)
@contextmanager
def patch_environment(**kwargs):
    # Temporarily export the given keyword arguments as upper-cased environment
    # variables, then remove them again when the block exits.
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)
    yield
    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]
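# Usage sketch for patch_environment (illustrative, not from the original file):
#   with patch_environment(omp_num_threads="1"):
#       ...  # os.environ["OMP_NUM_THREADS"] == "1" inside the block only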
def get_pretty_name(obj):
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)
def merge_dicts(source, destination):
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value
    return destination
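# Example (illustrative): nested dicts merge key-by-key instead of overwriting:
#   merge_dicts({"a": {"x": 1}}, {"a": {"y": 2}})  # -> {"a": {"y": 2, "x": 1}}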
def is_port_in_use(port: int = None) -> bool:
    if port is None:
        port = 29_500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
| 322
| 1
|
'''simple docstring'''
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
lowerCamelCase__ = {
'cola': 2,
'mnli': 3,
'mrpc': 2,
'sst-2': 2,
'sts-b': 1,
'qqp': 2,
'qnli': 2,
'rte': 2,
'wnli': 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, xlnet_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    # Initialise PyTorch model
    config = XLNetConfig.from_json_file(xlnet_config_file)
    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)
    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
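# Example invocation (script name and paths are placeholders, not from the source):
#   python convert_xlnet_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./xlnet_cased/model.ckpt \
#       --xlnet_config_file ./xlnet_cased/config.json \
#       --pytorch_dump_folder_path ./xlnet-pytorch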
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--xlnet_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained XLNet model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--finetuning_task',
default=None,
type=str,
help='Name of a task on which the XLNet TensorFlow model was fine-tuned',
)
    args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| 322
|
'''simple docstring'''
def is_power_of_two(number: int) -> bool:
    # A power of two has a single set bit, so number & (number - 1) clears it to 0.
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0
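# Illustrative checks (the name `is_power_of_two` is an assumed label, and note
# that 0 also passes the bit test since 0 & -1 == 0):
#   is_power_of_two(16)  # True,  0b10000 & 0b01111 == 0
#   is_power_of_two(18)  # False, 0b10010 & 0b10001 == 0b10000 != 0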
if __name__ == "__main__":
import doctest
doctest.testmod()
| 322
| 1
|
'''simple docstring'''
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class lowerCAmelCase__ ( unittest.TestCase ):
def lowerCAmelCase__ ( self : Optional[Any] ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase : int = mock.Mock()
_UpperCAmelCase : str = 5_00
_UpperCAmelCase : Optional[int] = {}
_UpperCAmelCase : int = HTTPError
_UpperCAmelCase : List[str] = {}
# Download this model to make sure it's in the cache.
_UpperCAmelCase : List[Any] = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.Session.request" , return_value=lowerCamelCase__ ) as mock_head:
_UpperCAmelCase : Tuple = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
        # This check ensures we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def lowerCAmelCase__ ( self : List[str] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Any = mock.Mock()
_UpperCAmelCase : List[str] = 5_00
_UpperCAmelCase : List[str] = {}
_UpperCAmelCase : str = HTTPError
_UpperCAmelCase : str = {}
# Download this model to make sure it's in the cache.
_UpperCAmelCase : Optional[int] = GPTaTokenizerFast.from_pretrained("gpt2" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.Session.request" , return_value=lowerCamelCase__ ) as mock_head:
_UpperCAmelCase : str = GPTaTokenizerFast.from_pretrained("gpt2" )
        # This check ensures we did call the fake head request
mock_head.assert_called()
def lowerCAmelCase__ ( self : str ) ->str:
'''simple docstring'''
try:
_UpperCAmelCase : Tuple = tempfile.mktemp()
with open(lowerCamelCase__ , "wb" ) as f:
http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" , lowerCamelCase__ )
_UpperCAmelCase : int = AlbertTokenizer.from_pretrained(lowerCamelCase__ )
finally:
os.remove(lowerCamelCase__ )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile("tokenizer.json" ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open("tokenizer.json" , "wb" ) as f:
http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json" , lowerCamelCase__ )
_UpperCAmelCase : List[Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 has a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 10_00 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove("tokenizer.json" )
def lowerCAmelCase__ ( self : Optional[int] ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" )
@is_staging_test
class lowerCAmelCase__ ( unittest.TestCase ):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def lowerCAmelCase__ ( cls : Union[str, Any] ) ->Union[str, Any]:
'''simple docstring'''
        cls._token = TOKEN
HfFolder.save_token(lowerCamelCase__ )
@classmethod
def lowerCAmelCase__ ( cls : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id="test-tokenizer" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-tokenizer-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-tokenizer" )
except HTTPError:
pass
def lowerCAmelCase__ ( self : Tuple ) ->Union[str, Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase : str = os.path.join(lowerCamelCase__ , "vocab.txt" )
with open(lowerCamelCase__ , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
_UpperCAmelCase : List[Any] = BertTokenizer(lowerCamelCase__ )
tokenizer.push_to_hub("test-tokenizer" , use_auth_token=self._token )
_UpperCAmelCase : int = BertTokenizer.from_pretrained(F"""{USER}/test-tokenizer""" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id="test-tokenizer" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCamelCase__ , repo_id="test-tokenizer" , push_to_hub=lowerCamelCase__ , use_auth_token=self._token )
_UpperCAmelCase : Tuple = BertTokenizer.from_pretrained(F"""{USER}/test-tokenizer""" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
def lowerCAmelCase__ ( self : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase : Optional[int] = os.path.join(lowerCamelCase__ , "vocab.txt" )
with open(lowerCamelCase__ , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
_UpperCAmelCase : List[Any] = BertTokenizer(lowerCamelCase__ )
tokenizer.push_to_hub("valid_org/test-tokenizer-org" , use_auth_token=self._token )
_UpperCAmelCase : List[Any] = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-tokenizer-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
lowerCamelCase__ , repo_id="valid_org/test-tokenizer-org" , push_to_hub=lowerCamelCase__ , use_auth_token=self._token )
_UpperCAmelCase : int = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
@require_tokenizers
def lowerCAmelCase__ ( self : List[str] ) ->Union[str, Any]:
'''simple docstring'''
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase : Optional[Any] = os.path.join(lowerCamelCase__ , "vocab.txt" )
with open(lowerCamelCase__ , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
_UpperCAmelCase : Optional[int] = CustomTokenizer(lowerCamelCase__ )
# No fast custom tokenizer
tokenizer.push_to_hub("test-dynamic-tokenizer" , use_auth_token=self._token )
_UpperCAmelCase : Dict = AutoTokenizer.from_pretrained(F"""{USER}/test-dynamic-tokenizer""" , trust_remote_code=lowerCamelCase__ )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , "CustomTokenizer" )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase : str = os.path.join(lowerCamelCase__ , "vocab.txt" )
with open(lowerCamelCase__ , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
_UpperCAmelCase : str = BertTokenizerFast.from_pretrained(lowerCamelCase__ )
bert_tokenizer.save_pretrained(lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = CustomTokenizerFast.from_pretrained(lowerCamelCase__ )
tokenizer.push_to_hub("test-dynamic-tokenizer" , use_auth_token=self._token )
_UpperCAmelCase : int = AutoTokenizer.from_pretrained(F"""{USER}/test-dynamic-tokenizer""" , trust_remote_code=lowerCamelCase__ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , "CustomTokenizerFast" )
_UpperCAmelCase : Any = AutoTokenizer.from_pretrained(
F"""{USER}/test-dynamic-tokenizer""" , use_fast=lowerCamelCase__ , trust_remote_code=lowerCamelCase__ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , "CustomTokenizer" )
class lowerCAmelCase__ ( unittest.TestCase ):
def lowerCAmelCase__ ( self : Dict ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = Trie()
trie.add("Hello 友達" )
self.assertEqual(trie.data , {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}} )
trie.add("Hello" )
trie.data
self.assertEqual(trie.data , {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}} )
def lowerCAmelCase__ ( self : int ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : List[str] = Trie()
self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) , ["[CLS] This is a extra_id_100"] )
trie.add("[CLS]" )
trie.add("extra_id_1" )
trie.add("extra_id_100" )
self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) , ["[CLS]", " This is a ", "extra_id_100"] )
def lowerCAmelCase__ ( self : List[Any] ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : int = Trie()
trie.add("A" )
self.assertEqual(trie.split("ABC" ) , ["A", "BC"] )
self.assertEqual(trie.split("BCA" ) , ["BC", "A"] )
def lowerCAmelCase__ ( self : List[Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Tuple = Trie()
trie.add("TOKEN]" )
trie.add("[SPECIAL_TOKEN]" )
self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) , ["This is something ", "[SPECIAL_TOKEN]"] )
def lowerCAmelCase__ ( self : Optional[int] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = Trie()
trie.add("A" )
trie.add("P" )
trie.add("[SPECIAL_TOKEN]" )
self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) , ["This is something ", "[SPECIAL_TOKEN]"] )
def lowerCAmelCase__ ( self : List[str] ) ->int:
'''simple docstring'''
_UpperCAmelCase : List[str] = Trie()
trie.add("AB" )
trie.add("B" )
trie.add("C" )
self.assertEqual(trie.split("ABC" ) , ["AB", "C"] )
def lowerCAmelCase__ ( self : List[str] ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = Trie()
trie.add("ABC" )
trie.add("B" )
trie.add("CD" )
self.assertEqual(trie.split("ABCD" ) , ["ABC", "D"] )
def lowerCAmelCase__ ( self : Dict ) ->str:
'''simple docstring'''
_UpperCAmelCase : int = Trie()
_UpperCAmelCase : int = trie.cut_text("ABC" , [0, 0, 2, 1, 2, 3] )
self.assertEqual(lowerCamelCase__ , ["AB", "C"] )
| 322
|
'''simple docstring'''
def perfect(number: int) -> bool:
    # A perfect number equals the sum of its proper divisors.
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number
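# Illustration: 6 is perfect because its proper divisors 1 + 2 + 3 sum back to 6;
# the next perfect numbers are 28, 496 and 8128.
#   perfect(6)   # True
#   perfect(27)  # False (1 + 3 + 9 == 13)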
if __name__ == "__main__":
print('Program to check whether a number is a Perfect number or not...')
    number = int(input('Enter number: ').strip())
print(F'''{number} is {"" if perfect(number) else "not "}a Perfect Number.''')
| 322
| 1
|
'''simple docstring'''
MOD_ADLER = 65_521
def adler32(plain_text: str) -> int:
    # Adler-32: two running sums modulo 65521 (the largest prime below 2**16),
    # packed as (b << 16) | a.
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
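# Worked example (added for illustration; `adler32` is an assumed name for the
# otherwise-unnamed helper). For "Wikipedia":
#   a = (1 + sum of character codes) % 65_521 = 920  = 0x0398
#   b = (running sum of a values)    % 65_521 = 4582 = 0x11E6
#   adler32("Wikipedia")  # -> (0x11E6 << 16) | 0x0398 == 300286872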
| 322
|
'''simple docstring'''
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    # Direct evaluation: sum of c_i * x**i with coefficients in ascending order.
    return sum(c * (x**i) for i, c in enumerate(poly))
def horner(poly: Sequence[float], x: float) -> float:
    # Horner's rule: fold from the highest coefficient down, one multiply-add per step.
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
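# Both evaluators agree on the demo below: for poly = (0.0, 0.0, 5.0, 9.3, 7.0)
# and x = 10.0 the value is 5*10**2 + 9.3*10**3 + 7*10**4 = 79800.0. Horner's
# rule reaches it with n multiply-adds instead of computing each power of x.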
if __name__ == "__main__":
lowerCamelCase__ = (0.0, 0.0, 5.0, 9.3, 7.0)
lowerCamelCase__ = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 322
| 1
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar('T')
class lowerCAmelCase__ ( Generic[T] ):
def __init__( self : Union[str, Any] , lowerCamelCase__ : T ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase : Dict = data
_UpperCAmelCase : Node[T] | None = None
def __str__( self : Any ) ->str:
'''simple docstring'''
return F"""{self.data}"""
class lowerCAmelCase__ ( Generic[T] ):
def __init__( self : Tuple ) ->None:
'''simple docstring'''
_UpperCAmelCase : Node[T] | None = None
def __iter__( self : List[str] ) ->Iterator[T]:
'''simple docstring'''
_UpperCAmelCase : Any = self.top
while node:
yield node.data
_UpperCAmelCase : Dict = node.next
def __str__( self : Dict ) ->str:
'''simple docstring'''
return "->".join([str(lowerCamelCase__ ) for item in self] )
def __len__( self : Optional[int] ) ->int:
'''simple docstring'''
return len(tuple(iter(self ) ) )
def lowerCAmelCase__ ( self : List[Any] ) ->bool:
'''simple docstring'''
return self.top is None
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : T ) ->None:
'''simple docstring'''
_UpperCAmelCase : List[Any] = Node(lowerCamelCase__ )
if not self.is_empty():
_UpperCAmelCase : Tuple = self.top
_UpperCAmelCase : List[str] = node
def lowerCAmelCase__ ( self : Union[str, Any] ) ->T:
'''simple docstring'''
if self.is_empty():
raise IndexError("pop from empty stack" )
assert isinstance(self.top , lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = self.top
_UpperCAmelCase : Optional[Any] = self.top.next
return pop_node.data
def lowerCAmelCase__ ( self : Union[str, Any] ) ->T:
'''simple docstring'''
if self.is_empty():
raise IndexError("peek from empty stack" )
assert self.top is not None
return self.top.data
def lowerCAmelCase__ ( self : List[Any] ) ->None:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = None
if __name__ == "__main__":
from doctest import testmod
testmod()
| 322
|
'''simple docstring'''
def find_min(arr: list[int]) -> int:
    # Partition `arr` into two subsets with sums as close as possible and return
    # the minimum difference. dp[i][j] is True when some subset of the first i
    # elements sums to exactly j.
    n = len(arr)
    s = sum(arr)
    dp = [[False for _ in range(s + 1)] for _ in range(n + 1)]
    # a sum of 0 is always achievable (the empty subset)
    for i in range(n + 1):
        dp[i][0] = True
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            # either skip arr[i - 1] ...
            dp[i][j] = dp[i - 1][j]
            # ... or include it when it fits
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    diff = s
    for j in range(s // 2, -1, -1):
        if dp[n][j]:
            diff = s - 2 * j
            break
    return diff
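# Example (both the name `find_min` and this demo are illustrative):
#   find_min([1, 6, 11, 5])  # -> 1, best split {1, 11} vs {6, 5}: |12 - 11| == 1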
| 322
| 1
|
'''simple docstring'''
from math import factorial
def combinations(n: int, k: int) -> int:
    # If either condition holds, the function is being asked
    # to calculate a factorial of a negative number, which is not possible
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))
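# Sanity values for the demos below: combinations(52, 5) == 2_598_960,
# combinations(40, 4) == 91_390 and combinations(10, 3) == 120.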
if __name__ == "__main__":
print(
'The number of five-card hands possible from a standard',
F'''fifty-two card deck is: {combinations(52, 5)}\n''',
)
print(
'If a class of 40 students must be arranged into groups of',
F'''4 for group projects, there are {combinations(40, 4)} ways''',
'to arrange them.\n',
)
print(
'If 10 teams are competing in a Formula One race, there',
F'''are {combinations(10, 3)} ways that first, second and''',
'third place can be awarded.',
)
| 322
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'microsoft/resnet-50': 'https://huggingface.co/microsoft/resnet-50/blob/main/config.json',
}
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ ):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]
def __init__( self : Dict , lowerCamelCase__ : Tuple=3 , lowerCamelCase__ : Any=64 , lowerCamelCase__ : Optional[int]=[2_56, 5_12, 10_24, 20_48] , lowerCamelCase__ : int=[3, 4, 6, 3] , lowerCamelCase__ : Dict="bottleneck" , lowerCamelCase__ : Dict="relu" , lowerCamelCase__ : List[Any]=False , lowerCamelCase__ : Any=None , lowerCamelCase__ : int=None , **lowerCamelCase__ : Tuple , ) ->List[str]:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
if layer_type not in self.layer_types:
raise ValueError(F"""layer_type={layer_type} is not one of {','.join(self.layer_types )}""" )
_UpperCAmelCase : str = num_channels
_UpperCAmelCase : List[str] = embedding_size
_UpperCAmelCase : Tuple = hidden_sizes
_UpperCAmelCase : Dict = depths
_UpperCAmelCase : List[Any] = layer_type
_UpperCAmelCase : Optional[int] = hidden_act
_UpperCAmelCase : Tuple = downsample_in_first_stage
_UpperCAmelCase : str = ["stem"] + [F"""stage{idx}""" for idx in range(1 , len(lowerCamelCase__ ) + 1 )]
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = get_aligned_output_features_output_indices(
out_features=lowerCamelCase__ , out_indices=lowerCamelCase__ , stage_names=self.stage_names )
class lowerCAmelCase__ ( UpperCAmelCase__ ):
    torch_onnx_minimum_version = version.parse("1.11")
@property
def lowerCAmelCase__ ( self : Optional[Any] ) ->Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def lowerCAmelCase__ ( self : str ) ->float:
'''simple docstring'''
return 1E-3
| 322
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'facebook/deit-base-distilled-patch16-224': (
'https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json'
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class lowerCAmelCase__ ( UpperCAmelCase__ ):
    model_type = "deit"
def __init__( self : Union[str, Any] , lowerCamelCase__ : int=7_68 , lowerCamelCase__ : List[Any]=12 , lowerCamelCase__ : List[Any]=12 , lowerCamelCase__ : Optional[Any]=30_72 , lowerCamelCase__ : int="gelu" , lowerCamelCase__ : Union[str, Any]=0.0 , lowerCamelCase__ : List[Any]=0.0 , lowerCamelCase__ : Dict=0.0_2 , lowerCamelCase__ : List[str]=1E-12 , lowerCamelCase__ : Optional[Any]=2_24 , lowerCamelCase__ : Dict=16 , lowerCamelCase__ : Dict=3 , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : Any=16 , **lowerCamelCase__ : Union[str, Any] , ) ->str:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = hidden_size
_UpperCAmelCase : Any = num_hidden_layers
_UpperCAmelCase : Union[str, Any] = num_attention_heads
_UpperCAmelCase : Union[str, Any] = intermediate_size
_UpperCAmelCase : Optional[Any] = hidden_act
_UpperCAmelCase : Any = hidden_dropout_prob
_UpperCAmelCase : Any = attention_probs_dropout_prob
_UpperCAmelCase : str = initializer_range
_UpperCAmelCase : Dict = layer_norm_eps
_UpperCAmelCase : str = image_size
_UpperCAmelCase : int = patch_size
_UpperCAmelCase : Union[str, Any] = num_channels
_UpperCAmelCase : Dict = qkv_bias
_UpperCAmelCase : Tuple = encoder_stride
class lowerCAmelCase__ ( UpperCAmelCase__ ):
    torch_onnx_minimum_version = version.parse("1.11")
@property
def lowerCAmelCase__ ( self : Optional[int] ) ->Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def lowerCAmelCase__ ( self : Union[str, Any] ) ->float:
'''simple docstring'''
return 1E-4
| 322
|
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
[
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def __lowerCAmelCase (__lowerCAmelCase ):
if isinstance(__lowerCAmelCase , torch.Tensor ):
return image
elif isinstance(__lowerCAmelCase , PIL.Image.Image ):
_UpperCAmelCase : int = [image]
_UpperCAmelCase : str = [trans(img.convert("RGB" ) ) for img in image]
_UpperCAmelCase : Optional[Any] = torch.stack(__lowerCAmelCase )
return image
class lowerCAmelCase__ ( UpperCAmelCase__ ):
def __init__( self : Tuple , lowerCamelCase__ : int , lowerCamelCase__ : int ) ->int:
'''simple docstring'''
super().__init__()
# make sure scheduler can always be converted to DDIM
_UpperCAmelCase : Tuple = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=lowerCamelCase__ , scheduler=lowerCamelCase__ )
def lowerCAmelCase__ ( self : Dict , lowerCamelCase__ : str ) ->Union[str, Any]:
'''simple docstring'''
if strength < 0 or strength > 1:
raise ValueError(F"""The value of strength should in [0.0, 1.0] but is {strength}""" )
def lowerCAmelCase__ ( self : Dict , lowerCamelCase__ : Dict , lowerCamelCase__ : List[str] , lowerCamelCase__ : int ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : str = min(int(num_inference_steps * strength ) , lowerCamelCase__ )
_UpperCAmelCase : str = max(num_inference_steps - init_timestep , 0 )
_UpperCAmelCase : List[str] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
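    # Illustration of the arithmetic above: strength=0.8 with
    # num_inference_steps=50 gives init_timestep=40, so t_start=10 and the
    # denoising loop runs over the last 40 scheduler timesteps.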
def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : List[str] , lowerCamelCase__ : Any , lowerCamelCase__ : str , lowerCamelCase__ : str , lowerCamelCase__ : Dict , lowerCamelCase__ : Optional[Any]=None ) ->str:
'''simple docstring'''
if not isinstance(lowerCamelCase__ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(lowerCamelCase__ )}""" )
_UpperCAmelCase : Union[str, Any] = image.to(device=lowerCamelCase__ , dtype=lowerCamelCase__ )
if isinstance(lowerCamelCase__ , lowerCamelCase__ ) and len(lowerCamelCase__ ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(lowerCamelCase__ )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
_UpperCAmelCase : List[str] = init_latents.shape
_UpperCAmelCase : Optional[int] = randn_tensor(lowerCamelCase__ , generator=lowerCamelCase__ , device=lowerCamelCase__ , dtype=lowerCamelCase__ )
# get latents
print("add noise to latents at timestep" , lowerCamelCase__ )
_UpperCAmelCase : List[Any] = self.scheduler.add_noise(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
_UpperCAmelCase : List[Any] = init_latents
return latents
@torch.no_grad()
def __call__( self : Any , lowerCamelCase__ : Union[torch.FloatTensor, PIL.Image.Image] = None , lowerCamelCase__ : float = 0.8 , lowerCamelCase__ : int = 1 , lowerCamelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCamelCase__ : float = 0.0 , lowerCamelCase__ : int = 50 , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : Optional[str] = "pil" , lowerCamelCase__ : bool = True , ) ->Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
self.check_inputs(lowerCamelCase__ )
# 2. Preprocess image
_UpperCAmelCase : Dict = preprocess(lowerCamelCase__ )
# 3. set timesteps
self.scheduler.set_timesteps(lowerCamelCase__ , device=self.device )
_UpperCAmelCase , _UpperCAmelCase : Any = self.get_timesteps(lowerCamelCase__ , lowerCamelCase__ , self.device )
_UpperCAmelCase : List[Any] = timesteps[:1].repeat(lowerCamelCase__ )
# 4. Prepare latent variables
_UpperCAmelCase : Optional[int] = self.prepare_latents(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , self.unet.dtype , self.device , lowerCamelCase__ )
_UpperCAmelCase : Any = latents
# 5. Denoising loop
for t in self.progress_bar(lowerCamelCase__ ):
# 1. predict noise model_output
_UpperCAmelCase : Union[str, Any] = self.unet(lowerCamelCase__ , lowerCamelCase__ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
_UpperCAmelCase : int = self.scheduler.step(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , eta=lowerCamelCase__ , use_clipped_model_output=lowerCamelCase__ , generator=lowerCamelCase__ , ).prev_sample
_UpperCAmelCase : Dict = (image / 2 + 0.5).clamp(0 , 1 )
_UpperCAmelCase : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_UpperCAmelCase : str = self.numpy_to_pil(lowerCamelCase__ )
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=lowerCamelCase__ )
| 322
| 1
|
'''simple docstring'''
def solution(n: int = 4_000_000) -> int:
    # Sum the even-valued Fibonacci terms that do not exceed n.
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
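# Project Euler #2: the even Fibonacci numbers not exceeding four million are
# 2, 8, 34, 144, ... (every third term), and their sum is 4613732.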
if __name__ == "__main__":
print(F'''{solution() = }''')
| 322
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]
def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    # Gaussian elimination with partial pivoting on the augmented matrix
    # [matrix | vector]; returns the solution as a column vector.
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]
    row = 0
    col = 0
    while row < size and col < size:
        # pivoting: bring the row with the largest entry in this column up front
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[1]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]
        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio
        row += 1
        col += 1
    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio
    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
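# Quick usage sketch (illustrative): solving x + y = 3, x - y = 1
#   solve([[1, 1], [1, -1]], [[3], [1]])  # -> [[2.0], [1.0]]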
def interpolate(y_points: list[int]) -> Callable[[int], int]:
    # Fit an exact polynomial through (1, y_points[0]), (2, y_points[1]), ... by
    # solving the Vandermonde system, then return it as a callable.
    size: int = len(y_points)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    x_val: int
    y_val: int
    col: int
    for x_val, y_val in enumerate(y_points):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val
    coeffs: Matrix = solve(matrix, vector)
    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )
    return interpolated_func
def question_function(variable: int) -> int:
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )
def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    # Sum the first value at which each truncated-fit polynomial diverges from
    # func (the "first incorrect terms" of Project Euler 101).
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret
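# For the default question_function and order=10 this sums the first incorrect
# terms of the fitted polynomials; Project Euler 101 puts the total at 37076114526.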
if __name__ == "__main__":
print(F'''{solution() = }''')
| 322
| 1
|
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=1,
        every_n_epochs=1,
    )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
class lowerCAmelCase__ ( pl.Callback ):
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : int , lowerCamelCase__ : Optional[Any] ) ->str:
'''simple docstring'''
_UpperCAmelCase : List[str] = {F"""lr_group_{i}""": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(lowerCamelCase__ )
@rank_zero_only
def lowerCAmelCase__ ( self : Any , lowerCamelCase__ : pl.Trainer , lowerCamelCase__ : pl.LightningModule , lowerCamelCase__ : str , lowerCamelCase__ : int=True ) ->None:
'''simple docstring'''
logger.info(F"""***** {type_path} results at step {trainer.global_step:05d} *****""" )
_UpperCAmelCase : List[Any] = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]} )
# Log results
_UpperCAmelCase : Optional[int] = Path(pl_module.hparams.output_dir )
if type_path == "test":
_UpperCAmelCase : Optional[Any] = od / "test_results.txt"
_UpperCAmelCase : Optional[int] = od / "test_generations.txt"
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
_UpperCAmelCase : Tuple = od / F"""{type_path}_results/{trainer.global_step:05d}.txt"""
_UpperCAmelCase : Union[str, Any] = od / F"""{type_path}_generations/{trainer.global_step:05d}.txt"""
results_file.parent.mkdir(exist_ok=lowerCamelCase__ )
generations_file.parent.mkdir(exist_ok=lowerCamelCase__ )
with open(lowerCamelCase__ , "a+" ) as writer:
for key in sorted(lowerCamelCase__ ):
if key in ["log", "progress_bar", "preds"]:
continue
_UpperCAmelCase : Any = metrics[key]
if isinstance(lowerCamelCase__ , torch.Tensor ):
_UpperCAmelCase : Optional[int] = val.item()
_UpperCAmelCase : Optional[int] = F"""{key}: {val:.6f}\n"""
writer.write(lowerCamelCase__ )
if not save_generations:
return
if "preds" in metrics:
_UpperCAmelCase : List[Any] = "\n".join(metrics["preds"] )
generations_file.open("w+" ).write(lowerCamelCase__ )
@rank_zero_only
def lowerCAmelCase__ ( self : str , lowerCamelCase__ : Tuple , lowerCamelCase__ : Tuple ) ->Optional[int]:
'''simple docstring'''
try:
_UpperCAmelCase : str = pl_module.model.model.num_parameters()
except AttributeError:
_UpperCAmelCase : Optional[int] = pl_module.model.num_parameters()
_UpperCAmelCase : Union[str, Any] = count_trainable_parameters(lowerCamelCase__ )
# mp stands for million parameters
trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1E6, "grad_mp": n_trainable_pars / 1E6} )
@rank_zero_only
def lowerCAmelCase__ ( self : str , lowerCamelCase__ : pl.Trainer , lowerCamelCase__ : pl.LightningModule ) ->Tuple:
'''simple docstring'''
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(lowerCamelCase__ , lowerCamelCase__ , "test" )
@rank_zero_only
def lowerCAmelCase__ ( self : int , lowerCamelCase__ : pl.Trainer , lowerCamelCase__ : Optional[int] ) ->Optional[int]:
'''simple docstring'''
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 322
|
'''simple docstring'''
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 322
| 1
|
'''simple docstring'''
from manim import *
class lowerCAmelCase__ ( UpperCAmelCase__ ):
def lowerCAmelCase__ ( self : Any ) ->Any:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = Rectangle(height=0.5 , width=0.5 )
_UpperCAmelCase : Dict = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
_UpperCAmelCase : Dict = [mem.copy() for i in range(6 )]
_UpperCAmelCase : int = [mem.copy() for i in range(6 )]
_UpperCAmelCase : List[Any] = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
_UpperCAmelCase : List[str] = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
_UpperCAmelCase : str = VGroup(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
_UpperCAmelCase : str = Text("CPU" , font_size=24 )
_UpperCAmelCase : Optional[int] = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowerCamelCase__ )
_UpperCAmelCase : str = [mem.copy() for i in range(4 )]
_UpperCAmelCase : Optional[int] = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
_UpperCAmelCase : Dict = Text("GPU" , font_size=24 )
_UpperCAmelCase : List[Any] = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ )
gpu.move_to([-1, -1, 0] )
self.add(lowerCamelCase__ )
_UpperCAmelCase : List[Any] = [mem.copy() for i in range(6 )]
_UpperCAmelCase : Tuple = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
_UpperCAmelCase : Optional[int] = Text("Model" , font_size=24 )
_UpperCAmelCase : int = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ )
model.move_to([3, -1.0, 0] )
self.add(lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = []
for i, rect in enumerate(lowerCamelCase__ ):
rect.set_stroke(lowerCamelCase__ )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
_UpperCAmelCase : Optional[int] = Rectangle(height=0.4_6 / 4 , width=0.4_6 / 3 ).set_stroke(width=0.0 ).set_fill(lowerCamelCase__ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=lowerCamelCase__ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=lowerCamelCase__ , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=lowerCamelCase__ , buff=0.0 )
self.add(lowerCamelCase__ )
cpu_targs.append(lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = [mem.copy() for i in range(6 )]
_UpperCAmelCase : Tuple = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
_UpperCAmelCase : Optional[int] = Text("Loaded Checkpoint" , font_size=24 )
_UpperCAmelCase : Optional[int] = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , aligned_edge=lowerCamelCase__ , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
_UpperCAmelCase : Tuple = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_UpperCAmelCase : Optional[Any] = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(lowerCamelCase__ , lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = MarkupText(
F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(lowerCamelCase__ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
_UpperCAmelCase : Union[str, Any] = MarkupText(
F"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase__ ) , Write(lowerCamelCase__ ) )
self.play(Write(lowerCamelCase__ , run_time=1 ) , Create(lowerCamelCase__ , run_time=1 ) )
_UpperCAmelCase : Any = []
_UpperCAmelCase : str = []
for i, rect in enumerate(lowerCamelCase__ ):
_UpperCAmelCase : Dict = fill.copy().set_fill(lowerCamelCase__ , opacity=0.7 )
target.move_to(lowerCamelCase__ )
first_animations.append(GrowFromCenter(lowerCamelCase__ , run_time=1 ) )
_UpperCAmelCase : Dict = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(lowerCamelCase__ , run_time=1.5 ) )
self.play(*lowerCamelCase__ )
self.play(*lowerCamelCase__ )
self.wait()
| 322
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
lowerCamelCase__ = TypeVar('T')
class lowerCAmelCase__ ( Generic[T] ):
def __init__( self : Union[str, Any] , lowerCamelCase__ : T ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase : Dict = data
_UpperCAmelCase : Node[T] | None = None
def __str__( self : Any ) ->str:
'''simple docstring'''
return F"""{self.data}"""
class lowerCAmelCase__ ( Generic[T] ):
def __init__( self : Tuple ) ->None:
'''simple docstring'''
_UpperCAmelCase : Node[T] | None = None
def __iter__( self : List[str] ) ->Iterator[T]:
'''simple docstring'''
_UpperCAmelCase : Any = self.top
while node:
yield node.data
_UpperCAmelCase : Dict = node.next
def __str__( self : Dict ) ->str:
'''simple docstring'''
return "->".join([str(lowerCamelCase__ ) for item in self] )
def __len__( self : Optional[int] ) ->int:
'''simple docstring'''
return len(tuple(iter(self ) ) )
def lowerCAmelCase__ ( self : List[Any] ) ->bool:
'''simple docstring'''
return self.top is None
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : T ) ->None:
'''simple docstring'''
_UpperCAmelCase : List[Any] = Node(lowerCamelCase__ )
if not self.is_empty():
_UpperCAmelCase : Tuple = self.top
_UpperCAmelCase : List[str] = node
def lowerCAmelCase__ ( self : Union[str, Any] ) ->T:
'''simple docstring'''
if self.is_empty():
raise IndexError("pop from empty stack" )
        assert isinstance(self.top , Node )
_UpperCAmelCase : Optional[Any] = self.top
_UpperCAmelCase : Optional[Any] = self.top.next
return pop_node.data
def lowerCAmelCase__ ( self : Union[str, Any] ) ->T:
'''simple docstring'''
if self.is_empty():
raise IndexError("peek from empty stack" )
assert self.top is not None
return self.top.data
def lowerCAmelCase__ ( self : List[Any] ) ->None:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = None
if __name__ == "__main__":
from doctest import testmod
testmod()
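# Illustrative session for the stack above (the class name is obfuscated in this
# listing; `Stack` is its conventional name):
# stack = Stack[int]()
# stack.push(1)
# stack.push(2)
# print(stack)         # 2->1
# print(stack.pop())   # 2
# print(stack.peek())  # 1
# print(len(stack))    # 1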
| 322
| 1
|
'''simple docstring'''
import collections
import os
import re
from pathlib import Path
lowerCamelCase__ = 'src/transformers'
# Matches is_xxx_available()
lowerCamelCase__ = re.compile(r'is\_([a-z_]*)_available()')
# Catches a one-line _import_struct = {xxx}
lowerCamelCase__ = re.compile(r'^_import_structure\s+=\s+\{([^\}]+)\}')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
lowerCamelCase__ = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
lowerCamelCase__ = re.compile(r'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)')
# Catches a line _import_struct["bla"].append("foo")
lowerCamelCase__ = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
lowerCamelCase__ = re.compile(r'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]')
# Catches a line with an object between quotes and a comma: "MyModel",
lowerCamelCase__ = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
lowerCamelCase__ = re.compile(r'^\s+\[([^\]]+)\]')
# Catches a line with from foo import bar, bla, boo
lowerCamelCase__ = re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
# Catches a line with try:
lowerCamelCase__ = re.compile(r'^\s*try:')
# Catches a line with else:
lowerCamelCase__ = re.compile(r'^\s*else:')
def __lowerCAmelCase (__lowerCAmelCase ):
if _re_test_backend.search(__lowerCAmelCase ) is None:
return None
    backends = [b[0] for b in _re_backend.findall(__lowerCAmelCase )]
    backends.sort()
    return "_and_".join(backends )
def __lowerCAmelCase (__lowerCAmelCase ):
with open(__lowerCAmelCase , "r" , encoding="utf-8" , newline="\n" ) as f:
_UpperCAmelCase : str = f.readlines()
_UpperCAmelCase : Union[str, Any] = 0
while line_index < len(__lowerCAmelCase ) and not lines[line_index].startswith("_import_structure = {" ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(__lowerCAmelCase ):
return None
# First grab the objects without a specific backend in _import_structure
_UpperCAmelCase : Dict = []
while not lines[line_index].startswith("if TYPE_CHECKING" ) and find_backend(lines[line_index] ) is None:
_UpperCAmelCase : int = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(__lowerCAmelCase ):
_UpperCAmelCase : Tuple = _re_one_line_import_struct.search(__lowerCAmelCase ).groups()[0]
_UpperCAmelCase : Optional[int] = re.findall(R"\[([^\]]+)\]" , __lowerCAmelCase )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(", " )] )
line_index += 1
continue
_UpperCAmelCase : int = _re_import_struct_key_value.search(__lowerCAmelCase )
if single_line_import_search is not None:
_UpperCAmelCase : str = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", " ) if len(__lowerCAmelCase ) > 0]
objects.extend(__lowerCAmelCase )
elif line.startswith(" " * 8 + "\"" ):
objects.append(line[9:-3] )
line_index += 1
_UpperCAmelCase : List[Any] = {"none": objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith("if TYPE_CHECKING" ):
# If the line is an if not is_backend_available, we grab all objects associated.
_UpperCAmelCase : str = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
_UpperCAmelCase : List[str] = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
_UpperCAmelCase : Any = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 4 ):
_UpperCAmelCase : Dict = lines[line_index]
if _re_import_struct_add_one.search(__lowerCAmelCase ) is not None:
objects.append(_re_import_struct_add_one.search(__lowerCAmelCase ).groups()[0] )
elif _re_import_struct_add_many.search(__lowerCAmelCase ) is not None:
_UpperCAmelCase : List[Any] = _re_import_struct_add_many.search(__lowerCAmelCase ).groups()[0].split(", " )
_UpperCAmelCase : Optional[int] = [obj[1:-1] for obj in imports if len(__lowerCAmelCase ) > 0]
objects.extend(__lowerCAmelCase )
elif _re_between_brackets.search(__lowerCAmelCase ) is not None:
_UpperCAmelCase : Any = _re_between_brackets.search(__lowerCAmelCase ).groups()[0].split(", " )
_UpperCAmelCase : Optional[int] = [obj[1:-1] for obj in imports if len(__lowerCAmelCase ) > 0]
objects.extend(__lowerCAmelCase )
elif _re_quote_object.search(__lowerCAmelCase ) is not None:
objects.append(_re_quote_object.search(__lowerCAmelCase ).groups()[0] )
elif line.startswith(" " * 8 + "\"" ):
objects.append(line[9:-3] )
elif line.startswith(" " * 12 + "\"" ):
objects.append(line[13:-3] )
line_index += 1
_UpperCAmelCase : int = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
_UpperCAmelCase : Any = []
while (
line_index < len(__lowerCAmelCase )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith("else" )
):
_UpperCAmelCase : Optional[int] = lines[line_index]
_UpperCAmelCase : Union[str, Any] = _re_import.search(__lowerCAmelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 8 ):
objects.append(line[8:-2] )
line_index += 1
_UpperCAmelCase : Optional[Any] = {"none": objects}
# Let's continue with backend-specific objects
while line_index < len(__lowerCAmelCase ):
# If the line is an if is_backend_available, we grab all objects associated.
_UpperCAmelCase : str = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
_UpperCAmelCase : Any = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
_UpperCAmelCase : Any = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 8 ):
_UpperCAmelCase : List[str] = lines[line_index]
_UpperCAmelCase : Dict = _re_import.search(__lowerCAmelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 12 ):
objects.append(line[12:-2] )
line_index += 1
_UpperCAmelCase : str = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
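# Shape of the return value, for a hypothetical init that registers one object
# without a backend and one behind torch:
#   import_dict_objects == {"none": ["FooConfig"], "torch": ["FooModel"]}
#   type_hint_objects   == {"none": ["FooConfig"], "torch": ["FooModel"]}
# The analysis function below compares these two dictionaries key by key.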
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
def find_duplicates(__lowerCAmelCase ):
return [k for k, v in collections.Counter(__lowerCAmelCase ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
_UpperCAmelCase : Dict = []
for key in import_dict_objects.keys():
_UpperCAmelCase : Optional[int] = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F"""Duplicate _import_structure definitions for: {duplicate_imports}""" )
_UpperCAmelCase : Any = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
_UpperCAmelCase : Optional[int] = "base imports" if key == "none" else F"""{key} backend"""
errors.append(F"""Differences for {name}:""" )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F""" {a} in TYPE_HINT but not in _import_structure.""" )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F""" {a} in _import_structure but not in TYPE_HINT.""" )
return errors
def __lowerCAmelCase ():
_UpperCAmelCase : int = []
    for root, _, files in os.walk(lowerCamelCase__ ):
if "__init__.py" in files:
            _UpperCAmelCase : int = os.path.join(root , "__init__.py" )
_UpperCAmelCase : List[Any] = parse_init(__lowerCAmelCase )
if objects is not None:
_UpperCAmelCase : Any = analyze_results(*__lowerCAmelCase )
if len(__lowerCAmelCase ) > 0:
_UpperCAmelCase : Optional[Any] = F"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"""
failures.append("\n".join(__lowerCAmelCase ) )
if len(__lowerCAmelCase ) > 0:
raise ValueError("\n\n".join(__lowerCAmelCase ) )
def __lowerCAmelCase ():
_UpperCAmelCase : Dict = []
    for path, directories, files in os.walk(lowerCamelCase__ ):
for folder in directories:
# Ignore private modules
if folder.startswith("_" ):
                directories.remove(folder )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path ) / folder).glob("*.py" ) ) ) == 0:
continue
            _UpperCAmelCase : Dict = str((Path(path ) / folder).relative_to(lowerCamelCase__ ) )
_UpperCAmelCase : Union[str, Any] = short_path.replace(os.path.sep , "." )
submodules.append(__lowerCAmelCase )
for fname in files:
if fname == "__init__.py":
continue
            _UpperCAmelCase : Dict = str((Path(path ) / fname).relative_to(lowerCamelCase__ ) )
_UpperCAmelCase : List[Any] = short_path.replace(".py" , "" ).replace(os.path.sep , "." )
if len(submodule.split("." ) ) == 1:
submodules.append(__lowerCAmelCase )
return submodules
lowerCamelCase__ = [
'convert_pytorch_checkpoint_to_tf2',
'modeling_flax_pytorch_utils',
'models.esm.openfold_utils',
]
def __lowerCAmelCase ():
# This is to make sure the transformers module imported is the one in the repo.
from transformers.utils import direct_transformers_import
    _UpperCAmelCase : Any = direct_transformers_import(lowerCamelCase__ )
_UpperCAmelCase : Any = set(transformers._import_structure.keys() )
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to collect all additions and
    # (potentially re-) add them.
    with open(os.path.join(lowerCamelCase__ , "__init__.py" ) , "r" ) as f:
_UpperCAmelCase : Tuple = f.read()
import_structure_keys.update(set(re.findall(R"import_structure\[\"([^\"]*)\"\]" , __lowerCAmelCase ) ) )
_UpperCAmelCase : Union[str, Any] = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in import_structure_keys
]
if len(__lowerCAmelCase ) > 0:
_UpperCAmelCase : List[Any] = "\n".join(F"""- {module}""" for module in module_not_registered )
raise ValueError(
"The following submodules are not properly registed in the main init of Transformers:\n"
F"""{list_of_modules}\n"""
"Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value." )
if __name__ == "__main__":
check_all_inits()
check_submodules()
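# Typical invocation, from the repository root (assuming the conventional location
# of this checker script):
# python utils/check_inits.py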
| 322
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json'
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : int = "speech_to_text_2"
lowerCAmelCase : str = ["past_key_values"]
lowerCAmelCase : int = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : Optional[Any] , lowerCamelCase__ : Tuple=1_00_00 , lowerCamelCase__ : Any=6 , lowerCamelCase__ : Tuple=20_48 , lowerCamelCase__ : List[Any]=4 , lowerCamelCase__ : Tuple=0.0 , lowerCamelCase__ : List[Any]=True , lowerCamelCase__ : Tuple="relu" , lowerCamelCase__ : Dict=2_56 , lowerCamelCase__ : List[Any]=0.1 , lowerCamelCase__ : List[Any]=0.0 , lowerCamelCase__ : Optional[int]=0.0 , lowerCamelCase__ : List[Any]=0.0_2 , lowerCamelCase__ : Tuple=2 , lowerCamelCase__ : Optional[int]=True , lowerCamelCase__ : Any=1 , lowerCamelCase__ : int=0 , lowerCamelCase__ : str=2 , lowerCamelCase__ : List[Any]=10_24 , **lowerCamelCase__ : str , ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Any = vocab_size
_UpperCAmelCase : Optional[int] = d_model
_UpperCAmelCase : List[Any] = decoder_ffn_dim
_UpperCAmelCase : Any = decoder_layers
_UpperCAmelCase : int = decoder_attention_heads
_UpperCAmelCase : Any = dropout
_UpperCAmelCase : List[Any] = attention_dropout
_UpperCAmelCase : Optional[int] = activation_dropout
_UpperCAmelCase : List[Any] = activation_function
_UpperCAmelCase : int = init_std
_UpperCAmelCase : Dict = decoder_layerdrop
_UpperCAmelCase : str = use_cache
_UpperCAmelCase : Union[str, Any] = decoder_layers
_UpperCAmelCase : Optional[Any] = scale_embedding # scale factor will be sqrt(d_model) if True
_UpperCAmelCase : Any = max_target_positions
super().__init__(
pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ , decoder_start_token_id=lowerCamelCase__ , **lowerCamelCase__ , )
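# Minimal instantiation sketch (hedged: the class name is obfuscated above; upstream
# this configuration is known as Speech2Text2Config, and the values below are
# illustrative rather than tied to a released checkpoint):
# config = Speech2Text2Config(vocab_size=10000, d_model=256, decoder_layers=4)
# config.num_attention_heads  # resolves to decoder_attention_heads via attribute_map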
| 322
| 1
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCamelCase__ = logging.get_logger(__name__)
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : List[str] = ["pixel_values"]
def __init__( self : Tuple , lowerCamelCase__ : bool = True , lowerCamelCase__ : Dict[str, int] = None , lowerCamelCase__ : PILImageResampling = PIL.Image.BICUBIC , lowerCamelCase__ : bool = True , lowerCamelCase__ : Dict[str, int] = None , lowerCamelCase__ : Union[int, float] = 1 / 2_55 , lowerCamelCase__ : bool = True , lowerCamelCase__ : bool = True , lowerCamelCase__ : Optional[Union[float, List[float]]] = None , lowerCamelCase__ : Optional[Union[float, List[float]]] = None , **lowerCamelCase__ : Optional[int] , ) ->None:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
_UpperCAmelCase : str = size if size is not None else {"height": 2_56, "width": 2_56}
_UpperCAmelCase : int = get_size_dict(lowerCamelCase__ )
_UpperCAmelCase : Any = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
_UpperCAmelCase : Tuple = get_size_dict(lowerCamelCase__ , param_name="crop_size" )
_UpperCAmelCase : str = do_resize
_UpperCAmelCase : List[str] = size
_UpperCAmelCase : List[str] = resample
_UpperCAmelCase : List[str] = do_center_crop
_UpperCAmelCase : int = crop_size
_UpperCAmelCase : int = do_rescale
_UpperCAmelCase : Tuple = rescale_factor
_UpperCAmelCase : Tuple = do_normalize
_UpperCAmelCase : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCAmelCase : Optional[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : np.ndarray , lowerCamelCase__ : Dict[str, int] , lowerCamelCase__ : PILImageResampling = PIL.Image.BICUBIC , lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase__ : List[str] , ) ->np.ndarray:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = get_size_dict(lowerCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return resize(
lowerCamelCase__ , size=(size["height"], size["width"]) , resample=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : Dict , lowerCamelCase__ : np.ndarray , lowerCamelCase__ : Dict[str, int] , lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase__ : Any , ) ->np.ndarray:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = get_size_dict(lowerCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(lowerCamelCase__ , size=(size["height"], size["width"]) , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : np.ndarray , lowerCamelCase__ : Union[int, float] , lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase__ : Tuple , ) ->str:
'''simple docstring'''
return rescale(lowerCamelCase__ , scale=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : Any , lowerCamelCase__ : np.ndarray , lowerCamelCase__ : Union[float, List[float]] , lowerCamelCase__ : Union[float, List[float]] , lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase__ : List[Any] , ) ->np.ndarray:
'''simple docstring'''
return normalize(lowerCamelCase__ , mean=lowerCamelCase__ , std=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : Dict , lowerCamelCase__ : ImageInput , lowerCamelCase__ : bool = None , lowerCamelCase__ : Dict[str, int] = None , lowerCamelCase__ : Tuple=None , lowerCamelCase__ : bool = None , lowerCamelCase__ : Dict[str, int] = None , lowerCamelCase__ : bool = None , lowerCamelCase__ : float = None , lowerCamelCase__ : bool = None , lowerCamelCase__ : Optional[Union[float, List[float]]] = None , lowerCamelCase__ : Optional[Union[float, List[float]]] = None , lowerCamelCase__ : Optional[Union[str, TensorType]] = None , lowerCamelCase__ : ChannelDimension = ChannelDimension.FIRST , **lowerCamelCase__ : Dict , ) ->PIL.Image.Image:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase : List[Any] = resample if resample is not None else self.resample
_UpperCAmelCase : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCAmelCase : List[str] = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase : List[str] = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
_UpperCAmelCase : Any = image_std if image_std is not None else self.image_std
_UpperCAmelCase : Union[str, Any] = size if size is not None else self.size
_UpperCAmelCase : List[str] = get_size_dict(lowerCamelCase__ )
_UpperCAmelCase : str = crop_size if crop_size is not None else self.crop_size
_UpperCAmelCase : Optional[Any] = get_size_dict(lowerCamelCase__ , param_name="crop_size" )
_UpperCAmelCase : List[Any] = make_list_of_images(lowerCamelCase__ )
if not valid_images(lowerCamelCase__ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
_UpperCAmelCase : List[str] = [to_numpy_array(lowerCamelCase__ ) for image in images]
if do_resize:
_UpperCAmelCase : Union[str, Any] = [self.resize(image=lowerCamelCase__ , size=lowerCamelCase__ , resample=lowerCamelCase__ ) for image in images]
if do_center_crop:
_UpperCAmelCase : Optional[int] = [self.center_crop(image=lowerCamelCase__ , size=lowerCamelCase__ ) for image in images]
if do_rescale:
_UpperCAmelCase : Optional[int] = [self.rescale(image=lowerCamelCase__ , scale=lowerCamelCase__ ) for image in images]
if do_normalize:
_UpperCAmelCase : str = [self.normalize(image=lowerCamelCase__ , mean=lowerCamelCase__ , std=lowerCamelCase__ ) for image in images]
_UpperCAmelCase : Dict = [to_channel_dimension_format(lowerCamelCase__ , lowerCamelCase__ ) for image in images]
_UpperCAmelCase : Optional[int] = {"pixel_values": images}
return BatchFeature(data=lowerCamelCase__ , tensor_type=lowerCamelCase__ )
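# End-to-end sketch (hedged: `processor` stands for an instance of the class above,
# whose name is obfuscated in this listing). With the defaults from __init__, a
# 300x300 RGB image is resized to 256x256, center-cropped to 224x224, rescaled by
# 1/255 and normalized, channels-first:
# import numpy as np
# from PIL import Image
# image = Image.fromarray(np.zeros((300, 300, 3), dtype=np.uint8))
# batch = processor.preprocess(image, return_tensors="np")
# batch["pixel_values"].shape  # (1, 3, 224, 224)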
| 322
|
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
lowerCamelCase__ = logging.getLogger(__name__)
torch.set_grad_enabled(False)
lowerCamelCase__ = 'cuda' if torch.cuda.is_available() else 'cpu'
def __lowerCAmelCase (text , n=100 , character=" " ):
    text = text.split(character )
    return [character.join(text[i : i + n] ).strip() for i in range(0 , len(text ) , n )]
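# Behaviour sketch of the splitter above (called split_text at its use site below):
# with n=2 and the default separator, "a b c d e" -> ["a b", "c d", "e"]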
def __lowerCAmelCase (documents ):
    titles , texts = [], []
    for title, text in zip(documents["title"] , documents["text"] ):
        if text is not None:
            for passage in split_text(text ):
                titles.append(title if title is not None else "" )
                texts.append(passage )
    return {"title": titles, "text": texts}
def __lowerCAmelCase (documents , ctx_encoder , ctx_tokenizer ):
    input_ids = ctx_tokenizer(
        documents["title"] , documents["text"] , truncation=True , padding="longest" , return_tensors="pt" )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=lowerCamelCase__ ) , return_dict=True ).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ):
######################################
logger.info("Step 1 - Create the dataset" )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
_UpperCAmelCase : Optional[int] = load_dataset(
"csv" , data_files=[rag_example_args.csv_path] , split="train" , delimiter="\t" , column_names=["title", "text"] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
_UpperCAmelCase : Optional[int] = dataset.map(__lowerCAmelCase , batched=__lowerCAmelCase , num_proc=processing_args.num_proc )
# And compute the embeddings
_UpperCAmelCase : Union[str, Any] = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=__lowerCAmelCase )
_UpperCAmelCase : Optional[int] = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
_UpperCAmelCase : Dict = Features(
{"text": Value("string" ), "title": Value("string" ), "embeddings": Sequence(Value("float32" ) )} ) # optional, save as float32 instead of float64 to save space
_UpperCAmelCase : int = dataset.map(
partial(__lowerCAmelCase , ctx_encoder=__lowerCAmelCase , ctx_tokenizer=__lowerCAmelCase ) , batched=__lowerCAmelCase , batch_size=processing_args.batch_size , features=__lowerCAmelCase , )
# And finally save your dataset
_UpperCAmelCase : List[Any] = os.path.join(rag_example_args.output_dir , "my_knowledge_dataset" )
dataset.save_to_disk(__lowerCAmelCase )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info("Step 2 - Index the dataset" )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
_UpperCAmelCase : Any = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index("embeddings" , custom_index=__lowerCAmelCase )
# And save the index
_UpperCAmelCase : List[str] = os.path.join(rag_example_args.output_dir , "my_knowledge_dataset_hnsw_index.faiss" )
dataset.get_index("embeddings" ).save(__lowerCAmelCase )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class lowerCAmelCase__ :
lowerCAmelCase : str = field(
default=str(Path(UpperCAmelCase__ ).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv" ) , metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"} , )
lowerCAmelCase : Optional[str] = field(
default=UpperCAmelCase__ , metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."} , )
lowerCAmelCase : str = field(
default="facebook/rag-sequence-nq" , metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"} , )
lowerCAmelCase : str = field(
default="facebook/dpr-ctx_encoder-multiset-base" , metadata={
"help": (
"The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
" 'facebook/dpr-ctx_encoder-multiset-base'"
)
} , )
lowerCAmelCase : Optional[str] = field(
default=str(Path(UpperCAmelCase__ ).parent / "test_run" / "dummy-kb" ) , metadata={"help": "Path to a directory where the dataset passages and the index will be saved"} , )
@dataclass
class lowerCAmelCase__ :
lowerCAmelCase : Optional[int] = field(
default=UpperCAmelCase__ , metadata={
"help": "The number of processes to use to split the documents into passages. Default is single process."
} , )
lowerCAmelCase : int = field(
default=16 , metadata={
"help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
} , )
@dataclass
class lowerCAmelCase__ :
lowerCAmelCase : int = field(
default=768 , metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."} , )
lowerCAmelCase : int = field(
default=128 , metadata={
"help": (
"The number of bi-directional links created for every new element during the HNSW index construction."
)
} , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
lowerCamelCase__ = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
lowerCamelCase__ = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
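# Typical invocation (script name hedged; the flags match the dataclass fields above):
# python use_own_knowledge_dataset.py \
#     --csv_path path/to/my_csv.csv \
#     --output_dir path/to/my_knowledge_dataset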
| 322
| 1
|
'''simple docstring'''
def __lowerCAmelCase (number ):
    if not isinstance(number , int ):
        _UpperCAmelCase : Union[str, Any] = F"""Input value of [number={number}] must be an integer"""
        raise TypeError(_UpperCAmelCase )
if number < 0:
return False
_UpperCAmelCase : List[str] = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
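# Worked examples for the check above (it tests whether number**2 ends with the
# digits of number, i.e. whether the number is automorphic):
#   5  -> 25    ends in 5  -> True
#   76 -> 5776  ends in 76 -> True
#   7  -> 49    ends in 9  -> False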
| 322
|
'''simple docstring'''
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
lowerCamelCase__ = {
'return_dict': False,
'output_hidden_states': True,
'output_attentions': True,
'torchscript': True,
'torch_dtype': 'float16',
'use_bfloat16': True,
'tf_legacy_loss': True,
'pruned_heads': {'a': 1},
'tie_word_embeddings': False,
'is_decoder': True,
'cross_attention_hidden_size': 128,
'add_cross_attention': True,
'tie_encoder_decoder': True,
'max_length': 50,
'min_length': 3,
'do_sample': True,
'early_stopping': True,
'num_beams': 3,
'num_beam_groups': 3,
'diversity_penalty': 0.5,
'temperature': 2.0,
'top_k': 10,
'top_p': 0.7,
'typical_p': 0.2,
'repetition_penalty': 0.8,
'length_penalty': 0.8,
'no_repeat_ngram_size': 5,
'encoder_no_repeat_ngram_size': 5,
'bad_words_ids': [1, 2, 3],
'num_return_sequences': 3,
'chunk_size_feed_forward': 5,
'output_scores': True,
'return_dict_in_generate': True,
'forced_bos_token_id': 2,
'forced_eos_token_id': 3,
'remove_invalid_values': True,
'architectures': ['BertModel'],
'finetuning_task': 'translation',
'id2label': {0: 'label'},
'label2id': {'label': '0'},
'tokenizer_class': 'BertTokenizerFast',
'prefix': 'prefix',
'bos_token_id': 6,
'pad_token_id': 7,
'eos_token_id': 8,
'sep_token_id': 9,
'decoder_start_token_id': 10,
'exponential_decay_length_penalty': (5, 1.01),
'suppress_tokens': [0, 1],
'begin_suppress_tokens': 2,
'task_specific_params': {'translation': 'some_params'},
'problem_type': 'regression',
}
@is_staging_test
class lowerCAmelCase__ ( unittest.TestCase ):
@classmethod
def lowerCAmelCase__ ( cls : List[str] ) ->str:
'''simple docstring'''
_UpperCAmelCase : Tuple = TOKEN
HfFolder.save_token(lowerCamelCase__ )
@classmethod
def lowerCAmelCase__ ( cls : Union[str, Any] ) ->int:
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id="test-config" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-config-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-config" )
except HTTPError:
pass
def lowerCAmelCase__ ( self : int ) ->Any:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub("test-config" , use_auth_token=self._token )
_UpperCAmelCase : List[str] = BertConfig.from_pretrained(F"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase__ , getattr(lowerCamelCase__ , lowerCamelCase__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-config" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCamelCase__ , repo_id="test-config" , push_to_hub=lowerCamelCase__ , use_auth_token=self._token )
_UpperCAmelCase : Dict = BertConfig.from_pretrained(F"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase__ , getattr(lowerCamelCase__ , lowerCamelCase__ ) )
def lowerCAmelCase__ ( self : Optional[Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : str = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub("valid_org/test-config-org" , use_auth_token=self._token )
_UpperCAmelCase : List[str] = BertConfig.from_pretrained("valid_org/test-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase__ , getattr(lowerCamelCase__ , lowerCamelCase__ ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-config-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowerCamelCase__ , repo_id="valid_org/test-config-org" , push_to_hub=lowerCamelCase__ , use_auth_token=self._token )
_UpperCAmelCase : int = BertConfig.from_pretrained("valid_org/test-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase__ , getattr(lowerCamelCase__ , lowerCamelCase__ ) )
def lowerCAmelCase__ ( self : List[str] ) ->Any:
'''simple docstring'''
CustomConfig.register_for_auto_class()
_UpperCAmelCase : int = CustomConfig(attribute=42 )
config.push_to_hub("test-dynamic-config" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {"AutoConfig": "custom_configuration.CustomConfig"} )
_UpperCAmelCase : str = AutoConfig.from_pretrained(F"""{USER}/test-dynamic-config""" , trust_remote_code=lowerCamelCase__ )
        # Can't make an isinstance check because the new_config is from the CustomConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , "CustomConfig" )
self.assertEqual(new_config.attribute , 42 )
class lowerCAmelCase__ ( unittest.TestCase ):
def lowerCAmelCase__ ( self : List[str] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Tuple = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
_UpperCAmelCase : Any = c.n_embd + 1 # int
_UpperCAmelCase : List[Any] = c.resid_pdrop + 1.0 # float
_UpperCAmelCase : Tuple = not c.scale_attn_weights # bool
_UpperCAmelCase : List[Any] = c.summary_type + "foo" # str
c.update_from_string(
F"""n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}""" )
self.assertEqual(lowerCamelCase__ , c.n_embd , "mismatch for key: n_embd" )
self.assertEqual(lowerCamelCase__ , c.resid_pdrop , "mismatch for key: resid_pdrop" )
self.assertEqual(lowerCamelCase__ , c.scale_attn_weights , "mismatch for key: scale_attn_weights" )
self.assertEqual(lowerCamelCase__ , c.summary_type , "mismatch for key: summary_type" )
def lowerCAmelCase__ ( self : Optional[Any] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Any = PretrainedConfig()
_UpperCAmelCase : Tuple = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
lowerCamelCase__ , ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"] )
_UpperCAmelCase : List[str] = [key for key, value in config_common_kwargs.items() if value == getattr(lowerCamelCase__ , lowerCamelCase__ )]
if len(lowerCamelCase__ ) > 0:
raise ValueError(
"The following keys are set with the default values in"
" `test_configuration_common.config_common_kwargs` pick another value for them:"
F""" {', '.join(lowerCamelCase__ )}.""" )
def lowerCAmelCase__ ( self : Optional[int] ) ->int:
'''simple docstring'''
with self.assertRaises(lowerCamelCase__ ):
# config is in subfolder, the following should not work without specifying the subfolder
_UpperCAmelCase : Any = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" )
_UpperCAmelCase : Any = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" , subfolder="bert" )
self.assertIsNotNone(lowerCamelCase__ )
def lowerCAmelCase__ ( self : Optional[int] ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = mock.Mock()
_UpperCAmelCase : List[str] = 5_00
_UpperCAmelCase : Dict = {}
_UpperCAmelCase : Tuple = HTTPError
_UpperCAmelCase : Any = {}
# Download this model to make sure it's in the cache.
_UpperCAmelCase : int = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=lowerCamelCase__ ) as mock_head:
_UpperCAmelCase : Union[str, Any] = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
            # This checks that we did call the fake head request
mock_head.assert_called()
def lowerCAmelCase__ ( self : Optional[int] ) ->Any:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = BertConfig.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json" )
def lowerCAmelCase__ ( self : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : int = AutoConfig.from_pretrained("bert-base-cased" )
_UpperCAmelCase : str = ["config.4.0.0.json"]
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(lowerCamelCase__ )
_UpperCAmelCase : Dict = 2
json.dump(configuration.to_dict() , open(os.path.join(lowerCamelCase__ , "config.4.0.0.json" ) , "w" ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
_UpperCAmelCase : Optional[int] = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
_UpperCAmelCase : Dict = ["config.42.0.0.json"]
_UpperCAmelCase : Union[str, Any] = 7_68
configuration.save_pretrained(lowerCamelCase__ )
shutil.move(os.path.join(lowerCamelCase__ , "config.4.0.0.json" ) , os.path.join(lowerCamelCase__ , "config.42.0.0.json" ) )
_UpperCAmelCase : Dict = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertEqual(new_configuration.hidden_size , 7_68 )
def lowerCAmelCase__ ( self : List[str] ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = "hf-internal-testing/test-two-configs"
import transformers as new_transformers
_UpperCAmelCase : Any = "v4.0.0"
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = new_transformers.models.auto.AutoConfig.from_pretrained(
lowerCamelCase__ , return_unused_kwargs=lowerCamelCase__ )
self.assertEqual(new_configuration.hidden_size , 2 )
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(lowerCamelCase__ , {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
_UpperCAmelCase : List[Any] = "v3.0.0"
_UpperCAmelCase : int = old_transformers.models.auto.AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertEqual(old_configuration.hidden_size , 7_68 )
| 322
| 1
|
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class lowerCAmelCase__ :
def __init__( self : Optional[int] , lowerCamelCase__ : Tuple , lowerCamelCase__ : List[str]=13 , lowerCamelCase__ : Tuple=7 , lowerCamelCase__ : Dict=True , lowerCamelCase__ : Optional[int]=True , lowerCamelCase__ : int=True , lowerCamelCase__ : Optional[Any]=99 , lowerCamelCase__ : str=32 , lowerCamelCase__ : Tuple=5 , lowerCamelCase__ : Union[str, Any]=4 , lowerCamelCase__ : str=37 , lowerCamelCase__ : Tuple="gelu" , lowerCamelCase__ : int=0.1 , lowerCamelCase__ : Any=0.1 , lowerCamelCase__ : Optional[Any]=5_12 , lowerCamelCase__ : List[str]=16 , lowerCamelCase__ : Optional[Any]=2 , lowerCamelCase__ : Dict=0.0_2 , lowerCamelCase__ : List[Any]=3 , lowerCamelCase__ : Optional[int]=4 , lowerCamelCase__ : Union[str, Any]=None , ) ->str:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = parent
_UpperCAmelCase : Tuple = batch_size
_UpperCAmelCase : List[Any] = seq_length
_UpperCAmelCase : Tuple = is_training
_UpperCAmelCase : Optional[Any] = use_token_type_ids
_UpperCAmelCase : str = use_labels
_UpperCAmelCase : str = vocab_size
_UpperCAmelCase : Optional[int] = hidden_size
_UpperCAmelCase : Optional[Any] = num_hidden_layers
_UpperCAmelCase : Tuple = num_attention_heads
_UpperCAmelCase : Tuple = intermediate_size
_UpperCAmelCase : Any = hidden_act
_UpperCAmelCase : Optional[int] = hidden_dropout_prob
_UpperCAmelCase : Dict = attention_probs_dropout_prob
_UpperCAmelCase : Optional[Any] = max_position_embeddings
_UpperCAmelCase : int = type_vocab_size
_UpperCAmelCase : Union[str, Any] = type_sequence_label_size
_UpperCAmelCase : Tuple = initializer_range
_UpperCAmelCase : str = num_labels
_UpperCAmelCase : Any = num_choices
_UpperCAmelCase : Optional[int] = scope
_UpperCAmelCase : Union[str, Any] = self.vocab_size - 1
def lowerCAmelCase__ ( self : Optional[int] ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase : List[str] = None
if self.use_token_type_ids:
_UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase : Any = None
_UpperCAmelCase : List[Any] = None
_UpperCAmelCase : List[Any] = None
if self.use_labels:
_UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase : Optional[Any] = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
_UpperCAmelCase : List[Any] = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : str , *lowerCamelCase__ : List[Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = OpenAIGPTModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_UpperCAmelCase : List[str] = model(lowerCamelCase__ , token_type_ids=lowerCamelCase__ , head_mask=lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = model(lowerCamelCase__ , token_type_ids=lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self : Dict , lowerCamelCase__ : List[Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : int , lowerCamelCase__ : List[Any] , *lowerCamelCase__ : List[str] ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : str = OpenAIGPTLMHeadModel(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_UpperCAmelCase : int = model(lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase__ ( self : Any , lowerCamelCase__ : Dict , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : List[str] , *lowerCamelCase__ : Dict ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Dict = OpenAIGPTDoubleHeadsModel(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_UpperCAmelCase : Tuple = model(lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : Any , lowerCamelCase__ : Any , lowerCamelCase__ : List[Any] , lowerCamelCase__ : int , *lowerCamelCase__ : Any ) ->Any:
'''simple docstring'''
_UpperCAmelCase : str = self.num_labels
_UpperCAmelCase : int = OpenAIGPTForSequenceClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_UpperCAmelCase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase : Any = model(lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase__ ( self : List[str] ) ->Optional[Any]:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }
return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,  # `torch_device` is imported from transformers.testing_utils upstream
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481, 4735, 544, 246, 963, 870, 762, 239, 244, 40477,
            244, 249, 719, 881, 487, 544, 240, 244, 603, 481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
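# --- Usage sketch (editor's addition): the integration test above exercises greedy decoding with
# `generate`. A minimal standalone equivalent, assuming `transformers`/`torch` are installed and
# the "openai-gpt" checkpoint is reachable:
#
#     from transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer
#
#     tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
#     model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
#     input_ids = tokenizer("the president is", return_tensors="pt").input_ids
#     output_ids = model.generate(input_ids, do_sample=False)  # greedy, as in the test
#     print(tokenizer.decode(output_ids[0]))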
'''simple docstring'''
from manim import *
class Stage1(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])

        self.play(
            Create(cpu_left_col, run_time=1),
            Create(cpu_right_col, run_time=1),
            Create(gpu_rect, run_time=1),
        )

        step_1 = MarkupText(
            f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.",
            font_size=24,
        )
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])

        step_1.move_to([2, 2, 0])
        self.play(Write(step_1, run_time=2.5), Write(key_text), Write(key))
        self.add(model)

        cpu_targs = []
        first_animations = []
        second_animations = []
        for i, rect in enumerate(model_base):
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)

            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
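# --- Usage sketch (editor's addition): rendering the scene above from Python instead of the
# `manim` CLI. `tempconfig` and `Scene.render` are manim community-edition APIs; the quality
# setting here is illustrative only.
#
#     from manim import tempconfig
#
#     with tempconfig({"quality": "low_quality", "preview": True}):
#         Stage1().render()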
'''simple docstring'''
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a):
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value

    size = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main():
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join([str(i) for i in a]))


if __name__ == "__main__":
    main()
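# --- Note (editor's addition): pigeonhole sort runs in O(n + k) time and O(k) space, where
# k = max(a) - min(a) + 1, so it only pays off when the value range is small relative to n.
# A quick hedged sanity check:
#
#     import random
#
#     data = [random.randint(-5, 5) for _ in range(100)]
#     pigeonhole_sort(data)  # sorts in place
#     assert data == sorted(data)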
'''simple docstring'''
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save max(src_len, tgt_len) for each example to allow dynamic batching."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)


if __name__ == "__main__":
    fire.Fire(save_len_file)
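# --- Usage sketch (editor's addition): `fire.Fire(save_len_file)` exposes the function as a CLI,
# so positional/keyword arguments map directly to flags. Assuming a seq2seq data directory with
# train/val source and target files, an equivalent programmatic call would be:
#
#     save_len_file("t5-small", "path/to/data_dir", max_source_length=512, consider_target=True)
#
# (the shell form would be `python save_len_file.py t5-small path/to/data_dir`).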
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
    "t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
    "t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
    "t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
    "t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}
class T5Config(PretrainedConfig):
    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=512,
        d_kv=64,
        d_ff=2048,
        num_layers=6,
        num_decoder_layers=None,
        num_heads=8,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
class T5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
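# --- Usage sketch (editor's addition): constructing the config and inspecting the symbolic ONNX
# axes. A minimal sketch assuming this module is importable as shown; the ONNX config constructor
# arguments beyond `config` (task, patching specs) are left at their defaults:
#
#     config = T5Config(d_model=256, num_layers=4, feed_forward_proj="gated-gelu")
#     print(config.dense_act_fn)  # "gelu_new", via the backwards-compatibility branch
#     onnx_config = T5OnnxConfig(config)
#     print(onnx_config.inputs)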
'''simple docstring'''
import pytest
DATASET_LOADING_SCRIPT_NAME = '__dummy_dataset1__'
DATASET_LOADING_SCRIPT_CODE = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n    def _info(self):\n        features = datasets.Features(\n            {\n                "tokens": datasets.Sequence(datasets.Value("string")),\n                "ner_tags": datasets.Sequence(\n                    datasets.features.ClassLabel(\n                        names=[\n                            "O",\n                            "B-PER",\n                            "I-PER",\n                            "B-ORG",\n                            "I-ORG",\n                            "B-LOC",\n                            "I-LOC",\n                        ]\n                    )\n                ),\n                "langs": datasets.Sequence(datasets.Value("string")),\n                "spans": datasets.Sequence(datasets.Value("string")),\n            }\n        )\n        return datasets.DatasetInfo(features=features)\n\n    def _split_generators(self, dl_manager):\n        dl_path = dl_manager.download(URLS)\n        return [\n            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n        ]\n\n    def _generate_examples(self, filepath):\n        with open(filepath, "r", encoding="utf-8") as f:\n            for i, line in enumerate(f):\n                yield i, json.loads(line)\n'
@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
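# --- Usage sketch (editor's addition): a test consuming the fixture above receives the directory
# path and can hand it straight to `datasets.load_dataset`; hypothetical example:
#
#     def test_load_dummy_dataset(dataset_loading_script_dir):
#         import datasets
#
#         ds = datasets.load_dataset(dataset_loading_script_dir, split="train")
#         assert "tokens" in ds.column_names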
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'configuration_unispeech': ['UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP', 'UniSpeechConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_unispeech"] = [
'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST',
'UniSpeechForCTC',
'UniSpeechForPreTraining',
'UniSpeechForSequenceClassification',
'UniSpeechModel',
'UniSpeechPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
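# --- Note (editor's addition): the `_LazyModule` pattern above defers the heavy torch import
# until an attribute is first accessed, so `import transformers.models.unispeech` stays cheap.
# Sketch of the observable behaviour, assuming torch is installed:
#
#     from transformers.models.unispeech import UniSpeechConfig  # triggers only the config import
#     from transformers.models.unispeech import UniSpeechModel   # first touch loads modeling_unispeech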
'''simple docstring'''
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version('nltk'))
if NLTK_VERSION >= version.Version('3.6.4'):
from nltk import word_tokenize
_CITATION = '\\n@inproceedings{banarjee2005,\n    title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n    author = {Banerjee, Satanjeev and Lavie, Alon},\n    booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n    month = jun,\n    year = {2005},\n    address = {Ann Arbor, Michigan},\n    publisher = {Association for Computational Linguistics},\n    url = {https://www.aclweb.org/anthology/W05-0909},\n    pages = {65--72},\n}\n'
_DESCRIPTION = '\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n'
_KWARGS_DESCRIPTION = '\nComputes METEOR score of translated segments against one or more references.\nArgs:\n    predictions: list of predictions to score. Each prediction\n        should be a string with tokens separated by spaces.\n    references: list of reference for each prediction. Each\n        reference should be a string with tokens separated by spaces.\n    alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n    beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n    gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n    \'meteor\': meteor score.\nExamples:\n\n    >>> meteor = datasets.load_metric(\'meteor\')\n    >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]\n    >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]\n    >>> results = meteor.compute(predictions=predictions, references=references)\n    >>> print(round(results["meteor"], 4))\n    0.6944\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Meteor(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"],
            reference_urls=[
                "https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
                "https://en.wikipedia.org/wiki/METEOR",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        import nltk

        nltk.download("wordnet")
        if NLTK_VERSION >= version.Version("3.6.5"):
            nltk.download("punkt")
        if NLTK_VERSION >= version.Version("3.6.6"):
            nltk.download("omw-1.4")

    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        if NLTK_VERSION >= version.Version("3.6.5"):
            # METEOR in NLTK >= 3.6.5 expects pre-tokenized inputs
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]

        return {"meteor": np.mean(scores)}
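# --- Usage sketch (editor's addition): loading the metric through the `datasets` hub entry
# point, matching the example embedded in _KWARGS_DESCRIPTION above:
#
#     import datasets
#
#     meteor = datasets.load_metric("meteor")
#     results = meteor.compute(
#         predictions=["the cat sat on the mat"], references=["the cat was sitting on the mat"]
#     )
#     print(round(results["meteor"], 4))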
'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
TEXT = '\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n'
class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-question-answering")
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering", remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
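# --- Usage sketch (editor's addition): outside of the test harness the tool can be driven
# directly; a minimal sketch assuming the `transformers` agents extras are installed:
#
#     from transformers import load_tool
#
#     qa_tool = load_tool("text-question-answering")
#     qa_tool.setup()
#     print(qa_tool(TEXT, "When was Hugging Face founded?"))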
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
logger = logging.get_logger(__name__)


class ZeroShotClassificationArgumentHandler(ArgumentHandler):
    def _parse_labels(self, labels):
        if isinstance(labels, str):
            labels = [label.strip() for label in labels.split(",") if label.strip()]
        return labels

    def __call__(self, sequences, labels, hypothesis_template):
        if len(labels) == 0 or len(sequences) == 0:
            raise ValueError("You must include at least one label and at least one sequence.")
        if hypothesis_template.format(labels[0]) == hypothesis_template:
            raise ValueError(
                (
                    'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
                    "Make sure the passed template includes formatting syntax such as {{}} where the label should go."
                ).format(hypothesis_template)
            )

        if isinstance(sequences, str):
            sequences = [sequences]

        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])

        return sequence_pairs, sequences
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotClassificationPipeline(ChunkPipeline):
    def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
        self._args_parser = args_parser
        super().__init__(*args, **kwargs)
        if self.entailment_id == -1:
            logger.warning(
                "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
                "-1. Define a descriptive label2id mapping in the model config to ensure correct outputs."
            )

    @property
    def entailment_id(self):
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith("entail"):
                return ind
        return -1

    def _parse_and_tokenize(
        self, sequence_pairs, padding=True, add_special_tokens=True, truncation=TruncationStrategy.ONLY_FIRST, **kwargs
    ):
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                "Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
                " `pad_token=eos_token`"
            )
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs,
                add_special_tokens=add_special_tokens,
                return_tensors=return_tensors,
                padding=padding,
                truncation=truncation,
            )
        except Exception as e:
            if "too short" in str(e):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                inputs = self.tokenizer(
                    sequence_pairs,
                    add_special_tokens=add_special_tokens,
                    return_tensors=return_tensors,
                    padding=padding,
                    truncation=TruncationStrategy.DO_NOT_TRUNCATE,
                )
            else:
                raise e

        return inputs
    def _sanitize_parameters(self, **kwargs):
        if kwargs.get("multi_class", None) is not None:
            kwargs["multi_label"] = kwargs["multi_class"]
            logger.warning(
                "The `multi_class` argument has been deprecated and renamed to `multi_label`. "
                "`multi_class` will be removed in a future version of Transformers."
            )
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = self._args_parser._parse_labels(kwargs["candidate_labels"])
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params["multi_label"] = kwargs["multi_label"]
        return preprocess_params, {}, postprocess_params

    def __call__(self, sequences: Union[str, List[str]], *args, **kwargs):
        if len(args) == 0:
            pass
        elif len(args) == 1 and "candidate_labels" not in kwargs:
            kwargs["candidate_labels"] = args[0]
        else:
            raise ValueError(f"Unable to understand extra arguments {args}")

        return super().__call__(sequences, **kwargs)

    def preprocess(self, inputs, candidate_labels=None, hypothesis_template="This example is {}."):
        sequence_pairs, sequences = self._args_parser(inputs, candidate_labels, hypothesis_template)

        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels, sequence_pairs)):
            model_input = self._parse_and_tokenize([sequence_pair])

            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels) - 1,
                **model_input,
            }
    def _forward(self, inputs):
        candidate_label = inputs["candidate_label"]
        sequence = inputs["sequence"]
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs)

        model_outputs = {
            "candidate_label": candidate_label,
            "sequence": sequence,
            "is_last": inputs["is_last"],
            **outputs,
        }
        return model_outputs

    def postprocess(self, model_outputs, multi_label=False):
        candidate_labels = [outputs["candidate_label"] for outputs in model_outputs]
        sequences = [outputs["sequence"] for outputs in model_outputs]
        logits = np.concatenate([output["logits"].numpy() for output in model_outputs])
        N = logits.shape[0]
        n = len(candidate_labels)
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1))

        if multi_label or len(candidate_labels) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)

        top_inds = list(reversed(scores[0].argsort()))
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
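# --- Usage sketch (editor's addition): the class above backs the "zero-shot-classification"
# pipeline task; a typical call, assuming an NLI checkpoint such as facebook/bart-large-mnli:
#
#     from transformers import pipeline
#
#     classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
#     result = classifier(
#         "one day I will see the world",
#         candidate_labels=["travel", "cooking", "dancing"],
#         multi_label=False,
#     )
#     print(result["labels"][0], result["scores"][0])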
'''simple docstring'''
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
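# --- Note (editor's addition): both functions compute p(x) = sum(c_i * x**i); Horner's rule
# rewrites this as (((c_n * x + c_{n-1}) * x + ...) * x + c_0), replacing repeated exponentiation
# with n multiply-adds. Quick equivalence check:
#
#     import random
#
#     coeffs = [random.uniform(-1, 1) for _ in range(8)]
#     x0 = 2.5
#     assert abs(evaluate_poly(coeffs, x0) - horner(coeffs, x0)) < 1e-9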
'''simple docstring'''
def solution(n: int = 4_000_000) -> int:
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)


if __name__ == "__main__":
    print(f"{solution() = }")
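# --- Note (editor's addition): every third Fibonacci number is even, and the even terms satisfy
# E(k) = 4 * E(k - 1) + E(k - 2) (2, 8, 34, 144, ...), which gives an equivalent solution that
# skips the odd terms entirely:
#
#     def solution_even_recurrence(n: int = 4_000_000) -> int:
#         total, prev, curr = 0, 2, 8
#         while prev <= n:
#             total += prev
#             prev, curr = curr, 4 * curr + prev
#         return total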
'''simple docstring'''
import os
def solution():
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])

    maximum = 0

    # right
    for i in range(20):
        for j in range(17):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down
    for i in range(17):
        for j in range(20):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal 1
    for i in range(17):
        for j in range(17):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal 2
    for i in range(17):
        for j in range(3, 20):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum


if __name__ == "__main__":
    print(solution())
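# --- Note (editor's addition): this is Project Euler problem 11. The four scans (right, down,
# and the two diagonals) together cover every straight line of four adjacent cells in the 20x20
# grid, and the loop bound 17 (= 20 - 3) keeps each 4-cell window inside the grid. A generic
# per-row helper, for illustration:
#
#     def max_product_in_row(row: list[int]) -> int:
#         return max(row[j] * row[j + 1] * row[j + 2] * row[j + 3] for j in range(len(row) - 3))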
'''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )
    def setUp(self):
        self.model_tester = FlaxAlbertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxAlbertModel.from_pretrained("albert-base-v2")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
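# --- Usage sketch (editor's addition): the integration test above in standalone form, assuming
# flax is installed and the albert-base-v2 checkpoint is reachable:
#
#     import numpy as np
#     from transformers import AlbertTokenizer, FlaxAlbertModel
#
#     tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
#     model = FlaxAlbertModel.from_pretrained("albert-base-v2")
#     inputs = tokenizer("Hello, my dog is cute", return_tensors="np")
#     last_hidden_state = model(**inputs)[0]
#     print(last_hidden_state.shape)  # (batch, seq_len, 768)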
'''simple docstring'''
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_2tuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_flax
class VisionTextDualEncoderMixin:
    def get_vision_text_model(self, config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass
    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")
    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        model = FlaxVisionTextDualEncoderModel(config)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0]

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0]
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-3)
    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )
    def check_pt_flax_equivalence(self, pt_model, fx_model, inputs_dict):
        pt_model.to(torch_device)
        pt_model.eval()

        # prepare inputs
        flax_inputs = inputs_dict
        pt_inputs = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}

        with torch.no_grad():
            pt_outputs = pt_model(**pt_inputs).to_tuple()

        fx_outputs = fx_model(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2)

        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(tmpdirname)
            fx_model_loaded = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname, from_pt=True)

        fx_outputs_loaded = fx_model_loaded(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 4e-2)

        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(tmpdirname)
            pt_model_loaded = VisionTextDualEncoderModel.from_pretrained(tmpdirname, from_flax=True)

        pt_model_loaded.to(torch_device)
        pt_model_loaded.eval()

        with torch.no_grad():
            pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

        self.assertEqual(len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4]):
            self.assert_almost_equals(fx_output, pt_output_loaded.numpy(), 4e-2)

    def check_equivalence_pt_to_flax(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
        fx_model.params = fx_state

        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)

    def check_equivalence_flax_to_pt(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)

        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)
    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)

    @is_pt_flax_cross_test
    def test_pt_flax_equivalence(self):
        config_inputs_dict = self.prepare_config_and_inputs()
        vision_config = config_inputs_dict.pop("vision_config")
        text_config = config_inputs_dict.pop("text_config")

        inputs_dict = config_inputs_dict

        self.check_equivalence_pt_to_flax(vision_config, text_config, inputs_dict)
        self.check_equivalence_flax_to_pt(vision_config, text_config, inputs_dict)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_1, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_1(**inputs)
        out_1 = outputs[0]

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_1.save_pretrained(tmp_dirname)
            model_2 = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_2(**inputs)
            out_2 = after_outputs[0]
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)
@require_flax
class FlaxViTBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxViTModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_torch
class FlaxCLIPVisionBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-clip",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_flax
@require_vision
class FlaxVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
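# --- Usage sketch (editor's addition): assembling a dual encoder from separate vision and text
# checkpoints, as the tests above exercise; a hedged sketch with public hub checkpoints:
#
#     from transformers import FlaxVisionTextDualEncoderModel
#
#     model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
#         "google/vit-base-patch16-224", "bert-base-uncased"
#     )
#     # the projection layers are freshly initialised, so the model needs contrastive
#     # fine-tuning before its image/text similarity scores are meaningful.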
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-classification/requirements.txt')
MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def pil_loader(path: str):
    with open(path, "rb") as f:
        im = Image.open(f)
        return im.convert("RGB")
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default=None,
        metadata={
            "help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."
        },
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
            raise ValueError(
                "You must specify either a dataset name from the hub or a train and/or validation directory."
            )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default="google/vit-base-patch16-224-in21k",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : str = torch.stack([example["pixel_values"] for example in examples] )
_UpperCAmelCase : Tuple = torch.tensor([example["labels"] for example in examples] )
return {"pixel_values": pixel_values, "labels": labels}
def __lowerCAmelCase ():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_UpperCAmelCase : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Dict = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_image_classification" , __lowerCAmelCase , __lowerCAmelCase )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_UpperCAmelCase : Optional[Any] = training_args.get_process_log_level()
logger.setLevel(__lowerCAmelCase )
transformers.utils.logging.set_verbosity(__lowerCAmelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
_UpperCAmelCase : List[Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_UpperCAmelCase : Dict = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
if data_args.dataset_name is not None:
_UpperCAmelCase : str = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task="image-classification" , use_auth_token=True if model_args.use_auth_token else None , )
else:
_UpperCAmelCase : List[Any] = {}
if data_args.train_dir is not None:
_UpperCAmelCase : str = os.path.join(data_args.train_dir , "**" )
if data_args.validation_dir is not None:
_UpperCAmelCase : Optional[Any] = os.path.join(data_args.validation_dir , "**" )
_UpperCAmelCase : Any = load_dataset(
"imagefolder" , data_files=__lowerCAmelCase , cache_dir=model_args.cache_dir , task="image-classification" , )
# If we don't have a validation split, split off a percentage of train as validation.
_UpperCAmelCase : int = None if "validation" in dataset.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , __lowerCAmelCase ) and data_args.train_val_split > 0.0:
_UpperCAmelCase : List[Any] = dataset["train"].train_test_split(data_args.train_val_split )
_UpperCAmelCase : List[str] = split["train"]
_UpperCAmelCase : Union[str, Any] = split["test"]
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
_UpperCAmelCase : Optional[int] = dataset["train"].features["labels"].names
_UpperCAmelCase , _UpperCAmelCase : int = {}, {}
for i, label in enumerate(__lowerCAmelCase ):
_UpperCAmelCase : int = str(__lowerCAmelCase )
_UpperCAmelCase : str = label
# Load the accuracy metric from the datasets package
_UpperCAmelCase : int = evaluate.load("accuracy" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary mapping strings to floats.
def compute_metrics(__lowerCAmelCase ):
return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids )
_UpperCAmelCase : Dict = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(__lowerCAmelCase ) , labelaid=__lowerCAmelCase , idalabel=__lowerCAmelCase , finetuning_task="image-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_UpperCAmelCase : List[str] = AutoModelForImageClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=__lowerCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
_UpperCAmelCase : Dict = AutoImageProcessor.from_pretrained(
model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
_UpperCAmelCase : int = image_processor.size["shortest_edge"]
else:
_UpperCAmelCase : int = (image_processor.size["height"], image_processor.size["width"])
_UpperCAmelCase : str = Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
_UpperCAmelCase : Optional[int] = Compose(
[
RandomResizedCrop(__lowerCAmelCase ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
_UpperCAmelCase : Union[str, Any] = Compose(
[
Resize(__lowerCAmelCase ),
CenterCrop(__lowerCAmelCase ),
ToTensor(),
normalize,
] )
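    # (Added note) The training pipeline above applies stochastic augmentation
    # (random resized crop and horizontal flip), while the validation pipeline is
    # deterministic (resize to `size`, then center crop), so evaluation results
    # are reproducible across runs.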
def train_transforms(__lowerCAmelCase ):
_UpperCAmelCase : Optional[Any] = [
_train_transforms(pil_img.convert("RGB" ) ) for pil_img in example_batch["image"]
]
return example_batch
def val_transforms(__lowerCAmelCase ):
_UpperCAmelCase : Union[str, Any] = [_val_transforms(pil_img.convert("RGB" ) ) for pil_img in example_batch["image"]]
return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError("--do_train requires a train dataset" )
if data_args.max_train_samples is not None:
_UpperCAmelCase : Dict = (
dataset["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
dataset["train"].set_transform(__lowerCAmelCase )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError("--do_eval requires a validation dataset" )
if data_args.max_eval_samples is not None:
_UpperCAmelCase : Optional[Any] = (
dataset["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
dataset["validation"].set_transform(__lowerCAmelCase )
    # Initialize our trainer
_UpperCAmelCase : Union[str, Any] = Trainer(
model=__lowerCAmelCase , args=__lowerCAmelCase , train_dataset=dataset["train"] if training_args.do_train else None , eval_dataset=dataset["validation"] if training_args.do_eval else None , compute_metrics=__lowerCAmelCase , tokenizer=__lowerCAmelCase , data_collator=__lowerCAmelCase , )
# Training
if training_args.do_train:
_UpperCAmelCase : Any = None
if training_args.resume_from_checkpoint is not None:
_UpperCAmelCase : List[str] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_UpperCAmelCase : int = last_checkpoint
_UpperCAmelCase : Dict = trainer.train(resume_from_checkpoint=__lowerCAmelCase )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
_UpperCAmelCase : Dict = trainer.evaluate()
trainer.log_metrics("eval" , __lowerCAmelCase )
trainer.save_metrics("eval" , __lowerCAmelCase )
# Write model card and (optionally) push to hub
_UpperCAmelCase : int = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "image-classification",
"dataset": data_args.dataset_name,
"tags": ["image-classification", "vision"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**__lowerCAmelCase )
else:
trainer.create_model_card(**__lowerCAmelCase )
if __name__ == "__main__":
main()
| 322
| 1
|
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
lowerCamelCase__ = logging.get_logger(__name__)
class lowerCAmelCase__ ( UpperCAmelCase__ ):
def lowerCAmelCase__ ( self : Optional[Any] , lowerCamelCase__ : int ) ->str:
'''simple docstring'''
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
_UpperCAmelCase : Union[str, Any] = [label.strip() for label in labels.split("," ) if label.strip()]
return labels
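    # (Added note) `_parse_labels` accepts either a ready-made list of labels or a
    # single comma-separated string, e.g. "sports, politics ,tech" becomes
    # ["sports", "politics", "tech"].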
def __call__( self : Union[str, Any] , lowerCamelCase__ : Any , lowerCamelCase__ : Any , lowerCamelCase__ : List[Any] ) ->str:
'''simple docstring'''
if len(lowerCamelCase__ ) == 0 or len(lowerCamelCase__ ) == 0:
raise ValueError("You must include at least one label and at least one sequence." )
if hypothesis_template.format(labels[0] ) == hypothesis_template:
raise ValueError(
(
"The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. "
"Make sure the passed template includes formatting syntax such as {{}} where the label should go."
).format(lowerCamelCase__ ) )
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
_UpperCAmelCase : Optional[Any] = [sequences]
_UpperCAmelCase : int = []
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(lowerCamelCase__ )] for label in labels] )
return sequence_pairs, sequences
@add_end_docstrings(UpperCAmelCase__ )
class lowerCAmelCase__ ( UpperCAmelCase__ ):
def __init__( self : Union[str, Any] , lowerCamelCase__ : Optional[Any]=ZeroShotClassificationArgumentHandler() , *lowerCamelCase__ : List[str] , **lowerCamelCase__ : Any ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : Tuple = args_parser
super().__init__(*lowerCamelCase__ , **lowerCamelCase__ )
if self.entailment_id == -1:
logger.warning(
"Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
"-1. Define a descriptive label2id mapping in the model config to ensure correct outputs." )
@property
def lowerCAmelCase__ ( self : Any ) ->Union[str, Any]:
'''simple docstring'''
for label, ind in self.model.config.labelaid.items():
if label.lower().startswith("entail" ):
return ind
return -1
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : Tuple , lowerCamelCase__ : int=True , lowerCamelCase__ : Optional[int]=True , lowerCamelCase__ : str=TruncationStrategy.ONLY_FIRST , **lowerCamelCase__ : List[Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase : int = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
"Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
" `pad_token=eos_token`" )
_UpperCAmelCase : Optional[Any] = self.tokenizer.eos_token
try:
_UpperCAmelCase : List[str] = self.tokenizer(
lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , return_tensors=lowerCamelCase__ , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , )
except Exception as e:
if "too short" in str(lowerCamelCase__ ):
# tokenizers might yell that we want to truncate
# to a value that is not even reached by the input.
# In that case we don't want to truncate.
                # There doesn't seem to be a better way to catch that exception.
_UpperCAmelCase : List[Any] = self.tokenizer(
lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , return_tensors=lowerCamelCase__ , padding=lowerCamelCase__ , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def lowerCAmelCase__ ( self : int , **lowerCamelCase__ : Union[str, Any] ) ->Tuple:
'''simple docstring'''
if kwargs.get("multi_class" , lowerCamelCase__ ) is not None:
_UpperCAmelCase : int = kwargs["multi_class"]
logger.warning(
"The `multi_class` argument has been deprecated and renamed to `multi_label`. "
"`multi_class` will be removed in a future version of Transformers." )
_UpperCAmelCase : Dict = {}
if "candidate_labels" in kwargs:
_UpperCAmelCase : List[Any] = self._args_parser._parse_labels(kwargs["candidate_labels"] )
if "hypothesis_template" in kwargs:
_UpperCAmelCase : Dict = kwargs["hypothesis_template"]
_UpperCAmelCase : List[str] = {}
if "multi_label" in kwargs:
_UpperCAmelCase : Optional[Any] = kwargs["multi_label"]
return preprocess_params, {}, postprocess_params
def __call__( self : int , lowerCamelCase__ : Union[str, List[str]] , *lowerCamelCase__ : str , **lowerCamelCase__ : Optional[Any] , ) ->Optional[int]:
'''simple docstring'''
if len(lowerCamelCase__ ) == 0:
pass
elif len(lowerCamelCase__ ) == 1 and "candidate_labels" not in kwargs:
_UpperCAmelCase : int = args[0]
else:
raise ValueError(F"""Unable to understand extra arguments {args}""" )
return super().__call__(lowerCamelCase__ , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Any=None , lowerCamelCase__ : str="This example is {}." ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = self._args_parser(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
for i, (candidate_label, sequence_pair) in enumerate(zip(lowerCamelCase__ , lowerCamelCase__ ) ):
_UpperCAmelCase : Optional[int] = self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(lowerCamelCase__ ) - 1,
**model_input,
}
def lowerCAmelCase__ ( self : Optional[Any] , lowerCamelCase__ : Optional[int] ) ->int:
'''simple docstring'''
_UpperCAmelCase : Dict = inputs["candidate_label"]
_UpperCAmelCase : Optional[int] = inputs["sequence"]
_UpperCAmelCase : Dict = {k: inputs[k] for k in self.tokenizer.model_input_names}
_UpperCAmelCase : List[Any] = self.model(**lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = {
"candidate_label": candidate_label,
"sequence": sequence,
"is_last": inputs["is_last"],
**outputs,
}
return model_outputs
def lowerCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : int , lowerCamelCase__ : Tuple=False ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Any = [outputs["candidate_label"] for outputs in model_outputs]
_UpperCAmelCase : Any = [outputs["sequence"] for outputs in model_outputs]
_UpperCAmelCase : Optional[int] = np.concatenate([output["logits"].numpy() for output in model_outputs] )
_UpperCAmelCase : Optional[Any] = logits.shape[0]
_UpperCAmelCase : Any = len(lowerCamelCase__ )
_UpperCAmelCase : str = N // n
_UpperCAmelCase : str = logits.reshape((num_sequences, n, -1) )
if multi_label or len(lowerCamelCase__ ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
_UpperCAmelCase : int = self.entailment_id
_UpperCAmelCase : List[Any] = -1 if entailment_id == 0 else 0
_UpperCAmelCase : str = reshaped_outputs[..., [contradiction_id, entailment_id]]
_UpperCAmelCase : Union[str, Any] = np.exp(lowerCamelCase__ ) / np.exp(lowerCamelCase__ ).sum(-1 , keepdims=lowerCamelCase__ )
_UpperCAmelCase : str = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
_UpperCAmelCase : int = reshaped_outputs[..., self.entailment_id]
_UpperCAmelCase : Union[str, Any] = np.exp(lowerCamelCase__ ) / np.exp(lowerCamelCase__ ).sum(-1 , keepdims=lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
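# A standalone numpy sketch (added for illustration; the function name and the
# (contradiction, neutral, entailment) logit ordering are assumptions for an
# MNLI-style model) of the two scoring modes implemented above: `multi_label=True`
# softmaxes (contradiction, entailment) independently per label, while
# `multi_label=False` softmaxes the entailment logits across all labels.
def _zero_shot_scoring_demo():
    reshaped = np.array([[[0.1, 0.0, 2.0], [1.5, 0.0, 0.2]]])  # (sequences=1, labels=2, classes=3)
    contradiction_id, entailment_id = 0, 2
    # Independent per-label softmax over the (contradiction, entailment) pair.
    pair = reshaped[..., [contradiction_id, entailment_id]]
    multi_scores = (np.exp(pair) / np.exp(pair).sum(-1, keepdims=True))[..., 1]
    # Softmax of the entailment logits across candidate labels (scores sum to 1).
    ent = reshaped[..., entailment_id]
    single_scores = np.exp(ent) / np.exp(ent).sum(-1, keepdims=True)
    return multi_scores, single_scores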
| 322
|
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
lowerCamelCase__ = logging.get_logger(__name__)
# General docstring
lowerCamelCase__ = 'RegNetConfig'
# Base docstring
lowerCamelCase__ = 'facebook/regnet-y-040'
lowerCamelCase__ = [1, 1_088, 7, 7]
# Image classification docstring
lowerCamelCase__ = 'facebook/regnet-y-040'
lowerCamelCase__ = 'tabby, tabby cat'
lowerCamelCase__ = [
'facebook/regnet-y-040',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class lowerCAmelCase__ ( tf.keras.layers.Layer ):
def __init__( self : int , lowerCamelCase__ : int , lowerCamelCase__ : int = 3 , lowerCamelCase__ : int = 1 , lowerCamelCase__ : int = 1 , lowerCamelCase__ : Optional[str] = "relu" , **lowerCamelCase__ : Tuple , ) ->Optional[Any]:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
_UpperCAmelCase : Optional[Any] = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
_UpperCAmelCase : Dict = tf.keras.layers.ConvaD(
filters=lowerCamelCase__ , kernel_size=lowerCamelCase__ , strides=lowerCamelCase__ , padding="VALID" , groups=lowerCamelCase__ , use_bias=lowerCamelCase__ , name="convolution" , )
_UpperCAmelCase : List[Any] = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" )
_UpperCAmelCase : int = ACTaFN[activation] if activation is not None else tf.identity
def lowerCAmelCase__ ( self : int , lowerCamelCase__ : Tuple ) ->Any:
'''simple docstring'''
_UpperCAmelCase : List[str] = self.convolution(self.padding(lowerCamelCase__ ) )
_UpperCAmelCase : Optional[Any] = self.normalization(lowerCamelCase__ )
_UpperCAmelCase : List[Any] = self.activation(lowerCamelCase__ )
return hidden_state
class lowerCAmelCase__ ( tf.keras.layers.Layer ):
def __init__( self : str , lowerCamelCase__ : RegNetConfig , **lowerCamelCase__ : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
_UpperCAmelCase : List[str] = config.num_channels
_UpperCAmelCase : Any = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="embedder" , )
def lowerCAmelCase__ ( self : Any , lowerCamelCase__ : Optional[Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : List[str] = shape_list(lowerCamelCase__ )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
_UpperCAmelCase : Optional[Any] = tf.transpose(lowerCamelCase__ , perm=(0, 2, 3, 1) )
_UpperCAmelCase : List[Any] = self.embedder(lowerCamelCase__ )
return hidden_state
class lowerCAmelCase__ ( tf.keras.layers.Layer ):
def __init__( self : int , lowerCamelCase__ : int , lowerCamelCase__ : int = 2 , **lowerCamelCase__ : int ) ->Union[str, Any]:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
_UpperCAmelCase : int = tf.keras.layers.ConvaD(
filters=lowerCamelCase__ , kernel_size=1 , strides=lowerCamelCase__ , use_bias=lowerCamelCase__ , name="convolution" )
_UpperCAmelCase : Any = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" )
def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : tf.Tensor , lowerCamelCase__ : bool = False ) ->tf.Tensor:
'''simple docstring'''
return self.normalization(self.convolution(lowerCamelCase__ ) , training=lowerCamelCase__ )
class lowerCAmelCase__ ( tf.keras.layers.Layer ):
def __init__( self : Any , lowerCamelCase__ : int , lowerCamelCase__ : int , **lowerCamelCase__ : Optional[int] ) ->Dict:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
_UpperCAmelCase : List[str] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowerCamelCase__ , name="pooler" )
_UpperCAmelCase : int = [
tf.keras.layers.ConvaD(filters=lowerCamelCase__ , kernel_size=1 , activation="relu" , name="attention.0" ),
tf.keras.layers.ConvaD(filters=lowerCamelCase__ , kernel_size=1 , activation="sigmoid" , name="attention.2" ),
]
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : Optional[int] ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.pooler(lowerCamelCase__ )
for layer_module in self.attention:
_UpperCAmelCase : str = layer_module(lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = hidden_state * pooled
return hidden_state
class lowerCAmelCase__ ( tf.keras.layers.Layer ):
def __init__( self : Dict , lowerCamelCase__ : RegNetConfig , lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : int = 1 , **lowerCamelCase__ : Any ) ->List[str]:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
_UpperCAmelCase : List[str] = in_channels != out_channels or stride != 1
_UpperCAmelCase : List[str] = max(1 , out_channels // config.groups_width )
_UpperCAmelCase : List[str] = (
TFRegNetShortCut(lowerCamelCase__ , stride=lowerCamelCase__ , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
_UpperCAmelCase : Optional[int] = [
TFRegNetConvLayer(lowerCamelCase__ , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
lowerCamelCase__ , stride=lowerCamelCase__ , groups=lowerCamelCase__ , activation=config.hidden_act , name="layer.1" ),
TFRegNetConvLayer(lowerCamelCase__ , kernel_size=1 , activation=lowerCamelCase__ , name="layer.2" ),
]
_UpperCAmelCase : Union[str, Any] = ACTaFN[config.hidden_act]
def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : Union[str, Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Any = hidden_state
for layer_module in self.layers:
_UpperCAmelCase : List[Any] = layer_module(lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = self.shortcut(lowerCamelCase__ )
hidden_state += residual
_UpperCAmelCase : List[Any] = self.activation(lowerCamelCase__ )
return hidden_state
class lowerCAmelCase__ ( tf.keras.layers.Layer ):
def __init__( self : List[Any] , lowerCamelCase__ : RegNetConfig , lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : int = 1 , **lowerCamelCase__ : str ) ->Optional[int]:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = in_channels != out_channels or stride != 1
_UpperCAmelCase : Optional[int] = max(1 , out_channels // config.groups_width )
_UpperCAmelCase : Union[str, Any] = (
TFRegNetShortCut(lowerCamelCase__ , stride=lowerCamelCase__ , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
_UpperCAmelCase : List[Any] = [
TFRegNetConvLayer(lowerCamelCase__ , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
lowerCamelCase__ , stride=lowerCamelCase__ , groups=lowerCamelCase__ , activation=config.hidden_act , name="layer.1" ),
TFRegNetSELayer(lowerCamelCase__ , reduced_channels=int(round(in_channels / 4 ) ) , name="layer.2" ),
TFRegNetConvLayer(lowerCamelCase__ , kernel_size=1 , activation=lowerCamelCase__ , name="layer.3" ),
]
_UpperCAmelCase : int = ACTaFN[config.hidden_act]
def lowerCAmelCase__ ( self : Any , lowerCamelCase__ : str ) ->Any:
'''simple docstring'''
_UpperCAmelCase : int = hidden_state
for layer_module in self.layers:
_UpperCAmelCase : Tuple = layer_module(lowerCamelCase__ )
_UpperCAmelCase : List[Any] = self.shortcut(lowerCamelCase__ )
hidden_state += residual
_UpperCAmelCase : Tuple = self.activation(lowerCamelCase__ )
return hidden_state
class lowerCAmelCase__ ( tf.keras.layers.Layer ):
def __init__( self : str , lowerCamelCase__ : RegNetConfig , lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : int = 2 , lowerCamelCase__ : int = 2 , **lowerCamelCase__ : Union[str, Any] ) ->Optional[int]:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
_UpperCAmelCase : str = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
_UpperCAmelCase : List[str] = [
# downsampling is done in the first layer with stride of 2
layer(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , stride=lowerCamelCase__ , name="layers.0" ),
*[layer(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , name=F"""layers.{i+1}""" ) for i in range(depth - 1 )],
]
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : List[str] ) ->List[str]:
'''simple docstring'''
for layer_module in self.layers:
_UpperCAmelCase : Optional[int] = layer_module(lowerCamelCase__ )
return hidden_state
class lowerCAmelCase__ ( tf.keras.layers.Layer ):
def __init__( self : Dict , lowerCamelCase__ : RegNetConfig , **lowerCamelCase__ : int ) ->Dict:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
lowerCamelCase__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="stages.0" , ) )
_UpperCAmelCase : Dict = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(lowerCamelCase__ , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , depth=lowerCamelCase__ , name=F"""stages.{i+1}""" ) )
def lowerCAmelCase__ ( self : str , lowerCamelCase__ : tf.Tensor , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = True ) ->TFBaseModelOutputWithNoAttention:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_UpperCAmelCase : Optional[Any] = hidden_states + (hidden_state,)
_UpperCAmelCase : Dict = stage_module(lowerCamelCase__ )
if output_hidden_states:
_UpperCAmelCase : Tuple = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=lowerCamelCase__ , hidden_states=lowerCamelCase__ )
@keras_serializable
class lowerCAmelCase__ ( tf.keras.layers.Layer ):
lowerCAmelCase : Optional[Any] = RegNetConfig
def __init__( self : Union[str, Any] , lowerCamelCase__ : Any , **lowerCamelCase__ : str ) ->int:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = config
_UpperCAmelCase : Union[str, Any] = TFRegNetEmbeddings(lowerCamelCase__ , name="embedder" )
_UpperCAmelCase : Union[str, Any] = TFRegNetEncoder(lowerCamelCase__ , name="encoder" )
_UpperCAmelCase : Union[str, Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowerCamelCase__ , name="pooler" )
@unpack_inputs
def lowerCAmelCase__ ( self : Any , lowerCamelCase__ : tf.Tensor , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : bool = False , ) ->TFBaseModelOutputWithPoolingAndNoAttention:
'''simple docstring'''
_UpperCAmelCase : Tuple = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCAmelCase : List[str] = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCAmelCase : Union[str, Any] = self.embedder(lowerCamelCase__ , training=lowerCamelCase__ )
_UpperCAmelCase : str = self.encoder(
lowerCamelCase__ , output_hidden_states=lowerCamelCase__ , return_dict=lowerCamelCase__ , training=lowerCamelCase__ )
_UpperCAmelCase : Dict = encoder_outputs[0]
_UpperCAmelCase : Dict = self.pooler(lowerCamelCase__ )
        # Change to NCHW output format to have uniformity across the modules
_UpperCAmelCase : Union[str, Any] = tf.transpose(lowerCamelCase__ , perm=(0, 3, 1, 2) )
_UpperCAmelCase : Tuple = tf.transpose(lowerCamelCase__ , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
_UpperCAmelCase : List[str] = tuple([tf.transpose(lowerCamelCase__ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowerCamelCase__ , pooler_output=lowerCamelCase__ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : Tuple = RegNetConfig
lowerCAmelCase : Tuple = "regnet"
lowerCAmelCase : Union[str, Any] = "pixel_values"
@property
def lowerCAmelCase__ ( self : Optional[Any] ) ->Optional[int]:
'''simple docstring'''
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_24, 2_24) , dtype=tf.floataa )}
lowerCamelCase__ = r'\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n'
lowerCamelCase__ = r'\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." , UpperCAmelCase__ , )
class lowerCAmelCase__ ( UpperCAmelCase__ ):
def __init__( self : Any , lowerCamelCase__ : RegNetConfig , *lowerCamelCase__ : Any , **lowerCamelCase__ : List[str] ) ->Optional[int]:
'''simple docstring'''
super().__init__(lowerCamelCase__ , *lowerCamelCase__ , **lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = TFRegNetMainLayer(lowerCamelCase__ , name="regnet" )
@unpack_inputs
@add_start_docstrings_to_model_forward(lowerCamelCase__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCamelCase__ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowerCAmelCase__ ( self : str , lowerCamelCase__ : tf.Tensor , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : Any=False , ) ->Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCAmelCase : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCAmelCase : Union[str, Any] = self.regnet(
pixel_values=lowerCamelCase__ , output_hidden_states=lowerCamelCase__ , return_dict=lowerCamelCase__ , training=lowerCamelCase__ , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , UpperCAmelCase__ , )
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ ):
def __init__( self : str , lowerCamelCase__ : RegNetConfig , *lowerCamelCase__ : List[Any] , **lowerCamelCase__ : Union[str, Any] ) ->Any:
'''simple docstring'''
super().__init__(lowerCamelCase__ , *lowerCamelCase__ , **lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = config.num_labels
_UpperCAmelCase : Dict = TFRegNetMainLayer(lowerCamelCase__ , name="regnet" )
# classification head
_UpperCAmelCase : str = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name="classifier.1" ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(lowerCamelCase__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCamelCase__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowerCAmelCase__ ( self : str , lowerCamelCase__ : tf.Tensor = None , lowerCamelCase__ : tf.Tensor = None , lowerCamelCase__ : bool = None , lowerCamelCase__ : bool = None , lowerCamelCase__ : Dict=False , ) ->Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
'''simple docstring'''
_UpperCAmelCase : str = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCAmelCase : str = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCAmelCase : Union[str, Any] = self.regnet(
lowerCamelCase__ , output_hidden_states=lowerCamelCase__ , return_dict=lowerCamelCase__ , training=lowerCamelCase__ )
_UpperCAmelCase : int = outputs.pooler_output if return_dict else outputs[1]
_UpperCAmelCase : Dict = self.classifier[0](lowerCamelCase__ )
_UpperCAmelCase : str = self.classifier[1](lowerCamelCase__ )
_UpperCAmelCase : Tuple = None if labels is None else self.hf_compute_loss(labels=lowerCamelCase__ , logits=lowerCamelCase__ )
if not return_dict:
_UpperCAmelCase : int = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=lowerCamelCase__ , logits=lowerCamelCase__ , hidden_states=outputs.hidden_states )
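# A hedged usage sketch (added for illustration; assumes the public
# `TFRegNetForImageClassification` / `AutoImageProcessor` API in `transformers`):
#
#     from transformers import AutoImageProcessor, TFRegNetForImageClassification
#     processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#     model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#     inputs = processor(images=image, return_tensors="tf")
#     logits = model(**inputs).logits
#     predicted_label = model.config.id2label[int(tf.math.argmax(logits, axis=-1)[0])]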
| 322
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase__ = {
'configuration_jukebox': [
'JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP',
'JukeboxConfig',
'JukeboxPriorConfig',
'JukeboxVQVAEConfig',
],
'tokenization_jukebox': ['JukeboxTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'JukeboxModel',
'JukeboxPreTrainedModel',
'JukeboxVQVAE',
'JukeboxPrior',
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
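    # (Added note) `_LazyModule` replaces this module in `sys.modules`, so the
    # torch-backed submodules listed above are imported only when one of their
    # attributes is first accessed.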
| 322
|
'''simple docstring'''
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def __lowerCAmelCase (__lowerCAmelCase ):
if is_torch_version("<" , "2.0.0" ) or not hasattr(__lowerCAmelCase , "_dynamo" ):
return False
return isinstance(__lowerCAmelCase , torch._dynamo.eval_frame.OptimizedModule )
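# (Added note) `torch.compile` wraps models in `torch._dynamo.eval_frame.OptimizedModule`;
# the version guard above exists because `torch._dynamo` is unavailable before torch 2.0.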
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase = True ):
_UpperCAmelCase : Any = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
_UpperCAmelCase : Dict = is_compiled_module(__lowerCAmelCase )
if is_compiled:
_UpperCAmelCase : Optional[int] = model
_UpperCAmelCase : Any = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(__lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : Any = model.module
if not keep_fpaa_wrapper:
_UpperCAmelCase : List[Any] = getattr(__lowerCAmelCase , "forward" )
_UpperCAmelCase : Dict = model.__dict__.pop("_original_forward" , __lowerCAmelCase )
if original_forward is not None:
while hasattr(__lowerCAmelCase , "__wrapped__" ):
_UpperCAmelCase : Optional[int] = forward.__wrapped__
if forward == original_forward:
break
_UpperCAmelCase : Dict = forward
if getattr(__lowerCAmelCase , "_converted_to_transformer_engine" , __lowerCAmelCase ):
convert_model(__lowerCAmelCase , to_transformer_engine=__lowerCAmelCase )
if is_compiled:
_UpperCAmelCase : int = model
_UpperCAmelCase : str = compiled_model
return model
def __lowerCAmelCase ():
PartialState().wait_for_everyone()
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
if PartialState().distributed_type == DistributedType.TPU:
xm.save(__lowerCAmelCase , __lowerCAmelCase )
elif PartialState().local_process_index == 0:
torch.save(__lowerCAmelCase , __lowerCAmelCase )
@contextmanager
def __lowerCAmelCase (**__lowerCAmelCase ):
for key, value in kwargs.items():
_UpperCAmelCase : str = str(__lowerCAmelCase )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def __lowerCAmelCase (__lowerCAmelCase ):
if not hasattr(__lowerCAmelCase , "__qualname__" ) and not hasattr(__lowerCAmelCase , "__name__" ):
_UpperCAmelCase : List[str] = getattr(__lowerCAmelCase , "__class__" , __lowerCAmelCase )
if hasattr(__lowerCAmelCase , "__qualname__" ):
return obj.__qualname__
if hasattr(__lowerCAmelCase , "__name__" ):
return obj.__name__
return str(__lowerCAmelCase )
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
for key, value in source.items():
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : Any = destination.setdefault(__lowerCAmelCase , {} )
merge_dicts(__lowerCAmelCase , __lowerCAmelCase )
else:
_UpperCAmelCase : Optional[int] = value
return destination
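# A small illustrative check (added; assumes the recursive helper keeps its
# original name `merge_dicts`, as the recursive call above suggests): nested
# mappings from `source` are merged into `destination` in place instead of
# overwriting whole sub-dicts.
def _merge_dicts_demo():
    destination = {"a": {"x": 1}, "b": 2}
    source = {"a": {"y": 3}, "b": 4}
    merged = merge_dicts(source, destination)
    assert merged == {"a": {"x": 1, "y": 3}, "b": 4}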
def __lowerCAmelCase (__lowerCAmelCase = None ):
if port is None:
_UpperCAmelCase : Tuple = 29_500
with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
return s.connect_ex(("localhost", port) ) == 0
| 322
| 1
|
'''simple docstring'''
from queue import PriorityQueue
from typing import Any
import numpy as np
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ):
for nxt, d in graph[v]:
if nxt in visited_forward:
continue
_UpperCAmelCase : Dict = cst_fwd.get(__lowerCAmelCase , np.inf )
_UpperCAmelCase : List[Any] = cst_fwd[v] + d
if new_cost_f < old_cost_f:
queue.put((new_cost_f, nxt) )
_UpperCAmelCase : int = new_cost_f
_UpperCAmelCase : str = v
if nxt in visited_backward:
if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
_UpperCAmelCase : Tuple = cst_fwd[v] + d + cst_bwd[nxt]
return shortest_distance
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : Optional[int] = -1
_UpperCAmelCase : Optional[int] = set()
_UpperCAmelCase : List[Any] = set()
_UpperCAmelCase : str = {source: 0}
_UpperCAmelCase : Union[str, Any] = {destination: 0}
_UpperCAmelCase : Union[str, Any] = {source: None}
_UpperCAmelCase : List[Any] = {destination: None}
_UpperCAmelCase : PriorityQueue[Any] = PriorityQueue()
_UpperCAmelCase : PriorityQueue[Any] = PriorityQueue()
_UpperCAmelCase : Union[str, Any] = np.inf
queue_forward.put((0, source) )
queue_backward.put((0, destination) )
if source == destination:
return 0
while not queue_forward.empty() and not queue_backward.empty():
_UpperCAmelCase , _UpperCAmelCase : Dict = queue_forward.get()
visited_forward.add(__lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = queue_backward.get()
visited_backward.add(__lowerCAmelCase )
_UpperCAmelCase : List[str] = pass_and_relaxation(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , )
_UpperCAmelCase : Optional[int] = pass_and_relaxation(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , )
if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
break
if shortest_distance != np.inf:
_UpperCAmelCase : str = shortest_distance
return shortest_path_distance
lowerCamelCase__ = {
'B': [['C', 1]],
'C': [['D', 1]],
'D': [['F', 1]],
'E': [['B', 1], ['G', 2]],
'F': [],
'G': [['F', 1]],
}
lowerCamelCase__ = {
'B': [['E', 1]],
'C': [['B', 1]],
'D': [['C', 1]],
'F': [['D', 1], ['G', 1]],
'E': [[None, np.inf]],
'G': [['E', 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
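# (Added usage note; the search function's original name is masked above, so call
# it `bidirectional_dij` here for illustration.) On the sample graphs defined
# above, `bidirectional_dij("E", "F", graph_fwd, graph_bwd)` returns 3: the path
# E -> G -> F costs 2 + 1 = 3, beating E -> B -> C -> D -> F at cost 4.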
| 322
|
'''simple docstring'''
def __lowerCAmelCase (__lowerCAmelCase ):
if number < 0:
raise ValueError("number must not be negative" )
return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
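# (Added note) The bit trick works because a positive power of two has exactly one
# set bit, so `number & (number - 1)` clears it: 8 & 7 == 0b1000 & 0b0111 == 0,
# while 6 & 5 == 0b110 & 0b101 == 0b100 != 0. Note that 0 also passes the check,
# even though it is not a power of two.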
| 322
| 1
|
'''simple docstring'''
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : Union[str, Any] = int(__lowerCAmelCase )
# Initialize Result
_UpperCAmelCase : Tuple = []
    # Traverse the denominations from largest to smallest (assumes ascending input)
for denomination in reversed(__lowerCAmelCase ):
# Find denominations
while int(__lowerCAmelCase ) >= int(__lowerCAmelCase ):
total_value -= int(__lowerCAmelCase )
            answer.append(__lowerCAmelCase ) # append the chosen denomination to the answer list
return answer
# Driver Code
if __name__ == "__main__":
lowerCamelCase__ = []
lowerCamelCase__ = '0'
if (
        input('Do you want to enter your denominations? (y/n): ').strip().lower()
== "y"
):
lowerCamelCase__ = int(input('Enter the number of denominations you want to add: ').strip())
for i in range(0, n):
denominations.append(int(input(F'''Denomination {i}: ''').strip()))
lowerCamelCase__ = input('Enter the change you want to make in Indian Currency: ').strip()
else:
# All denominations of Indian Currency if user does not enter
lowerCamelCase__ = [1, 2, 5, 10, 20, 50, 100, 500, 2_000]
lowerCamelCase__ = input('Enter the change you want to make: ').strip()
    if int(value) <= 0:
print('The total value cannot be zero or negative.')
else:
print(F'''Following is minimal change for {value}: ''')
lowerCamelCase__ = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=' ')
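# (Added note) This greedy strategy is only guaranteed to be optimal for canonical
# coin systems such as the Indian denominations above. For denominations [1, 3, 4]
# and a value of 6, the greedy answer [4, 1, 1] uses three coins while the optimal
# answer [3, 3] uses two.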
| 322
|
'''simple docstring'''
def __lowerCAmelCase (__lowerCAmelCase ):
return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print('Program to check whether a number is a Perfect number or not...')
lowerCamelCase__ = int(input('Enter number: ').strip())
print(F'''{number} is {"" if perfect(number) else "not "}a Perfect Number.''')
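# (Added worked example) 28 has proper divisors 1, 2, 4, 7 and 14, which sum to 28,
# so perfect(28) is True; 27 has proper divisors 1, 3 and 9, which sum to 13, so
# perfect(27) is False.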
| 322
| 1
|
'''simple docstring'''
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase , _UpperCAmelCase : Dict = len(__lowerCAmelCase ), len(grid[0] )
if (
min(__lowerCAmelCase , __lowerCAmelCase ) < 0
or row == row_length
or col == col_length
or (row, col) in visit
or grid[row][col] == 1
):
return 0
if row == row_length - 1 and col == col_length - 1:
return 1
visit.add((row, col) )
_UpperCAmelCase : int = 0
count += depth_first_search(__lowerCAmelCase , row + 1 , __lowerCAmelCase , __lowerCAmelCase )
count += depth_first_search(__lowerCAmelCase , row - 1 , __lowerCAmelCase , __lowerCAmelCase )
count += depth_first_search(__lowerCAmelCase , __lowerCAmelCase , col + 1 , __lowerCAmelCase )
count += depth_first_search(__lowerCAmelCase , __lowerCAmelCase , col - 1 , __lowerCAmelCase )
visit.remove((row, col) )
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
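# An illustrative call (added; the recursive helper is invoked as
# `depth_first_search` above, so that name is used here) counting simple paths
# from the top-left to the bottom-right corner of a grid with one blocked cell.
def _count_paths_demo():
    grid = [
        [0, 0, 0],
        [0, 1, 0],
        [0, 0, 0],
    ]
    # With the centre blocked, only the two border routes remain, so this returns 2.
    return depth_first_search(grid, 0, 0, set())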
| 322
|
'''simple docstring'''
from collections.abc import Sequence
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
return sum(c * (x**i) for i, c in enumerate(__lowerCAmelCase ) )
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : Dict = 0.0
for coeff in reversed(__lowerCAmelCase ):
_UpperCAmelCase : int = result * x + coeff
return result
if __name__ == "__main__":
lowerCamelCase__ = (0.0, 0.0, 5.0, 9.3, 7.0)
lowerCamelCase__ = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
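# (Added note) Horner's rule rewrites c0 + c1*x + ... + cn*x**n as
# ((cn*x + c(n-1))*x + ...)*x + c0, needing only n multiplications and n additions,
# whereas `evaluate_poly` recomputes each power of x. For the sample polynomial
# 5x^2 + 9.3x^3 + 7x^4 at x = 10.0, both print 79800.0.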
| 322
| 1
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
lowerCamelCase__ = {
'vocab_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'
),
},
}
lowerCamelCase__ = {
'yjernite/retribert-base-uncased': 512,
}
lowerCamelCase__ = {
'yjernite/retribert-base-uncased': {'do_lower_case': True},
}
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : Dict = VOCAB_FILES_NAMES
lowerCAmelCase : str = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase : List[Any] = PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase : int = RetriBertTokenizer
lowerCAmelCase : int = ["input_ids", "attention_mask"]
def __init__( self : int , lowerCamelCase__ : List[str]=None , lowerCamelCase__ : Dict=None , lowerCamelCase__ : Union[str, Any]=True , lowerCamelCase__ : List[Any]="[UNK]" , lowerCamelCase__ : List[Any]="[SEP]" , lowerCamelCase__ : Dict="[PAD]" , lowerCamelCase__ : List[str]="[CLS]" , lowerCamelCase__ : Tuple="[MASK]" , lowerCamelCase__ : List[Any]=True , lowerCamelCase__ : List[str]=None , **lowerCamelCase__ : int , ) ->int:
'''simple docstring'''
super().__init__(
lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , do_lower_case=lowerCamelCase__ , unk_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , tokenize_chinese_chars=lowerCamelCase__ , strip_accents=lowerCamelCase__ , **lowerCamelCase__ , )
_UpperCAmelCase : Tuple = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , lowerCamelCase__ ) != do_lower_case
or normalizer_state.get("strip_accents" , lowerCamelCase__ ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , lowerCamelCase__ ) != tokenize_chinese_chars
):
_UpperCAmelCase : Dict = getattr(lowerCamelCase__ , normalizer_state.pop("type" ) )
_UpperCAmelCase : Optional[int] = do_lower_case
_UpperCAmelCase : Optional[Any] = strip_accents
_UpperCAmelCase : str = tokenize_chinese_chars
_UpperCAmelCase : Optional[int] = normalizer_class(**lowerCamelCase__ )
_UpperCAmelCase : List[Any] = do_lower_case
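    # (Added note) The block above re-creates the backend normalizer whenever the
    # passed `do_lower_case`, `strip_accents` or `tokenize_chinese_chars` differ
    # from the state serialized in `tokenizer.json`, keeping the Python kwargs and
    # the Rust backend in sync.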
def lowerCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Tuple=None ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase : str = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowerCAmelCase__ ( self : Any , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ) ->List[int]:
'''simple docstring'''
_UpperCAmelCase : int = [self.sep_token_id]
_UpperCAmelCase : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCAmelCase__ ( self : Any , lowerCamelCase__ : str , lowerCamelCase__ : Optional[str] = None ) ->Tuple[str]:
'''simple docstring'''
_UpperCAmelCase : Tuple = self._tokenizer.model.save(lowerCamelCase__ , name=lowerCamelCase__ )
return tuple(lowerCamelCase__ )
| 322
|
'''simple docstring'''
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : List[Any] = len(__lowerCAmelCase )
_UpperCAmelCase : Tuple = sum(__lowerCAmelCase )
_UpperCAmelCase : List[Any] = [[False for x in range(s + 1 )] for y in range(n + 1 )]
for i in range(1 , n + 1 ):
_UpperCAmelCase : Any = True
for i in range(1 , s + 1 ):
_UpperCAmelCase : List[Any] = False
for i in range(1 , n + 1 ):
for j in range(1 , s + 1 ):
            _UpperCAmelCase : Optional[int] = dp[i - 1][j] # a sum reachable without item i stays reachable
if arr[i - 1] <= j:
_UpperCAmelCase : Any = dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2 ) , -1 , -1 ):
if dp[n][j] is True:
_UpperCAmelCase : List[Any] = s - 2 * j
break
return diff
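# (Added worked example) For arr = [1, 6, 11, 5] the total is 23; the largest
# achievable subset sum not exceeding 23 // 2 is 11 (e.g. {6, 5}), giving a
# minimum partition difference of 23 - 2 * 11 = 1.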
| 322
| 1
|
'''simple docstring'''
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase__ ( UpperCAmelCase__ , unittest.TestCase ):
lowerCAmelCase : int = CLIPTokenizer
lowerCAmelCase : Dict = CLIPTokenizerFast
lowerCAmelCase : Any = True
lowerCAmelCase : Any = {}
lowerCAmelCase : List[Any] = False
def lowerCAmelCase__ ( self : Any ) ->Optional[int]:
'''simple docstring'''
super().setUp()
# fmt: off
_UpperCAmelCase : Optional[Any] = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
_UpperCAmelCase : Optional[Any] = dict(zip(lowerCamelCase__ , range(len(lowerCamelCase__ ) ) ) )
_UpperCAmelCase : Tuple = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
_UpperCAmelCase : List[str] = {"unk_token": "<unk>"}
_UpperCAmelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_UpperCAmelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowerCamelCase__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(lowerCamelCase__ ) )
def lowerCAmelCase__ ( self : List[str] , **lowerCamelCase__ : Dict ) ->Tuple:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : Any , **lowerCamelCase__ : Tuple ) ->Tuple:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : Optional[int] ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = "lower newer"
_UpperCAmelCase : int = "lower newer"
return input_text, output_text
def lowerCAmelCase__ ( self : Tuple ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : List[str] = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_UpperCAmelCase : str = "lower newer"
_UpperCAmelCase : Optional[int] = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
_UpperCAmelCase : Union[str, Any] = tokenizer.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
_UpperCAmelCase : str = tokens + [tokenizer.unk_token]
_UpperCAmelCase : Tuple = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , lowerCamelCase__ )
@require_ftfy
def lowerCAmelCase__ ( self : List[str] ) ->List[str]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_UpperCAmelCase : Union[str, Any] = self.tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
_UpperCAmelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
_UpperCAmelCase : Tuple = tokenizer_s.tokenize(lowerCamelCase__ )
_UpperCAmelCase : Dict = tokenizer_r.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
_UpperCAmelCase : List[Any] = "xa\u0303y" + " " + "x\xe3y"
_UpperCAmelCase : Optional[Any] = tokenizer_s.tokenize(lowerCamelCase__ )
_UpperCAmelCase : List[str] = tokenizer_r.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
# Test that the tokenization is identical on unicode of space type
_UpperCAmelCase : int = [
"\u0009", # (horizontal tab, '\t')
"\u000B", # (vertical tab)
"\u000C", # (form feed)
"\u0020", # (space, ' ')
"\u200E", # (left-to-right mark):w
"\u200F", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
_UpperCAmelCase : List[str] = tokenizer_s.tokenize(lowerCamelCase__ )
_UpperCAmelCase : List[Any] = tokenizer_r.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
# Test that the tokenization is identical on unicode of line break type
_UpperCAmelCase : int = [
"\u000A", # (line feed, '\n')
"\r\n", # (carriage return and line feed, '\r\n')
"\u000D", # (carriage return, '\r')
"\r", # (carriage return, '\r')
"\u000D", # (carriage return, '\r')
"\u2028", # (line separator)
"\u2029", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
_UpperCAmelCase : Tuple = tokenizer_s.tokenize(lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = tokenizer_r.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
def lowerCAmelCase__ ( self : Any ) ->List[Any]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_UpperCAmelCase : int = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
_UpperCAmelCase : List[Any] = F"""{text_of_1_token} {text_of_1_token}"""
_UpperCAmelCase : Tuple = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase__ , use_fast=lowerCamelCase__ , )
_UpperCAmelCase : str = tokenizer_r(lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCamelCase__ ) + 1, len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )) , )
_UpperCAmelCase : Optional[int] = F""" {text}"""
_UpperCAmelCase : Any = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase__ , use_fast=lowerCamelCase__ , )
_UpperCAmelCase : Tuple = tokenizer_r(lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowerCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowerCamelCase__ ) + 1, 1 + len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )) , )
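            # Illustrative values (assuming text_of_1_token == "hello"): without the
            # leading space the two tokens map to character spans (0, 5) and (6, 11);
            # with it, they shift to (1, 6) and (7, 12).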
def lowerCAmelCase__ ( self : List[Any] ) ->int:
'''simple docstring'''
with self.assertRaises(lowerCamelCase__ ) as context:
self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer" )
self.assertTrue(
context.exception.args[0].startswith(
"The `backend_tokenizer` provided does not match the expected format." ) )
@require_ftfy
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Dict:
'''simple docstring'''
super().test_tokenization_python_rust_equals()
def lowerCAmelCase__ ( self : str ) ->Dict:
'''simple docstring'''
pass
| 322
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'microsoft/resnet-50': 'https://huggingface.co/microsoft/resnet-50/blob/main/config.json',
}
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ ):
lowerCAmelCase : int = "resnet"
lowerCAmelCase : Union[str, Any] = ["basic", "bottleneck"]
def __init__( self : Dict , lowerCamelCase__ : Tuple=3 , lowerCamelCase__ : Any=64 , lowerCamelCase__ : Optional[int]=[2_56, 5_12, 10_24, 20_48] , lowerCamelCase__ : int=[3, 4, 6, 3] , lowerCamelCase__ : Dict="bottleneck" , lowerCamelCase__ : Dict="relu" , lowerCamelCase__ : List[Any]=False , lowerCamelCase__ : Any=None , lowerCamelCase__ : int=None , **lowerCamelCase__ : Tuple , ) ->List[str]:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
if layer_type not in self.layer_types:
raise ValueError(F"""layer_type={layer_type} is not one of {','.join(self.layer_types )}""" )
_UpperCAmelCase : str = num_channels
_UpperCAmelCase : List[str] = embedding_size
_UpperCAmelCase : Tuple = hidden_sizes
_UpperCAmelCase : Dict = depths
_UpperCAmelCase : List[Any] = layer_type
_UpperCAmelCase : Optional[int] = hidden_act
_UpperCAmelCase : Tuple = downsample_in_first_stage
_UpperCAmelCase : str = ["stem"] + [F"""stage{idx}""" for idx in range(1 , len(lowerCamelCase__ ) + 1 )]
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = get_aligned_output_features_output_indices(
out_features=lowerCamelCase__ , out_indices=lowerCamelCase__ , stage_names=self.stage_names )
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : Optional[Any] = version.parse("1.11" )
@property
def lowerCAmelCase__ ( self : Optional[Any] ) ->Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def lowerCAmelCase__ ( self : str ) ->float:
'''simple docstring'''
return 1E-3
| 322
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase__ = {
'configuration_xlm_roberta_xl': [
'XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaXLConfig',
'XLMRobertaXLOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaXLForCausalLM',
'XLMRobertaXLForMaskedLM',
'XLMRobertaXLForMultipleChoice',
'XLMRobertaXLForQuestionAnswering',
'XLMRobertaXLForSequenceClassification',
'XLMRobertaXLForTokenClassification',
'XLMRobertaXLModel',
'XLMRobertaXLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 322
|
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
lowerCamelCase__ = transforms.Compose(
[
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def __lowerCAmelCase (__lowerCAmelCase ):
if isinstance(__lowerCAmelCase , torch.Tensor ):
return image
elif isinstance(__lowerCAmelCase , PIL.Image.Image ):
_UpperCAmelCase : int = [image]
_UpperCAmelCase : str = [trans(img.convert("RGB" ) ) for img in image]
_UpperCAmelCase : Optional[Any] = torch.stack(__lowerCAmelCase )
return image
class lowerCAmelCase__ ( UpperCAmelCase__ ):
def __init__( self : Tuple , lowerCamelCase__ : int , lowerCamelCase__ : int ) ->int:
'''simple docstring'''
super().__init__()
# make sure scheduler can always be converted to DDIM
_UpperCAmelCase : Tuple = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=lowerCamelCase__ , scheduler=lowerCamelCase__ )
def lowerCAmelCase__ ( self : Dict , lowerCamelCase__ : str ) ->Union[str, Any]:
'''simple docstring'''
if strength < 0 or strength > 1:
raise ValueError(F"""The value of strength should in [0.0, 1.0] but is {strength}""" )
def lowerCAmelCase__ ( self : Dict , lowerCamelCase__ : Dict , lowerCamelCase__ : List[str] , lowerCamelCase__ : int ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : str = min(int(num_inference_steps * strength ) , lowerCamelCase__ )
_UpperCAmelCase : str = max(num_inference_steps - init_timestep , 0 )
_UpperCAmelCase : List[str] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
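        # Worked example for the method above (hypothetical values): with
        # num_inference_steps=50 and strength=0.8, init_timestep=40 and
        # t_start=10, so denoising covers the last 40 scheduler timesteps.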
def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : List[str] , lowerCamelCase__ : Any , lowerCamelCase__ : str , lowerCamelCase__ : str , lowerCamelCase__ : Dict , lowerCamelCase__ : Optional[Any]=None ) ->str:
'''simple docstring'''
if not isinstance(lowerCamelCase__ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(lowerCamelCase__ )}""" )
_UpperCAmelCase : Union[str, Any] = image.to(device=lowerCamelCase__ , dtype=lowerCamelCase__ )
if isinstance(lowerCamelCase__ , lowerCamelCase__ ) and len(lowerCamelCase__ ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(lowerCamelCase__ )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
_UpperCAmelCase : List[str] = init_latents.shape
_UpperCAmelCase : Optional[int] = randn_tensor(lowerCamelCase__ , generator=lowerCamelCase__ , device=lowerCamelCase__ , dtype=lowerCamelCase__ )
# get latents
print("add noise to latents at timestep" , lowerCamelCase__ )
_UpperCAmelCase : List[Any] = self.scheduler.add_noise(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
_UpperCAmelCase : List[Any] = init_latents
return latents
@torch.no_grad()
def __call__( self : Any , lowerCamelCase__ : Union[torch.FloatTensor, PIL.Image.Image] = None , lowerCamelCase__ : float = 0.8 , lowerCamelCase__ : int = 1 , lowerCamelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCamelCase__ : float = 0.0 , lowerCamelCase__ : int = 50 , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : Optional[str] = "pil" , lowerCamelCase__ : bool = True , ) ->Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
self.check_inputs(lowerCamelCase__ )
# 2. Preprocess image
_UpperCAmelCase : Dict = preprocess(lowerCamelCase__ )
# 3. set timesteps
self.scheduler.set_timesteps(lowerCamelCase__ , device=self.device )
_UpperCAmelCase , _UpperCAmelCase : Any = self.get_timesteps(lowerCamelCase__ , lowerCamelCase__ , self.device )
_UpperCAmelCase : List[Any] = timesteps[:1].repeat(lowerCamelCase__ )
# 4. Prepare latent variables
_UpperCAmelCase : Optional[int] = self.prepare_latents(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , self.unet.dtype , self.device , lowerCamelCase__ )
_UpperCAmelCase : Any = latents
# 5. Denoising loop
for t in self.progress_bar(lowerCamelCase__ ):
# 1. predict noise model_output
_UpperCAmelCase : Union[str, Any] = self.unet(lowerCamelCase__ , lowerCamelCase__ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
_UpperCAmelCase : int = self.scheduler.step(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , eta=lowerCamelCase__ , use_clipped_model_output=lowerCamelCase__ , generator=lowerCamelCase__ , ).prev_sample
_UpperCAmelCase : Dict = (image / 2 + 0.5).clamp(0 , 1 )
_UpperCAmelCase : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_UpperCAmelCase : str = self.numpy_to_pil(lowerCamelCase__ )
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=lowerCamelCase__ )
| 322
| 1
|
'''simple docstring'''
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
# Initialise PyTorch model.
# If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
# TapasConfig to False.
# initialize configuration from json file
_UpperCAmelCase : List[Any] = TapasConfig.from_json_file(__lowerCAmelCase )
# set absolute/relative position embeddings parameter
_UpperCAmelCase : Optional[Any] = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
_UpperCAmelCase : int = TapasForQuestionAnswering(config=__lowerCAmelCase )
elif task == "WTQ":
# run_task_main.py hparams
_UpperCAmelCase : Any = 4
_UpperCAmelCase : List[Any] = True
# hparam_utils.py hparams
_UpperCAmelCase : str = 0.6_6_4_6_9_4
_UpperCAmelCase : int = 0.2_0_7_9_5_1
_UpperCAmelCase : List[Any] = 0.1_2_1_1_9_4
_UpperCAmelCase : Any = True
_UpperCAmelCase : Optional[Any] = True
_UpperCAmelCase : List[Any] = False
_UpperCAmelCase : Union[str, Any] = 0.0_3_5_2_5_1_3
_UpperCAmelCase : Tuple = TapasForQuestionAnswering(config=__lowerCAmelCase )
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
_UpperCAmelCase : List[str] = 4
_UpperCAmelCase : Optional[int] = False
# hparam_utils.py hparams
_UpperCAmelCase : int = 3_6.4_5_1_9
_UpperCAmelCase : List[Any] = 0.9_0_3_4_2_1
_UpperCAmelCase : int = 2_2_2.0_8_8
_UpperCAmelCase : Tuple = True
_UpperCAmelCase : Tuple = True
_UpperCAmelCase : str = True
_UpperCAmelCase : Optional[int] = 0.7_6_3_1_4_1
_UpperCAmelCase : int = TapasForQuestionAnswering(config=__lowerCAmelCase )
elif task == "TABFACT":
_UpperCAmelCase : Tuple = TapasForSequenceClassification(config=__lowerCAmelCase )
elif task == "MLM":
_UpperCAmelCase : List[str] = TapasForMaskedLM(config=__lowerCAmelCase )
elif task == "INTERMEDIATE_PRETRAINING":
_UpperCAmelCase : Optional[Any] = TapasModel(config=__lowerCAmelCase )
else:
raise ValueError(F"""Task {task} not supported.""" )
print(F"""Building PyTorch model from configuration: {config}""" )
# Load weights from tf checkpoint
load_tf_weights_in_tapas(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# Save pytorch-model (weights and configuration)
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(__lowerCAmelCase )
# Save tokenizer files
print(F"""Save tokenizer files to {pytorch_dump_path}""" )
_UpperCAmelCase : Any = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt" , model_max_length=512 )
tokenizer.save_pretrained(__lowerCAmelCase )
print("Used relative position embeddings:" , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task', default='SQA', type=str, help='Model task for which to convert a checkpoint. Defaults to SQA.'
)
parser.add_argument(
'--reset_position_index_per_cell',
default=False,
action='store_true',
        help='Whether to use relative position embeddings or not. Defaults to False.',
)
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--tapas_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained TAPAS model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowerCamelCase__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
| 322
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
lowerCamelCase__ = list[list[float | int]]
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : int = len(__lowerCAmelCase )
_UpperCAmelCase : Matrix = [[0 for _ in range(size + 1 )] for _ in range(__lowerCAmelCase )]
_UpperCAmelCase : int
_UpperCAmelCase : int
_UpperCAmelCase : int
_UpperCAmelCase : int
_UpperCAmelCase : int
_UpperCAmelCase : float
for row in range(__lowerCAmelCase ):
for col in range(__lowerCAmelCase ):
_UpperCAmelCase : Optional[Any] = matrix[row][col]
_UpperCAmelCase : Optional[int] = vector[row][0]
_UpperCAmelCase : int = 0
_UpperCAmelCase : Union[str, Any] = 0
while row < size and col < size:
# pivoting
_UpperCAmelCase : Optional[Any] = max((abs(augmented[rowa][col] ), rowa) for rowa in range(__lowerCAmelCase , __lowerCAmelCase ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
_UpperCAmelCase , _UpperCAmelCase : str = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , __lowerCAmelCase ):
_UpperCAmelCase : Dict = augmented[rowa][col] / augmented[row][col]
_UpperCAmelCase : Optional[Any] = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , __lowerCAmelCase ):
for row in range(__lowerCAmelCase ):
_UpperCAmelCase : Dict = augmented[row][col] / augmented[col][col]
for cola in range(__lowerCAmelCase , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(__lowerCAmelCase )
]
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : int = len(__lowerCAmelCase )
_UpperCAmelCase : Matrix = [[0 for _ in range(__lowerCAmelCase )] for _ in range(__lowerCAmelCase )]
_UpperCAmelCase : Matrix = [[0] for _ in range(__lowerCAmelCase )]
_UpperCAmelCase : Matrix
_UpperCAmelCase : int
_UpperCAmelCase : int
_UpperCAmelCase : int
for x_val, y_val in enumerate(__lowerCAmelCase ):
for col in range(__lowerCAmelCase ):
_UpperCAmelCase : Dict = (x_val + 1) ** (size - col - 1)
_UpperCAmelCase : int = y_val
_UpperCAmelCase : List[str] = solve(__lowerCAmelCase , __lowerCAmelCase )
def interpolated_func(__lowerCAmelCase ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(__lowerCAmelCase ) )
return interpolated_func
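# Illustrative sketch (using the original helper names the obfuscated
# definitions stand in for): interpolate([1, 8, 27]) fits the quadratic
# 6*n**2 - 11*n + 6 through (1, 1), (2, 8), (3, 27); evaluated at n=4 it
# yields 58, the first incorrect term for u(n) = n**3 (which gives 64).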
def __lowerCAmelCase (__lowerCAmelCase ):
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def __lowerCAmelCase (__lowerCAmelCase = question_function , __lowerCAmelCase = 10 ):
_UpperCAmelCase : list[int] = [func(__lowerCAmelCase ) for x_val in range(1 , order + 1 )]
_UpperCAmelCase : list[Callable[[int], int]] = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
_UpperCAmelCase : int = 0
_UpperCAmelCase : Callable[[int], int]
_UpperCAmelCase : int
for poly in polynomials:
_UpperCAmelCase : int = 1
while func(__lowerCAmelCase ) == poly(__lowerCAmelCase ):
x_val += 1
ret += poly(__lowerCAmelCase )
return ret
if __name__ == "__main__":
print(F'''{solution() = }''')
| 322
| 1
|
'''simple docstring'''
import math
def __lowerCAmelCase ():
_UpperCAmelCase : str = input("Enter message: " )
_UpperCAmelCase : Union[str, Any] = int(input(F"""Enter key [2-{len(__lowerCAmelCase ) - 1}]: """ ) )
_UpperCAmelCase : List[str] = input("Encryption/Decryption [e/d]: " )
if mode.lower().startswith("e" ):
_UpperCAmelCase : int = encrypt_message(__lowerCAmelCase , __lowerCAmelCase )
elif mode.lower().startswith("d" ):
_UpperCAmelCase : Optional[int] = decrypt_message(__lowerCAmelCase , __lowerCAmelCase )
# Append pipe symbol (vertical bar) to identify spaces at the end.
print(F"""Output:\n{text + '|'}""" )
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : Any = [""] * key
for col in range(__lowerCAmelCase ):
_UpperCAmelCase : Dict = col
while pointer < len(__lowerCAmelCase ):
cipher_text[col] += message[pointer]
pointer += key
return "".join(__lowerCAmelCase )
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : Any = math.ceil(len(__lowerCAmelCase ) / key )
_UpperCAmelCase : str = key
_UpperCAmelCase : List[Any] = (num_cols * num_rows) - len(__lowerCAmelCase )
_UpperCAmelCase : str = [""] * num_cols
_UpperCAmelCase : List[Any] = 0
_UpperCAmelCase : List[Any] = 0
for symbol in message:
plain_text[col] += symbol
col += 1
if (
(col == num_cols)
or (col == num_cols - 1)
and (row >= num_rows - num_shaded_boxes)
):
_UpperCAmelCase : int = 0
row += 1
return "".join(__lowerCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 322
|
'''simple docstring'''
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 322
| 1
|
'''simple docstring'''
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'facebook/encodec_24khz': 'https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json',
'facebook/encodec_48khz': 'https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json',
}
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : Tuple = "encodec"
def __init__( self : int , lowerCamelCase__ : Optional[int]=[1.5, 3.0, 6.0, 1_2.0, 2_4.0] , lowerCamelCase__ : Optional[int]=2_40_00 , lowerCamelCase__ : Any=1 , lowerCamelCase__ : List[Any]=False , lowerCamelCase__ : str=None , lowerCamelCase__ : List[str]=None , lowerCamelCase__ : Tuple=1_28 , lowerCamelCase__ : Optional[int]=32 , lowerCamelCase__ : Dict=1 , lowerCamelCase__ : int=[8, 5, 4, 2] , lowerCamelCase__ : Any="weight_norm" , lowerCamelCase__ : Any=7 , lowerCamelCase__ : Any=7 , lowerCamelCase__ : List[Any]=3 , lowerCamelCase__ : List[Any]=2 , lowerCamelCase__ : Union[str, Any]=True , lowerCamelCase__ : int="reflect" , lowerCamelCase__ : Optional[Any]=2 , lowerCamelCase__ : Dict=2 , lowerCamelCase__ : Tuple=1.0 , lowerCamelCase__ : List[str]=10_24 , lowerCamelCase__ : str=None , lowerCamelCase__ : Optional[Any]=True , **lowerCamelCase__ : int , ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : int = target_bandwidths
_UpperCAmelCase : List[str] = sampling_rate
_UpperCAmelCase : Dict = audio_channels
_UpperCAmelCase : Union[str, Any] = normalize
_UpperCAmelCase : List[str] = chunk_length_s
_UpperCAmelCase : List[str] = overlap
_UpperCAmelCase : Optional[int] = hidden_size
_UpperCAmelCase : Optional[int] = num_filters
_UpperCAmelCase : Dict = num_residual_layers
_UpperCAmelCase : Tuple = upsampling_ratios
_UpperCAmelCase : Any = norm_type
_UpperCAmelCase : Tuple = kernel_size
_UpperCAmelCase : List[Any] = last_kernel_size
_UpperCAmelCase : int = residual_kernel_size
_UpperCAmelCase : List[str] = dilation_growth_rate
_UpperCAmelCase : Any = use_causal_conv
_UpperCAmelCase : Tuple = pad_mode
_UpperCAmelCase : str = compress
_UpperCAmelCase : Optional[Any] = num_lstm_layers
_UpperCAmelCase : Optional[Any] = trim_right_ratio
_UpperCAmelCase : Optional[Any] = codebook_size
_UpperCAmelCase : List[str] = codebook_dim if codebook_dim is not None else hidden_size
_UpperCAmelCase : Any = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
F"""self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}""" )
super().__init__(**lowerCamelCase__ )
@property
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Optional[int]:
'''simple docstring'''
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def lowerCAmelCase__ ( self : str ) ->Optional[int]:
'''simple docstring'''
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def lowerCAmelCase__ ( self : Optional[Any] ) ->int:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def lowerCAmelCase__ ( self : Optional[Any] ) ->int:
'''simple docstring'''
return int(10_00 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
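        # Sanity check for the default 24 kHz config (values assumed from the
        # defaults above): hop_length = 8 * 5 * 4 * 2 = 320, so
        # frame_rate = ceil(24000 / 320) = 75, and with target_bandwidths[-1]
        # = 24.0 this property gives int(1000 * 24.0 // 750) = 32 quantizers.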
| 322
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
lowerCamelCase__ = TypeVar('T')
class lowerCAmelCase__ ( Generic[T] ):
def __init__( self : Union[str, Any] , lowerCamelCase__ : T ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase : Dict = data
_UpperCAmelCase : Node[T] | None = None
def __str__( self : Any ) ->str:
'''simple docstring'''
return F"""{self.data}"""
class lowerCAmelCase__ ( Generic[T] ):
def __init__( self : Tuple ) ->None:
'''simple docstring'''
_UpperCAmelCase : Node[T] | None = None
def __iter__( self : List[str] ) ->Iterator[T]:
'''simple docstring'''
_UpperCAmelCase : Any = self.top
while node:
yield node.data
_UpperCAmelCase : Dict = node.next
def __str__( self : Dict ) ->str:
'''simple docstring'''
return "->".join([str(lowerCamelCase__ ) for item in self] )
def __len__( self : Optional[int] ) ->int:
'''simple docstring'''
return len(tuple(iter(self ) ) )
def lowerCAmelCase__ ( self : List[Any] ) ->bool:
'''simple docstring'''
return self.top is None
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : T ) ->None:
'''simple docstring'''
_UpperCAmelCase : List[Any] = Node(lowerCamelCase__ )
if not self.is_empty():
_UpperCAmelCase : Tuple = self.top
_UpperCAmelCase : List[str] = node
def lowerCAmelCase__ ( self : Union[str, Any] ) ->T:
'''simple docstring'''
if self.is_empty():
raise IndexError("pop from empty stack" )
assert isinstance(self.top , lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = self.top
_UpperCAmelCase : Optional[Any] = self.top.next
return pop_node.data
def lowerCAmelCase__ ( self : Union[str, Any] ) ->T:
'''simple docstring'''
if self.is_empty():
raise IndexError("peek from empty stack" )
assert self.top is not None
return self.top.data
def lowerCAmelCase__ ( self : List[Any] ) ->None:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = None
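# Minimal usage sketch (class and method names assumed from the generic
# linked-list stack defined above):
# stack: Stack[int] = Stack()
# stack.push(1)
# stack.push(2)
# assert stack.peek() == 2 and len(stack) == 2
# assert stack.pop() == 2 and str(stack) == "1"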
if __name__ == "__main__":
from doctest import testmod
testmod()
| 322
| 1
|
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase=0.9_9_9 , __lowerCAmelCase="cosine" , ):
if alpha_transform_type == "cosine":
def alpha_bar_fn(__lowerCAmelCase ):
return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(__lowerCAmelCase ):
return math.exp(t * -1_2.0 )
else:
raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
_UpperCAmelCase : Optional[int] = []
for i in range(__lowerCAmelCase ):
_UpperCAmelCase : Any = i / num_diffusion_timesteps
_UpperCAmelCase : Any = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__lowerCAmelCase ) / alpha_bar_fn(__lowerCAmelCase ) , __lowerCAmelCase ) )
return torch.tensor(__lowerCAmelCase , dtype=torch.floataa )
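# In formula form (cosine variant, as implemented above):
# beta_i = min(1 - alpha_bar((i+1)/T) / alpha_bar(i/T), max_beta), where
# alpha_bar(t) = cos((t + 0.008) / 1.008 * pi / 2) ** 2 and T is
# num_diffusion_timesteps.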
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ ):
lowerCAmelCase : Optional[int] = [e.name for e in KarrasDiffusionSchedulers]
lowerCAmelCase : Union[str, Any] = 2
@register_to_config
def __init__( self : Optional[Any] , lowerCamelCase__ : int = 10_00 , lowerCamelCase__ : float = 0.0_0_0_8_5 , lowerCamelCase__ : float = 0.0_1_2 , lowerCamelCase__ : str = "linear" , lowerCamelCase__ : Optional[Union[np.ndarray, List[float]]] = None , lowerCamelCase__ : str = "epsilon" , lowerCamelCase__ : str = "linspace" , lowerCamelCase__ : int = 0 , ) ->List[Any]:
'''simple docstring'''
if trained_betas is not None:
_UpperCAmelCase : Dict = torch.tensor(lowerCamelCase__ , dtype=torch.floataa )
elif beta_schedule == "linear":
_UpperCAmelCase : Any = torch.linspace(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_UpperCAmelCase : Dict = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , lowerCamelCase__ , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_UpperCAmelCase : Optional[int] = betas_for_alpha_bar(lowerCamelCase__ )
else:
raise NotImplementedError(F"""{beta_schedule} does is not implemented for {self.__class__}""" )
_UpperCAmelCase : Optional[int] = 1.0 - self.betas
_UpperCAmelCase : int = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : int=None ) ->Union[str, Any]:
'''simple docstring'''
if schedule_timesteps is None:
_UpperCAmelCase : Tuple = self.timesteps
_UpperCAmelCase : int = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
_UpperCAmelCase : List[Any] = 1 if len(lowerCamelCase__ ) > 1 else 0
else:
_UpperCAmelCase : str = timestep.cpu().item() if torch.is_tensor(lowerCamelCase__ ) else timestep
_UpperCAmelCase : Optional[int] = self._index_counter[timestep_int]
return indices[pos].item()
@property
def lowerCAmelCase__ ( self : int ) ->List[Any]:
'''simple docstring'''
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : torch.FloatTensor , lowerCamelCase__ : Union[float, torch.FloatTensor] , ) ->torch.FloatTensor:
'''simple docstring'''
_UpperCAmelCase : Dict = self.index_for_timestep(lowerCamelCase__ )
if self.state_in_first_order:
_UpperCAmelCase : Optional[int] = self.sigmas[step_index]
else:
_UpperCAmelCase : List[Any] = self.sigmas_interpol[step_index]
_UpperCAmelCase : List[Any] = sample / ((sigma**2 + 1) ** 0.5)
return sample
def lowerCAmelCase__ ( self : str , lowerCamelCase__ : int , lowerCamelCase__ : Union[str, torch.device] = None , lowerCamelCase__ : Optional[int] = None , ) ->int:
'''simple docstring'''
_UpperCAmelCase : Dict = num_inference_steps
_UpperCAmelCase : Union[str, Any] = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
_UpperCAmelCase : Union[str, Any] = np.linspace(0 , num_train_timesteps - 1 , lowerCamelCase__ , dtype=lowerCamelCase__ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
_UpperCAmelCase : Optional[Any] = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_UpperCAmelCase : Optional[Any] = (np.arange(0 , lowerCamelCase__ ) * step_ratio).round()[::-1].copy().astype(lowerCamelCase__ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
_UpperCAmelCase : Union[str, Any] = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_UpperCAmelCase : Any = (np.arange(lowerCamelCase__ , 0 , -step_ratio )).round().copy().astype(lowerCamelCase__ )
timesteps -= 1
else:
raise ValueError(
F"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" )
_UpperCAmelCase : str = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
_UpperCAmelCase : int = torch.from_numpy(np.log(lowerCamelCase__ ) ).to(lowerCamelCase__ )
_UpperCAmelCase : Dict = np.interp(lowerCamelCase__ , np.arange(0 , len(lowerCamelCase__ ) ) , lowerCamelCase__ )
_UpperCAmelCase : Dict = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
_UpperCAmelCase : Dict = torch.from_numpy(lowerCamelCase__ ).to(device=lowerCamelCase__ )
# interpolate sigmas
_UpperCAmelCase : Union[str, Any] = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
_UpperCAmelCase : List[Any] = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
_UpperCAmelCase : int = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(lowerCamelCase__ ).startswith("mps" ):
# mps does not support float64
_UpperCAmelCase : Dict = torch.from_numpy(lowerCamelCase__ ).to(lowerCamelCase__ , dtype=torch.floataa )
else:
_UpperCAmelCase : int = torch.from_numpy(lowerCamelCase__ ).to(lowerCamelCase__ )
# interpolate timesteps
_UpperCAmelCase : List[str] = self.sigma_to_t(lowerCamelCase__ ).to(lowerCamelCase__ , dtype=timesteps.dtype )
_UpperCAmelCase : Optional[int] = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
_UpperCAmelCase : str = torch.cat([timesteps[:1], interleaved_timesteps] )
_UpperCAmelCase : List[str] = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
_UpperCAmelCase : List[str] = defaultdict(lowerCamelCase__ )
def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : Any ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase : List[Any] = sigma.log()
# get distribution
_UpperCAmelCase : List[str] = log_sigma - self.log_sigmas[:, None]
# get sigmas range
_UpperCAmelCase : List[str] = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
_UpperCAmelCase : Tuple = low_idx + 1
_UpperCAmelCase : List[str] = self.log_sigmas[low_idx]
_UpperCAmelCase : Optional[Any] = self.log_sigmas[high_idx]
# interpolate sigmas
_UpperCAmelCase : int = (low - log_sigma) / (low - high)
_UpperCAmelCase : Tuple = w.clamp(0 , 1 )
# transform interpolation to time range
_UpperCAmelCase : List[str] = (1 - w) * low_idx + w * high_idx
_UpperCAmelCase : Any = t.view(sigma.shape )
return t
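        # Sketch of the interpolation above: if log(sigma) lies 30% of the way
        # from self.log_sigmas[low_idx] to self.log_sigmas[high_idx], then
        # w = 0.3 and the fractional timestep is 0.7 * low_idx + 0.3 * high_idx.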
@property
def lowerCAmelCase__ ( self : List[Any] ) ->str:
'''simple docstring'''
return self.sample is None
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : Union[torch.FloatTensor, np.ndarray] , lowerCamelCase__ : Union[float, torch.FloatTensor] , lowerCamelCase__ : Union[torch.FloatTensor, np.ndarray] , lowerCamelCase__ : bool = True , ) ->Union[SchedulerOutput, Tuple]:
'''simple docstring'''
_UpperCAmelCase : int = self.index_for_timestep(lowerCamelCase__ )
# advance index counter by 1
_UpperCAmelCase : int = timestep.cpu().item() if torch.is_tensor(lowerCamelCase__ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
_UpperCAmelCase : Any = self.sigmas[step_index]
_UpperCAmelCase : Any = self.sigmas_interpol[step_index + 1]
_UpperCAmelCase : Any = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
_UpperCAmelCase : Dict = self.sigmas[step_index - 1]
_UpperCAmelCase : List[str] = self.sigmas_interpol[step_index]
_UpperCAmelCase : Optional[int] = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
_UpperCAmelCase : List[Any] = 0
_UpperCAmelCase : Any = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
_UpperCAmelCase : Dict = sigma_hat if self.state_in_first_order else sigma_interpol
_UpperCAmelCase : Tuple = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
_UpperCAmelCase : Union[str, Any] = sigma_hat if self.state_in_first_order else sigma_interpol
_UpperCAmelCase : Optional[int] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError("prediction_type not implemented yet: sample" )
else:
raise ValueError(
F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
_UpperCAmelCase : Optional[int] = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
_UpperCAmelCase : int = sigma_interpol - sigma_hat
# store for 2nd order step
_UpperCAmelCase : List[Any] = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
_UpperCAmelCase : Tuple = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
_UpperCAmelCase : int = sigma_next - sigma_hat
_UpperCAmelCase : Tuple = self.sample
_UpperCAmelCase : Optional[Any] = None
_UpperCAmelCase : Union[str, Any] = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowerCamelCase__ )
def lowerCAmelCase__ ( self : Optional[Any] , lowerCamelCase__ : torch.FloatTensor , lowerCamelCase__ : torch.FloatTensor , lowerCamelCase__ : torch.FloatTensor , ) ->torch.FloatTensor:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(lowerCamelCase__ ):
# mps does not support float64
_UpperCAmelCase : Tuple = self.timesteps.to(original_samples.device , dtype=torch.floataa )
_UpperCAmelCase : Optional[int] = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
_UpperCAmelCase : Optional[Any] = self.timesteps.to(original_samples.device )
_UpperCAmelCase : Tuple = timesteps.to(original_samples.device )
_UpperCAmelCase : List[Any] = [self.index_for_timestep(lowerCamelCase__ , lowerCamelCase__ ) for t in timesteps]
_UpperCAmelCase : Union[str, Any] = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
_UpperCAmelCase : Optional[Any] = sigma.unsqueeze(-1 )
_UpperCAmelCase : str = original_samples + noise * sigma
return noisy_samples
def __len__( self : List[str] ) ->Any:
'''simple docstring'''
return self.config.num_train_timesteps
| 322
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json'
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : int = "speech_to_text_2"
lowerCAmelCase : str = ["past_key_values"]
lowerCAmelCase : int = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : Optional[Any] , lowerCamelCase__ : Tuple=1_00_00 , lowerCamelCase__ : Any=6 , lowerCamelCase__ : Tuple=20_48 , lowerCamelCase__ : List[Any]=4 , lowerCamelCase__ : Tuple=0.0 , lowerCamelCase__ : List[Any]=True , lowerCamelCase__ : Tuple="relu" , lowerCamelCase__ : Dict=2_56 , lowerCamelCase__ : List[Any]=0.1 , lowerCamelCase__ : List[Any]=0.0 , lowerCamelCase__ : Optional[int]=0.0 , lowerCamelCase__ : List[Any]=0.0_2 , lowerCamelCase__ : Tuple=2 , lowerCamelCase__ : Optional[int]=True , lowerCamelCase__ : Any=1 , lowerCamelCase__ : int=0 , lowerCamelCase__ : str=2 , lowerCamelCase__ : List[Any]=10_24 , **lowerCamelCase__ : str , ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Any = vocab_size
_UpperCAmelCase : Optional[int] = d_model
_UpperCAmelCase : List[Any] = decoder_ffn_dim
_UpperCAmelCase : Any = decoder_layers
_UpperCAmelCase : int = decoder_attention_heads
_UpperCAmelCase : Any = dropout
_UpperCAmelCase : List[Any] = attention_dropout
_UpperCAmelCase : Optional[int] = activation_dropout
_UpperCAmelCase : List[Any] = activation_function
_UpperCAmelCase : int = init_std
_UpperCAmelCase : Dict = decoder_layerdrop
_UpperCAmelCase : str = use_cache
_UpperCAmelCase : Union[str, Any] = decoder_layers
_UpperCAmelCase : Optional[Any] = scale_embedding # scale factor will be sqrt(d_model) if True
_UpperCAmelCase : Any = max_target_positions
super().__init__(
pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ , decoder_start_token_id=lowerCamelCase__ , **lowerCamelCase__ , )
| 322
| 1
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase__ ( UpperCAmelCase__ , unittest.TestCase ):
lowerCAmelCase : str = AudioLDMPipeline
lowerCAmelCase : int = TEXT_TO_AUDIO_PARAMS
lowerCAmelCase : Optional[Any] = TEXT_TO_AUDIO_BATCH_PARAMS
lowerCAmelCase : Union[str, Any] = frozenset(
[
"num_inference_steps",
"num_waveforms_per_prompt",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
] )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->str:
'''simple docstring'''
torch.manual_seed(0 )
_UpperCAmelCase : Union[str, Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=(32, 64) , class_embed_type="simple_projection" , projection_class_embeddings_input_dim=32 , class_embeddings_concat=lowerCamelCase__ , )
_UpperCAmelCase : List[str] = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="scaled_linear" , clip_sample=lowerCamelCase__ , set_alpha_to_one=lowerCamelCase__ , )
torch.manual_seed(0 )
_UpperCAmelCase : Optional[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
_UpperCAmelCase : Optional[Any] = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , projection_dim=32 , )
_UpperCAmelCase : Any = ClapTextModelWithProjection(lowerCamelCase__ )
_UpperCAmelCase : List[str] = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta" , model_max_length=77 )
_UpperCAmelCase : str = SpeechTaHifiGanConfig(
model_in_dim=8 , sampling_rate=1_60_00 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=lowerCamelCase__ , )
_UpperCAmelCase : Dict = SpeechTaHifiGan(lowerCamelCase__ )
_UpperCAmelCase : int = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"vocoder": vocoder,
}
return components
def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Dict=0 ) ->List[str]:
'''simple docstring'''
if str(lowerCamelCase__ ).startswith("mps" ):
_UpperCAmelCase : Any = torch.manual_seed(lowerCamelCase__ )
else:
_UpperCAmelCase : str = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
_UpperCAmelCase : Tuple = {
"prompt": "A hammer hitting a wooden surface",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
}
return inputs
def lowerCAmelCase__ ( self : Tuple ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase : Any = self.get_dummy_components()
_UpperCAmelCase : Any = AudioLDMPipeline(**lowerCamelCase__ )
_UpperCAmelCase : int = audioldm_pipe.to(lowerCamelCase__ )
audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_UpperCAmelCase : Dict = self.get_dummy_inputs(lowerCamelCase__ )
_UpperCAmelCase : Any = audioldm_pipe(**lowerCamelCase__ )
_UpperCAmelCase : List[Any] = output.audios[0]
assert audio.ndim == 1
assert len(lowerCamelCase__ ) == 2_56
_UpperCAmelCase : Union[str, Any] = audio[:10]
_UpperCAmelCase : int = np.array(
[-0.0_0_5_0, 0.0_0_5_0, -0.0_0_6_0, 0.0_0_3_3, -0.0_0_2_6, 0.0_0_3_3, -0.0_0_2_7, 0.0_0_3_3, -0.0_0_2_8, 0.0_0_3_3] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def lowerCAmelCase__ ( self : Optional[int] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = self.get_dummy_components()
_UpperCAmelCase : Dict = AudioLDMPipeline(**lowerCamelCase__ )
_UpperCAmelCase : Any = audioldm_pipe.to(lowerCamelCase__ )
_UpperCAmelCase : Dict = audioldm_pipe.to(lowerCamelCase__ )
audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_UpperCAmelCase : int = self.get_dummy_inputs(lowerCamelCase__ )
_UpperCAmelCase : str = 3 * [inputs["prompt"]]
# forward
_UpperCAmelCase : int = audioldm_pipe(**lowerCamelCase__ )
_UpperCAmelCase : Any = output.audios[0]
_UpperCAmelCase : Tuple = self.get_dummy_inputs(lowerCamelCase__ )
_UpperCAmelCase : Tuple = 3 * [inputs.pop("prompt" )]
_UpperCAmelCase : Optional[Any] = audioldm_pipe.tokenizer(
lowerCamelCase__ , padding="max_length" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=lowerCamelCase__ , return_tensors="pt" , )
_UpperCAmelCase : Union[str, Any] = text_inputs["input_ids"].to(lowerCamelCase__ )
_UpperCAmelCase : Tuple = audioldm_pipe.text_encoder(
lowerCamelCase__ , )
_UpperCAmelCase : Union[str, Any] = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
_UpperCAmelCase : List[str] = F.normalize(lowerCamelCase__ , dim=-1 )
_UpperCAmelCase : Tuple = prompt_embeds
# forward
_UpperCAmelCase : Dict = audioldm_pipe(**lowerCamelCase__ )
_UpperCAmelCase : int = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def lowerCAmelCase__ ( self : Optional[Any] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = self.get_dummy_components()
_UpperCAmelCase : Optional[Any] = AudioLDMPipeline(**lowerCamelCase__ )
_UpperCAmelCase : int = audioldm_pipe.to(lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = audioldm_pipe.to(lowerCamelCase__ )
audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_UpperCAmelCase : int = self.get_dummy_inputs(lowerCamelCase__ )
_UpperCAmelCase : Tuple = 3 * ["this is a negative prompt"]
_UpperCAmelCase : str = negative_prompt
_UpperCAmelCase : Dict = 3 * [inputs["prompt"]]
# forward
_UpperCAmelCase : Dict = audioldm_pipe(**lowerCamelCase__ )
_UpperCAmelCase : str = output.audios[0]
_UpperCAmelCase : Tuple = self.get_dummy_inputs(lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = 3 * [inputs.pop("prompt" )]
_UpperCAmelCase : str = []
for p in [prompt, negative_prompt]:
_UpperCAmelCase : Optional[int] = audioldm_pipe.tokenizer(
lowerCamelCase__ , padding="max_length" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=lowerCamelCase__ , return_tensors="pt" , )
_UpperCAmelCase : Any = text_inputs["input_ids"].to(lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = audioldm_pipe.text_encoder(
lowerCamelCase__ , )
_UpperCAmelCase : Union[str, Any] = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
_UpperCAmelCase : Union[str, Any] = F.normalize(lowerCamelCase__ , dim=-1 )
embeds.append(lowerCamelCase__ )
_UpperCAmelCase , _UpperCAmelCase : Tuple = embeds
# forward
_UpperCAmelCase : List[Any] = audioldm_pipe(**lowerCamelCase__ )
_UpperCAmelCase : int = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def lowerCAmelCase__ ( self : Tuple ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase : int = self.get_dummy_components()
_UpperCAmelCase : Any = PNDMScheduler(skip_prk_steps=lowerCamelCase__ )
_UpperCAmelCase : List[Any] = AudioLDMPipeline(**lowerCamelCase__ )
_UpperCAmelCase : Dict = audioldm_pipe.to(lowerCamelCase__ )
audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_UpperCAmelCase : Any = self.get_dummy_inputs(lowerCamelCase__ )
_UpperCAmelCase : int = "egg cracking"
_UpperCAmelCase : Tuple = audioldm_pipe(**lowerCamelCase__ , negative_prompt=lowerCamelCase__ )
_UpperCAmelCase : int = output.audios[0]
assert audio.ndim == 1
assert len(lowerCamelCase__ ) == 2_56
_UpperCAmelCase : Dict = audio[:10]
_UpperCAmelCase : Optional[int] = np.array(
[-0.0_0_5_1, 0.0_0_5_0, -0.0_0_6_0, 0.0_0_3_4, -0.0_0_2_6, 0.0_0_3_3, -0.0_0_2_7, 0.0_0_3_3, -0.0_0_2_8, 0.0_0_3_2] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def lowerCAmelCase__ ( self : Dict ) ->str:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase : Dict = self.get_dummy_components()
_UpperCAmelCase : str = PNDMScheduler(skip_prk_steps=lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = AudioLDMPipeline(**lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = audioldm_pipe.to(lowerCamelCase__ )
audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_UpperCAmelCase : int = "A hammer hitting a wooden surface"
# test num_waveforms_per_prompt=1 (default)
_UpperCAmelCase : Optional[Any] = audioldm_pipe(lowerCamelCase__ , num_inference_steps=2 ).audios
assert audios.shape == (1, 2_56)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
_UpperCAmelCase : Dict = 2
_UpperCAmelCase : List[str] = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 2_56)
# test num_waveforms_per_prompt for single prompt
_UpperCAmelCase : Union[str, Any] = 2
_UpperCAmelCase : List[str] = audioldm_pipe(lowerCamelCase__ , num_inference_steps=2 , num_waveforms_per_prompt=lowerCamelCase__ ).audios
assert audios.shape == (num_waveforms_per_prompt, 2_56)
# test num_waveforms_per_prompt for batch of prompts
_UpperCAmelCase : Dict = 2
_UpperCAmelCase : Any = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=lowerCamelCase__ ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 2_56)
def lowerCAmelCase__ ( self : Optional[int] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Any = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase : Optional[Any] = self.get_dummy_components()
_UpperCAmelCase : Optional[Any] = AudioLDMPipeline(**lowerCamelCase__ )
_UpperCAmelCase : str = audioldm_pipe.to(lowerCamelCase__ )
audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_UpperCAmelCase : Any = audioldm_pipe.vocoder.config.sampling_rate
_UpperCAmelCase : Optional[Any] = self.get_dummy_inputs(lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = audioldm_pipe(audio_length_in_s=0.0_1_6 , **lowerCamelCase__ )
_UpperCAmelCase : List[Any] = output.audios[0]
assert audio.ndim == 1
assert len(lowerCamelCase__ ) / vocoder_sampling_rate == 0.0_1_6
_UpperCAmelCase : int = audioldm_pipe(audio_length_in_s=0.0_3_2 , **lowerCamelCase__ )
_UpperCAmelCase : List[Any] = output.audios[0]
assert audio.ndim == 1
assert len(lowerCamelCase__ ) / vocoder_sampling_rate == 0.0_3_2
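        # Consistency check (using the dummy vocoder's 16000 Hz sampling rate):
        # 0.016 s corresponds to 256 waveform samples and 0.032 s to 512.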
def lowerCAmelCase__ ( self : List[Any] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : str = self.get_dummy_components()
_UpperCAmelCase : List[str] = AudioLDMPipeline(**lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = audioldm_pipe.to(lowerCamelCase__ )
audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_UpperCAmelCase : Tuple = ["hey"]
_UpperCAmelCase : List[str] = audioldm_pipe(lowerCamelCase__ , num_inference_steps=1 )
_UpperCAmelCase : Any = output.audios.shape
assert audio_shape == (1, 2_56)
_UpperCAmelCase : str = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
_UpperCAmelCase : Any = SpeechTaHifiGan(lowerCamelCase__ ).to(lowerCamelCase__ )
_UpperCAmelCase : List[Any] = audioldm_pipe(lowerCamelCase__ , num_inference_steps=1 )
_UpperCAmelCase : List[Any] = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 2_56)
def lowerCAmelCase__ ( self : Tuple ) ->Optional[Any]:
'''simple docstring'''
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=lowerCamelCase__ )
def lowerCAmelCase__ ( self : List[str] ) ->Dict:
'''simple docstring'''
self._test_inference_batch_single_identical(test_mean_pixel_difference=lowerCamelCase__ )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Optional[int]:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowerCamelCase__ )
@slow
class lowerCAmelCase__ ( unittest.TestCase ):
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Tuple:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : Any , lowerCamelCase__ : str="cpu" , lowerCamelCase__ : Optional[int]=torch.floataa , lowerCamelCase__ : Optional[Any]=0 ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Tuple = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
_UpperCAmelCase : Dict = np.random.RandomState(lowerCamelCase__ ).standard_normal((1, 8, 1_28, 16) )
_UpperCAmelCase : Optional[Any] = torch.from_numpy(lowerCamelCase__ ).to(device=lowerCamelCase__ , dtype=lowerCamelCase__ )
_UpperCAmelCase : Dict = {
"prompt": "A hammer hitting a wooden surface",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 2.5,
}
return inputs
def lowerCAmelCase__ ( self : List[str] ) ->Any:
'''simple docstring'''
_UpperCAmelCase : List[str] = AudioLDMPipeline.from_pretrained("cvssp/audioldm" )
_UpperCAmelCase : Any = audioldm_pipe.to(lowerCamelCase__ )
audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = self.get_inputs(lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = 25
_UpperCAmelCase : Optional[Any] = audioldm_pipe(**lowerCamelCase__ ).audios[0]
assert audio.ndim == 1
assert len(lowerCamelCase__ ) == 8_19_20
_UpperCAmelCase : Optional[Any] = audio[7_72_30:7_72_40]
_UpperCAmelCase : int = np.array(
[-0.4_8_8_4, -0.4_6_0_7, 0.0_0_2_3, 0.5_0_0_7, 0.5_8_9_6, 0.5_1_5_1, 0.3_8_1_3, -0.0_2_0_8, -0.3_6_8_7, -0.4_3_1_5] )
_UpperCAmelCase : Optional[int] = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1E-2
def lowerCAmelCase__ ( self : Tuple ) ->str:
'''simple docstring'''
_UpperCAmelCase : List[Any] = AudioLDMPipeline.from_pretrained("cvssp/audioldm" )
_UpperCAmelCase : List[Any] = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
_UpperCAmelCase : int = audioldm_pipe.to(lowerCamelCase__ )
audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_UpperCAmelCase : Any = self.get_inputs(lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = audioldm_pipe(**lowerCamelCase__ ).audios[0]
assert audio.ndim == 1
assert len(lowerCamelCase__ ) == 8_19_20
_UpperCAmelCase : Dict = audio[2_77_80:2_77_90]
_UpperCAmelCase : Union[str, Any] = np.array([-0.2_1_3_1, -0.0_8_7_3, -0.0_1_2_4, -0.0_1_8_9, 0.0_5_6_9, 0.1_3_7_3, 0.1_8_8_3, 0.2_8_8_6, 0.3_2_9_7, 0.2_2_1_2] )
_UpperCAmelCase : Union[str, Any] = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3E-2
| 322
|
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
lowerCamelCase__ = logging.getLogger(__name__)
torch.set_grad_enabled(False)
lowerCamelCase__ = 'cuda' if torch.cuda.is_available() else 'cpu'
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase=100 , __lowerCAmelCase=" " ):
_UpperCAmelCase : Any = text.split(__lowerCAmelCase )
return [character.join(text[i : i + n] ).strip() for i in range(0 , len(__lowerCAmelCase ) , __lowerCAmelCase )]
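# e.g. (hypothetical call, original name assumed): split_text("a b c d e", n=2)
# returns ["a b", "c d", "e"]; the default n=100 yields 100-word passages.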
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase , _UpperCAmelCase : Dict = [], []
for title, text in zip(documents["title"] , documents["text"] ):
if text is not None:
for passage in split_text(__lowerCAmelCase ):
titles.append(title if title is not None else "" )
texts.append(__lowerCAmelCase )
return {"title": titles, "text": texts}
def embed(documents, ctx_encoder, ctx_tokenizer):
    """Compute the DPR embeddings of document passages."""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt")["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def main(rag_example_args, processing_args, index_hnsw_args):
    ######################################
    logger.info("Step 1 - Create the dataset")
    ######################################
    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage
    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"
    # You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"])
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)
    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )
    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset
    ######################################
    logger.info("Step 2 - Index the dataset")
    ######################################
    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)
    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index
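    # Optional sanity check (a sketch, not part of the original script): embed a question with
    # the matching DPR question encoder and retrieve the nearest passages from the index.
    # from transformers import DPRQuestionEncoder, DPRQuestionEncoderTokenizerFast
    # q_encoder = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
    # q_tokenizer = DPRQuestionEncoderTokenizerFast.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
    # question_embedding = q_encoder(**q_tokenizer("What does Moses' rod turn into ?", return_tensors="pt"))[0][0].numpy()
    # scores, retrieved_examples = dataset.get_nearest_examples("embeddings", question_embedding, k=5)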
@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )
@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )
@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
lowerCamelCase__ = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
lowerCamelCase__ = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
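# Usage sketch (the script file name and paths below are illustrative assumptions):
# python use_own_knowledge_dataset.py \
#     --csv_path path/to/my_knowledge_dataset.csv \
#     --output_dir path/to/my_knowledge_dataset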
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
    def __call__(self, images, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None, boxes: Union[List[List[int]], List[List[List[int]]]] = None, word_labels: Optional[Union[List[int], List[List[int]]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        # verify inputs
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.")
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True.")
        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]
        encoded_inputs = self.tokenizer(text=text if text is not None else features["words"], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features["boxes"], word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images
        return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])
        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}")
        return images_with_overflow
    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]
    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
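# Minimal usage sketch (the checkpoint name is an illustrative assumption, not part of this module):
# from transformers import LayoutLMv3Processor
# processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
# encoding = processor(image, return_tensors="pt")  # OCR words/boxes come from the image processor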
'''simple docstring'''
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
'return_dict': False,
'output_hidden_states': True,
'output_attentions': True,
'torchscript': True,
'torch_dtype': 'float16',
'use_bfloat16': True,
'tf_legacy_loss': True,
'pruned_heads': {'a': 1},
'tie_word_embeddings': False,
'is_decoder': True,
'cross_attention_hidden_size': 128,
'add_cross_attention': True,
'tie_encoder_decoder': True,
'max_length': 50,
'min_length': 3,
'do_sample': True,
'early_stopping': True,
'num_beams': 3,
'num_beam_groups': 3,
'diversity_penalty': 0.5,
'temperature': 2.0,
'top_k': 10,
'top_p': 0.7,
'typical_p': 0.2,
'repetition_penalty': 0.8,
'length_penalty': 0.8,
'no_repeat_ngram_size': 5,
'encoder_no_repeat_ngram_size': 5,
'bad_words_ids': [1, 2, 3],
'num_return_sequences': 3,
'chunk_size_feed_forward': 5,
'output_scores': True,
'return_dict_in_generate': True,
'forced_bos_token_id': 2,
'forced_eos_token_id': 3,
'remove_invalid_values': True,
'architectures': ['BertModel'],
'finetuning_task': 'translation',
'id2label': {0: 'label'},
'label2id': {'label': '0'},
'tokenizer_class': 'BertTokenizerFast',
'prefix': 'prefix',
'bos_token_id': 6,
'pad_token_id': 7,
'eos_token_id': 8,
'sep_token_id': 9,
'decoder_start_token_id': 10,
'exponential_decay_length_penalty': (5, 1.01),
'suppress_tokens': [0, 1],
'begin_suppress_tokens': 2,
'task_specific_params': {'translation': 'some_params'},
'problem_type': 'regression',
}
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-config")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-config-org")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-config")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37)
        config.push_to_hub("test-config", use_auth_token=self._token)
        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
        # Reset repo
        delete_repo(token=self._token, repo_id="test-config")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id="test-config", push_to_hub=True, use_auth_token=self._token)
        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37)
        config.push_to_hub("valid_org/test-config-org", use_auth_token=self._token)
        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-config-org")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-config-org", push_to_hub=True, use_auth_token=self._token)
        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_dynamic_config(self):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)
        config.push_to_hub("test-dynamic-config", use_auth_token=self._token)
        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig"})
        new_config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config", trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the CustomConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, "CustomConfig")
        self.assertEqual(new_config.attribute, 42)
class ConfigTestUtils(unittest.TestCase):
    def test_config_from_string(self):
        c = GPT2Config()
        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + "foo"  # str
        c.update_from_string(
            f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}")
        self.assertEqual(n_embd, c.n_embd, "mismatch for key: n_embd")
        self.assertEqual(resid_pdrop, c.resid_pdrop, "mismatch for key: resid_pdrop")
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, "mismatch for key: scale_attn_weights")
        self.assertEqual(summary_type, c.summary_type, "mismatch for key: summary_type")
    def test_config_common_kwargs_is_complete(self):
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"])
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                "The following keys are set with the default values in"
                " `test_configuration_common.config_common_kwargs` pick another value for them:"
                f" {', '.join(keys_with_defaults)}.")
    def test_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder")
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder", subfolder="bert")
        self.assertIsNotNone(config)
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP request to emulate the server being down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This checks that the fake head request was indeed called
            mock_head.assert_called()
    def test_legacy_load_from_url(self):
        _ = BertConfig.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json")
    def test_local_versioning(self):
        configuration = AutoConfig.from_pretrained("bert-base-cased")
        configuration.configuration_files = ["config.4.0.0.json"]
        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, "config.4.0.0.json"), "w"))
            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)
            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ["config.42.0.0.json"]
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, "config.4.0.0.json"), os.path.join(tmp_dir, "config.42.0.0.json"))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)
    def test_repo_versioning_before(self):
        repo = "hf-internal-testing/test-two-configs"
        import transformers as new_transformers
        new_transformers.configuration_utils.__version__ = "v4.0.0"
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True)
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})
        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers
        old_transformers.configuration_utils.__version__ = "v3.0.0"
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
'''simple docstring'''
from __future__ import annotations
def ceil_index(v, l, r, key):  # noqa: E741
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r
def longest_increasing_subsequence_length(v: list[int]) -> int:
    if len(v) == 0:
        return 0
    tail = [0] * len(v)
    length = 1
    tail[0] = v[0]
    for i in range(1, len(v)):
        if v[i] < tail[0]:
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            tail[length] = v[i]
            length += 1
        else:
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]
    return length
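# Worked example (sketch): longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6])
# returns 6; one longest increasing subsequence is [2, 3, 7, 8, 10, 13].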
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
from manim import *
class Stage1(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)
        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)
        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)
        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.play(
            Create(cpu_left_col, run_time=1),
            Create(cpu_right_col, run_time=1),
            Create(gpu_rect, run_time=1),
        )
        step_1 = MarkupText(
            f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.",
            font_size=24,
        )
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        step_1.move_to([2, 2, 0])
        self.play(Write(step_1, run_time=2.5), Write(key_text), Write(key))
        self.add(model)
        cpu_targs = []
        first_animations = []
        second_animations = []
        for i, rect in enumerate(model_base):
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)
            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))
        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
'''simple docstring'''
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class FillMaskPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING
    def tearDown(self):
        super().tearDown()
        # clean up as much GPU memory occupied by PyTorch as possible
        gc.collect()
        if is_torch_available():
            import torch
            torch.cuda.empty_cache()
    @require_tf
    def test_small_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="tf")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"},
                {"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"},
            ],
        )
        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is grouped",
                    "score": 2.1e-05,
                    "token": 38015,
                    "token_str": " grouped",
                },
                {
                    "sequence": "The largest city in France is accuser",
                    "score": 2.1e-05,
                    "token": 25506,
                    "token_str": " accuser",
                },
            ],
        )
        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"},
            ],
        )
    @require_torch
    def test_small_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="pt")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"},
                {"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )
        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is Maul",
                    "score": 2.2e-05,
                    "token": 35676,
                    "token_str": " Maul",
                },
                {"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )
        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"},
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
            ],
        )
        outputs = unmasker("My name is <mask> <mask>", top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                [
                    {"score": 2.2e-05, "token": 35676, "token_str": " Maul", "sequence": "<s>My name is Maul<mask></s>"},
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
                ],
                [
                    {"score": 2.2e-05, "token": 35676, "token_str": " Maul", "sequence": "<s>My name is<mask> Maul</s>"},
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
                ],
            ],
        )
    @require_torch_gpu
    def test_fp16_casting(self):
        pipe = pipeline("fill-mask", model="hf-internal-testing/tiny-random-distilbert", device=0, framework="pt")
        # convert model to fp16
        pipe.model.half()
        response = pipe("Paris is the [MASK] of France.")
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got casted back to float32
        # for postprocessing.
        self.assertIsInstance(response, list)
    @slow
    @require_torch
    def test_large_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="pt")
        self.run_large_test(unmasker)
    @slow
    @require_tf
    def test_large_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="tf")
        self.run_large_test(unmasker)
    def run_large_test(self, unmasker):
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
                {"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"},
            ],
        )
        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {
                    "sequence": "The largest city in France is Paris",
                    "score": 0.251,
                    "token": 2201,
                    "token_str": " Paris",
                },
                {
                    "sequence": "The largest city in France is Lyon",
                    "score": 0.214,
                    "token": 12790,
                    "token_str": " Lyon",
                },
            ],
        )
        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Clara", "score": 0.000, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"},
            ],
        )
    @require_torch
    def test_model_no_pad_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="pt")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])
    @require_tf
    def test_model_no_pad_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="tf")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])
    def get_test_pipeline(self, model, tokenizer, processor):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        examples = [
            f"This is another {tokenizer.mask_token} test",
        ]
        return fill_masker, examples
    def run_pipeline_test(self, fill_masker, examples):
        tokenizer = fill_masker.tokenizer
        model = fill_masker.model
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        outputs = fill_masker([f"This is a {tokenizer.mask_token}"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        outputs = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."])
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )
        with self.assertRaises(ValueError):
            fill_masker([None])
        # No mask_token is not supported
        with self.assertRaises(PipelineException):
            fill_masker("This is")
        self.run_test_top_k(model, tokenizer)
        self.run_test_targets(model, tokenizer)
        self.run_test_top_k_targets(model, tokenizer)
        self.fill_mask_with_duplicate_targets_and_top_k(model, tokenizer)
        self.fill_mask_with_multiple_masks(model, tokenizer)
    def run_test_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        targets = sorted(vocab.keys())[:2]
        # Pipeline argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, targets=targets)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))
        # Call argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))
        # Score equivalence
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        tokens = [top_mask["token_str"] for top_mask in outputs]
        scores = [top_mask["score"] for top_mask in outputs]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(tokens) == set(targets):
            unmasked_targets = fill_masker(f"This is a {tokenizer.mask_token}", targets=tokens)
            target_scores = [top_mask["score"] for top_mask in unmasked_targets]
            self.assertEqual(nested_simplify(scores), nested_simplify(target_scores))
        # Raises with invalid targets
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[])
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised
        if "" not in tokenizer.get_vocab():
            with self.assertRaises(ValueError):
                outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[""])
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets="")
    def run_test_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, top_k=2)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2)
        self.assertEqual(
            outputs2,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
    def run_test_top_k_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        # top_k=2, ntargets=3
        targets = sorted(vocab.keys())[:3]
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2, targets=targets)
        # If we use the most probable targets, and filter differently, we should still
        # have the same results
        targets2 = [el["token_str"] for el in sorted(outputs, key=lambda x: x["score"], reverse=True)]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets2).issubset(targets):
            outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=3, targets=targets2)
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
    def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        vocab = tokenizer.get_vocab()
        # String duplicates + id duplicates
        targets = sorted(vocab.keys())[:3]
        targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        outputs = fill_masker(f"My name is {tokenizer.mask_token}", targets=targets, top_k=10)
        # The target list contains duplicates, so we can't output more results than there
        # are unique targets
        self.assertEqual(len(outputs), 3)
    def fill_mask_with_multiple_masks(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(
            f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}", top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )
'''simple docstring'''
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save
def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save max(src_len, tgt_len) for each example to allow dynamic batching."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id
    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens
    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
    fire.Fire(save_len_file)
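# Usage sketch (paths are illustrative; positional args map to the function parameters via fire):
# python save_len_file.py t5-small path/to/data_dir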
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
[
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image
class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)
        self.register_modules(unet=unet, scheduler=scheduler)
    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
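    # Worked example for get_timesteps (comment only): with num_inference_steps=50 and
    # strength=0.8, init_timestep = 40 and t_start = 10, so denoising starts at the 10th
    # scheduler timestep and runs the remaining 40 steps.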
    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}")
        init_latents = image.to(device=device, dtype=dtype)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators.")
        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents
        return latents
    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.FloatTensor, PIL.Image.Image] = None,
        strength: float = 0.8,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # 1. Check inputs
        self.check_inputs(strength)
        # 2. Preprocess image
        image = preprocess(image)
        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)
        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents
        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in the DDIM paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator,
            ).prev_sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image, latent_timestep.item())
        return ImagePipelineOutput(images=image)
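# Usage sketch (assumptions: checkpoint name, community-pipeline id and `init_image` are illustrative):
# from diffusers import DiffusionPipeline
# pipe = DiffusionPipeline.from_pretrained(
#     "google/ddpm-ema-celebahq-256", custom_pipeline="ddim_noise_comparative_analysis")
# image, noise_timestep = pipe(init_image, strength=0.6, return_dict=False)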
'''simple docstring'''
import pytest
DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__"
DATASET_LOADING_SCRIPT_CODE = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n    def _info(self):\n        features = datasets.Features(\n            {\n                "tokens": datasets.Sequence(datasets.Value("string")),\n                "ner_tags": datasets.Sequence(\n                    datasets.features.ClassLabel(\n                        names=[\n                            "O",\n                            "B-PER",\n                            "I-PER",\n                            "B-ORG",\n                            "I-ORG",\n                            "B-LOC",\n                            "I-LOC",\n                        ]\n                    )\n                ),\n                "langs": datasets.Sequence(datasets.Value("string")),\n                "spans": datasets.Sequence(datasets.Value("string")),\n            }\n        )\n        return datasets.DatasetInfo(features=features)\n\n    def _split_generators(self, dl_manager):\n        dl_path = dl_manager.download(URLS)\n        return [\n            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n        ]\n\n    def _generate_examples(self, filepath):\n        with open(filepath, "r", encoding="utf-8") as f:\n            for i, line in enumerate(f):\n                yield i, json.loads(line)\n'
@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""transformer.blocks.{i}.norm1.weight""", F"""vilt.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm1.bias""", F"""vilt.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.weight""", F"""vilt.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.bias""", F"""vilt.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.weight""", F"""vilt.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.bias""", F"""vilt.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.mlp.fc1.weight""", F"""vilt.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc1.bias""", F"""vilt.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.weight""", F"""vilt.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.bias""", F"""vilt.encoder.layer.{i}.output.dense.bias""") )
# embeddings
rename_keys.extend(
[
# text embeddings
("text_embeddings.word_embeddings.weight", "vilt.embeddings.text_embeddings.word_embeddings.weight"),
(
"text_embeddings.position_embeddings.weight",
"vilt.embeddings.text_embeddings.position_embeddings.weight",
),
("text_embeddings.position_ids", "vilt.embeddings.text_embeddings.position_ids"),
(
"text_embeddings.token_type_embeddings.weight",
"vilt.embeddings.text_embeddings.token_type_embeddings.weight",
),
("text_embeddings.LayerNorm.weight", "vilt.embeddings.text_embeddings.LayerNorm.weight"),
("text_embeddings.LayerNorm.bias", "vilt.embeddings.text_embeddings.LayerNorm.bias"),
# patch embeddings
("transformer.cls_token", "vilt.embeddings.cls_token"),
("transformer.patch_embed.proj.weight", "vilt.embeddings.patch_embeddings.projection.weight"),
("transformer.patch_embed.proj.bias", "vilt.embeddings.patch_embeddings.projection.bias"),
("transformer.pos_embed", "vilt.embeddings.position_embeddings"),
# token type embeddings
("token_type_embeddings.weight", "vilt.embeddings.token_type_embeddings.weight"),
] )
# final layernorm + pooler
rename_keys.extend(
[
("transformer.norm.weight", "vilt.layernorm.weight"),
("transformer.norm.bias", "vilt.layernorm.bias"),
("pooler.dense.weight", "vilt.pooler.dense.weight"),
("pooler.dense.bias", "vilt.pooler.dense.bias"),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
("vqa_classifier.0.weight", "classifier.0.weight"),
("vqa_classifier.0.bias", "classifier.0.bias"),
("vqa_classifier.1.weight", "classifier.1.weight"),
("vqa_classifier.1.bias", "classifier.1.bias"),
("vqa_classifier.3.weight", "classifier.3.weight"),
("vqa_classifier.3.bias", "classifier.3.bias"),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
("nlvr2_classifier.0.weight", "classifier.0.weight"),
("nlvr2_classifier.0.bias", "classifier.0.bias"),
("nlvr2_classifier.1.weight", "classifier.1.weight"),
("nlvr2_classifier.1.bias", "classifier.1.bias"),
("nlvr2_classifier.3.weight", "classifier.3.weight"),
("nlvr2_classifier.3.bias", "classifier.3.bias"),
] )
else:
pass
return rename_keys
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
for i in range(config.num_hidden_layers ):
_UpperCAmelCase : List[str] = "vilt."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_UpperCAmelCase : List[Any] = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.weight""" )
_UpperCAmelCase : int = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
_UpperCAmelCase : Any = in_proj_weight[
: config.hidden_size, :
]
_UpperCAmelCase : Tuple = in_proj_bias[: config.hidden_size]
_UpperCAmelCase : Any = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_UpperCAmelCase : Union[str, Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_UpperCAmelCase : Optional[int] = in_proj_weight[
-config.hidden_size :, :
]
_UpperCAmelCase : Optional[Any] = in_proj_bias[-config.hidden_size :]
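# Hedged shape sketch (added): for hidden_size H, the fused timm attention
# projection has weight (3H, H) and bias (3H,); the slices above route rows
# [0:H] to the query, [H:2H] to the key and [2H:3H] to the value, e.g.:
#   H = config.hidden_size
#   in_proj_weight = torch.randn(3 * H, H)
#   q_w, k_w, v_w = in_proj_weight[:H], in_proj_weight[H : 2 * H], in_proj_weight[-H:]
#   assert q_w.shape == k_w.shape == v_w.shape == (H, H)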
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : str = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : Any = dct.pop(__lowerCAmelCase )
_UpperCAmelCase : int = val
@torch.no_grad()
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : List[Any] = ViltConfig(image_size=384 , patch_size=32 , tie_word_embeddings=__lowerCAmelCase )
_UpperCAmelCase : Union[str, Any] = False
_UpperCAmelCase : Tuple = False
_UpperCAmelCase : Optional[Any] = False
_UpperCAmelCase : List[Any] = False
if "vqa" in checkpoint_url:
_UpperCAmelCase : Union[str, Any] = True
_UpperCAmelCase : List[str] = 3_129
_UpperCAmelCase : Dict = "huggingface/label-files"
_UpperCAmelCase : Any = "vqa2-id2label.json"
_UpperCAmelCase : int = json.load(open(hf_hub_download(__lowerCAmelCase , __lowerCAmelCase , repo_type="dataset" ) , "r" ) )
_UpperCAmelCase : Dict = {int(__lowerCAmelCase ): v for k, v in idalabel.items()}
_UpperCAmelCase : List[Any] = idalabel
_UpperCAmelCase : List[Any] = {v: k for k, v in idalabel.items()}
_UpperCAmelCase : int = ViltForQuestionAnswering(__lowerCAmelCase )
elif "nlvr" in checkpoint_url:
_UpperCAmelCase : int = True
_UpperCAmelCase : str = 2
_UpperCAmelCase : Optional[Any] = {0: "False", 1: "True"}
_UpperCAmelCase : Optional[int] = {v: k for k, v in config.idalabel.items()}
_UpperCAmelCase : int = 3
_UpperCAmelCase : str = ViltForImagesAndTextClassification(__lowerCAmelCase )
elif "irtr" in checkpoint_url:
_UpperCAmelCase : Any = True
_UpperCAmelCase : List[str] = ViltForImageAndTextRetrieval(__lowerCAmelCase )
elif "mlm_itm" in checkpoint_url:
_UpperCAmelCase : str = True
_UpperCAmelCase : Tuple = ViltForMaskedLM(__lowerCAmelCase )
else:
raise ValueError("Unknown model type" )
# load state_dict of original model, remove and rename some keys
_UpperCAmelCase : List[str] = torch.hub.load_state_dict_from_url(__lowerCAmelCase , map_location="cpu" )["state_dict"]
_UpperCAmelCase : Optional[int] = create_rename_keys(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
for src, dest in rename_keys:
rename_key(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
read_in_q_k_v(__lowerCAmelCase , __lowerCAmelCase )
if mlm_model or irtr_model:
_UpperCAmelCase : Tuple = ["itm_score.fc.weight", "itm_score.fc.bias"]
for k in ignore_keys:
state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
_UpperCAmelCase , _UpperCAmelCase : Dict = model.load_state_dict(__lowerCAmelCase , strict=__lowerCAmelCase )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(__lowerCAmelCase )
# Define processor
_UpperCAmelCase : Union[str, Any] = ViltImageProcessor(size=384 )
_UpperCAmelCase : Any = BertTokenizer.from_pretrained("bert-base-uncased" )
_UpperCAmelCase : Optional[Any] = ViltProcessor(__lowerCAmelCase , __lowerCAmelCase )
# Forward pass on example inputs (image + text)
if nlvr_model:
_UpperCAmelCase : Union[str, Any] = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg" , stream=__lowerCAmelCase ).raw )
_UpperCAmelCase : Optional[int] = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg" , stream=__lowerCAmelCase ).raw )
_UpperCAmelCase : int = (
"The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
" standing."
)
_UpperCAmelCase : Dict = processor(__lowerCAmelCase , __lowerCAmelCase , return_tensors="pt" )
_UpperCAmelCase : List[Any] = processor(__lowerCAmelCase , __lowerCAmelCase , return_tensors="pt" )
_UpperCAmelCase : Tuple = model(
input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , )
else:
_UpperCAmelCase : int = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg" , stream=__lowerCAmelCase ).raw )
if mlm_model:
_UpperCAmelCase : Optional[int] = "a bunch of [MASK] laying on a [MASK]."
else:
_UpperCAmelCase : Tuple = "How many cats are there?"
_UpperCAmelCase : str = processor(__lowerCAmelCase , __lowerCAmelCase , return_tensors="pt" )
_UpperCAmelCase : Union[str, Any] = model(**__lowerCAmelCase )
# Verify outputs
if mlm_model:
_UpperCAmelCase : Union[str, Any] = torch.Size([1, 11, 30_522] )
_UpperCAmelCase : Union[str, Any] = torch.tensor([-1_2.5_0_6_1, -1_2.5_1_2_3, -1_2.5_1_7_4] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , __lowerCAmelCase , atol=1e-4 )
# verify masked token prediction equals "cats"
_UpperCAmelCase : Union[str, Any] = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
_UpperCAmelCase : Dict = torch.Size([1, 3_129] )
_UpperCAmelCase : Dict = torch.tensor([-1_5.9_4_9_5, -1_8.1_4_7_2, -1_0.3_0_4_1] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3] , __lowerCAmelCase , atol=1e-4 )
# verify vqa prediction equals "2"
_UpperCAmelCase : List[Any] = outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
_UpperCAmelCase : str = torch.Size([1, 2] )
_UpperCAmelCase : str = torch.tensor([-2.8_7_2_1, 2.1_2_9_1] )
assert torch.allclose(outputs.logits[0, :3] , __lowerCAmelCase , atol=1e-4 )
assert outputs.logits.shape == expected_shape
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
print(F"""Saving model and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(__lowerCAmelCase )
processor.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
lowerCamelCase__ = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
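# Hedged invocation sketch (added; the script filename is illustrative, the
# checkpoint URL is the default defined above):
#   python convert_vilt_original_to_pytorch.py \
#       --checkpoint_url https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt \
#       --pytorch_dump_folder_path ./vilt-b32-mlm-itm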
'''simple docstring'''
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
lowerCamelCase__ = version.parse(importlib_metadata.version('nltk'))
if NLTK_VERSION >= version.Version('3.6.4'):
from nltk import word_tokenize
lowerCamelCase__ = '\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n'
lowerCamelCase__ = '\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n'
lowerCamelCase__ = '\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n \'meteor\': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric(\'meteor\')\n >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]\n >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results["meteor"], 4))\n 0.6944\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase__ ( datasets.Metric ):
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Optional[int]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"] , reference_urls=[
"https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
"https://en.wikipedia.org/wiki/METEOR",
] , )
def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : List[str] ) ->int:
'''simple docstring'''
import nltk
nltk.download("wordnet" )
if NLTK_VERSION >= version.Version("3.6.5" ):
nltk.download("punkt" )
if NLTK_VERSION >= version.Version("3.6.6" ):
nltk.download("omw-1.4" )
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : List[str] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : int=0.9 , lowerCamelCase__ : Dict=3 , lowerCamelCase__ : Dict=0.5 ) ->Any:
'''simple docstring'''
if NLTK_VERSION >= version.Version("3.6.5" ):
_UpperCAmelCase : Dict = [
meteor_score.single_meteor_score(
word_tokenize(lowerCamelCase__ ) , word_tokenize(lowerCamelCase__ ) , alpha=lowerCamelCase__ , beta=lowerCamelCase__ , gamma=lowerCamelCase__ )
for ref, pred in zip(lowerCamelCase__ , lowerCamelCase__ )
]
else:
_UpperCAmelCase : Optional[int] = [
meteor_score.single_meteor_score(lowerCamelCase__ , lowerCamelCase__ , alpha=lowerCamelCase__ , beta=lowerCamelCase__ , gamma=lowerCamelCase__ )
for ref, pred in zip(lowerCamelCase__ , lowerCamelCase__ )
]
return {"meteor": np.mean(lowerCamelCase__ )}
'''simple docstring'''
from __future__ import annotations
def __lowerCAmelCase (__lowerCAmelCase ):
create_state_space_tree(__lowerCAmelCase , [] , 0 , [0 for i in range(len(__lowerCAmelCase ) )] )
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ):
if index == len(__lowerCAmelCase ):
print(__lowerCAmelCase )
return
for i in range(len(__lowerCAmelCase ) ):
if not index_used[i]:
current_sequence.append(sequence[i] )
_UpperCAmelCase : int = True
create_state_space_tree(__lowerCAmelCase , __lowerCAmelCase , index + 1 , __lowerCAmelCase )
current_sequence.pop()
_UpperCAmelCase : Dict = False
lowerCamelCase__ = [3, 1, 2, 4]
generate_all_permutations(sequence)
lowerCamelCase__ = ["A", "B", "C"]
generate_all_permutations(sequence_a)
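# Hedged sanity check (added; not in the original file): a collecting variant
# of the same backtracking search must enumerate exactly the n! orderings
# produced by itertools.permutations.
from itertools import permutations

def _collect_permutations(sequence):
    results = []

    def backtrack(current, used):
        if len(current) == len(sequence):
            results.append(tuple(current))
            return
        for i in range(len(sequence)):
            if not used[i]:
                used[i] = True
                current.append(sequence[i])
                backtrack(current, used)
                current.pop()
                used[i] = False

    backtrack([], [False] * len(sequence))
    return results

assert sorted(_collect_permutations([3, 1, 2, 4])) == sorted(permutations([3, 1, 2, 4]))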
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
lowerCamelCase__ = logging.get_logger(__name__)
class lowerCAmelCase__ ( UpperCAmelCase__ ):
def lowerCAmelCase__ ( self : Optional[Any] , lowerCamelCase__ : int ) ->str:
'''simple docstring'''
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
_UpperCAmelCase : Union[str, Any] = [label.strip() for label in labels.split("," ) if label.strip()]
return labels
def __call__( self : Union[str, Any] , lowerCamelCase__ : Any , lowerCamelCase__ : Any , lowerCamelCase__ : List[Any] ) ->str:
'''simple docstring'''
if len(lowerCamelCase__ ) == 0 or len(lowerCamelCase__ ) == 0:
raise ValueError("You must include at least one label and at least one sequence." )
if hypothesis_template.format(labels[0] ) == hypothesis_template:
raise ValueError(
(
"The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. "
"Make sure the passed template includes formatting syntax such as {{}} where the label should go."
).format(lowerCamelCase__ ) )
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
_UpperCAmelCase : Optional[Any] = [sequences]
_UpperCAmelCase : int = []
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(lowerCamelCase__ )] for label in labels] )
return sequence_pairs, sequences
@add_end_docstrings(UpperCAmelCase__ )
class lowerCAmelCase__ ( UpperCAmelCase__ ):
def __init__( self : Union[str, Any] , lowerCamelCase__ : Optional[Any]=ZeroShotClassificationArgumentHandler() , *lowerCamelCase__ : List[str] , **lowerCamelCase__ : Any ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : Tuple = args_parser
super().__init__(*lowerCamelCase__ , **lowerCamelCase__ )
if self.entailment_id == -1:
logger.warning(
"Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
"-1. Define a descriptive label2id mapping in the model config to ensure correct outputs." )
@property
def lowerCAmelCase__ ( self : Any ) ->Union[str, Any]:
'''simple docstring'''
for label, ind in self.model.config.labelaid.items():
if label.lower().startswith("entail" ):
return ind
return -1
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : Tuple , lowerCamelCase__ : int=True , lowerCamelCase__ : Optional[int]=True , lowerCamelCase__ : str=TruncationStrategy.ONLY_FIRST , **lowerCamelCase__ : List[Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase : int = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
"Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
" `pad_token=eos_token`" )
_UpperCAmelCase : Optional[Any] = self.tokenizer.eos_token
try:
_UpperCAmelCase : List[str] = self.tokenizer(
lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , return_tensors=lowerCamelCase__ , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , )
except Exception as e:
if "too short" in str(lowerCamelCase__ ):
# tokenizers might yell that we want to truncate
# to a value that is not even reached by the input.
# In that case we don't want to truncate.
# It seems there's not a really better way to catch that
# exception.
_UpperCAmelCase : List[Any] = self.tokenizer(
lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , return_tensors=lowerCamelCase__ , padding=lowerCamelCase__ , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def lowerCAmelCase__ ( self : int , **lowerCamelCase__ : Union[str, Any] ) ->Tuple:
'''simple docstring'''
if kwargs.get("multi_class" , lowerCamelCase__ ) is not None:
_UpperCAmelCase : int = kwargs["multi_class"]
logger.warning(
"The `multi_class` argument has been deprecated and renamed to `multi_label`. "
"`multi_class` will be removed in a future version of Transformers." )
_UpperCAmelCase : Dict = {}
if "candidate_labels" in kwargs:
_UpperCAmelCase : List[Any] = self._args_parser._parse_labels(kwargs["candidate_labels"] )
if "hypothesis_template" in kwargs:
_UpperCAmelCase : Dict = kwargs["hypothesis_template"]
_UpperCAmelCase : List[str] = {}
if "multi_label" in kwargs:
_UpperCAmelCase : Optional[Any] = kwargs["multi_label"]
return preprocess_params, {}, postprocess_params
def __call__( self : int , lowerCamelCase__ : Union[str, List[str]] , *lowerCamelCase__ : str , **lowerCamelCase__ : Optional[Any] , ) ->Optional[int]:
'''simple docstring'''
if len(lowerCamelCase__ ) == 0:
pass
elif len(lowerCamelCase__ ) == 1 and "candidate_labels" not in kwargs:
_UpperCAmelCase : int = args[0]
else:
raise ValueError(F"""Unable to understand extra arguments {args}""" )
return super().__call__(lowerCamelCase__ , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Any=None , lowerCamelCase__ : str="This example is {}." ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = self._args_parser(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
for i, (candidate_label, sequence_pair) in enumerate(zip(lowerCamelCase__ , lowerCamelCase__ ) ):
_UpperCAmelCase : Optional[int] = self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(lowerCamelCase__ ) - 1,
**model_input,
}
def lowerCAmelCase__ ( self : Optional[Any] , lowerCamelCase__ : Optional[int] ) ->int:
'''simple docstring'''
_UpperCAmelCase : Dict = inputs["candidate_label"]
_UpperCAmelCase : Optional[int] = inputs["sequence"]
_UpperCAmelCase : Dict = {k: inputs[k] for k in self.tokenizer.model_input_names}
_UpperCAmelCase : List[Any] = self.model(**lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = {
"candidate_label": candidate_label,
"sequence": sequence,
"is_last": inputs["is_last"],
**outputs,
}
return model_outputs
def lowerCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : int , lowerCamelCase__ : Tuple=False ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Any = [outputs["candidate_label"] for outputs in model_outputs]
_UpperCAmelCase : Any = [outputs["sequence"] for outputs in model_outputs]
_UpperCAmelCase : Optional[int] = np.concatenate([output["logits"].numpy() for output in model_outputs] )
_UpperCAmelCase : Optional[Any] = logits.shape[0]
_UpperCAmelCase : Any = len(lowerCamelCase__ )
_UpperCAmelCase : str = N // n
_UpperCAmelCase : str = logits.reshape((num_sequences, n, -1) )
if multi_label or len(lowerCamelCase__ ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
_UpperCAmelCase : int = self.entailment_id
_UpperCAmelCase : List[Any] = -1 if entailment_id == 0 else 0
_UpperCAmelCase : str = reshaped_outputs[..., [contradiction_id, entailment_id]]
_UpperCAmelCase : Union[str, Any] = np.exp(lowerCamelCase__ ) / np.exp(lowerCamelCase__ ).sum(-1 , keepdims=lowerCamelCase__ )
_UpperCAmelCase : str = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
_UpperCAmelCase : int = reshaped_outputs[..., self.entailment_id]
_UpperCAmelCase : Union[str, Any] = np.exp(lowerCamelCase__ ) / np.exp(lowerCamelCase__ ).sum(-1 , keepdims=lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
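# Hedged usage sketch (added): this class backs `pipeline("zero-shot-classification")`;
# a typical call (model choice illustrative) looks like:
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
#   classifier(
#       "one day I will see the world",
#       candidate_labels=["travel", "cooking", "dancing"],
#       multi_label=False,
#   )
#   # -> {"sequence": ..., "labels": [...], "scores": [...]}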
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowerCAmelCase__ ( unittest.TestCase ):
def __init__( self : Optional[int] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Any=13 , lowerCamelCase__ : Dict=3 , lowerCamelCase__ : Dict=2_24 , lowerCamelCase__ : Dict=30 , lowerCamelCase__ : int=4_00 , lowerCamelCase__ : int=True , lowerCamelCase__ : str=None , lowerCamelCase__ : Any=True , lowerCamelCase__ : str=[0.5, 0.5, 0.5] , lowerCamelCase__ : Optional[Any]=[0.5, 0.5, 0.5] , ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : Dict = size if size is not None else {"height": 18, "width": 18}
_UpperCAmelCase : int = parent
_UpperCAmelCase : Dict = batch_size
_UpperCAmelCase : List[str] = num_channels
_UpperCAmelCase : Union[str, Any] = image_size
_UpperCAmelCase : str = min_resolution
_UpperCAmelCase : Union[str, Any] = max_resolution
_UpperCAmelCase : List[Any] = do_resize
_UpperCAmelCase : Any = size
_UpperCAmelCase : List[str] = do_normalize
_UpperCAmelCase : Optional[Any] = image_mean
_UpperCAmelCase : Union[str, Any] = image_std
def lowerCAmelCase__ ( self : Union[str, Any] ) ->str:
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class lowerCAmelCase__ ( UpperCAmelCase__ , unittest.TestCase ):
lowerCAmelCase : Tuple = ViTImageProcessor if is_vision_available() else None
def lowerCAmelCase__ ( self : int ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase : Any = EfficientFormerImageProcessorTester(self )
@property
def lowerCAmelCase__ ( self : str ) ->Dict:
'''simple docstring'''
return self.image_proc_tester.prepare_image_processor_dict()
def lowerCAmelCase__ ( self : List[Any] ) ->int:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase__ , "image_mean" ) )
self.assertTrue(hasattr(lowerCamelCase__ , "image_std" ) )
self.assertTrue(hasattr(lowerCamelCase__ , "do_normalize" ) )
self.assertTrue(hasattr(lowerCamelCase__ , "do_resize" ) )
self.assertTrue(hasattr(lowerCamelCase__ , "size" ) )
def lowerCAmelCase__ ( self : int ) ->str:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self : Union[str, Any] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase : Optional[Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , Image.Image )
# Test not batched input
_UpperCAmelCase : List[Any] = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
_UpperCAmelCase : List[str] = image_processor(lowerCamelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
def lowerCAmelCase__ ( self : List[str] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase : Optional[int] = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCamelCase__ , numpify=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , np.ndarray )
# Test not batched input
_UpperCAmelCase : Optional[Any] = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
_UpperCAmelCase : Tuple = image_processor(lowerCamelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
def lowerCAmelCase__ ( self : Any ) ->Any:
'''simple docstring'''
_UpperCAmelCase : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase : int = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCamelCase__ , torchify=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , torch.Tensor )
# Test not batched input
_UpperCAmelCase : Tuple = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
_UpperCAmelCase : List[str] = image_processor(lowerCamelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
'''simple docstring'''
def __lowerCAmelCase (__lowerCAmelCase = 4_000_000 ):
_UpperCAmelCase : List[Any] = []
_UpperCAmelCase , _UpperCAmelCase : Dict = 0, 1
while b <= n:
if b % 2 == 0:
even_fibs.append(__lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase : Any = b, a + b
return sum(__lowerCAmelCase )
if __name__ == "__main__":
print(F'''{solution() = }''')
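# Hedged optimization sketch (added; not in the original file): every third
# Fibonacci number is even, so the even terms satisfy
# E(k) = 4 * E(k - 1) + E(k - 2) with E(1) = 2 and E(2) = 8, letting us skip
# the odd terms entirely.
def _solution_even_terms_only(limit: int = 4_000_000) -> int:
    total, a, b = 0, 2, 8
    while a <= limit:
        total += a
        a, b = b, 4 * b + a
    return total

assert _solution_even_terms_only() == 4_613_732  # well-known Project Euler #2 result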
'''simple docstring'''
def __lowerCAmelCase (__lowerCAmelCase ):
if number < 0:
raise ValueError("number must not be negative" )
return number & (number - 1) == 0
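# Hedged bit-trick note (added): `n & (n - 1)` clears the lowest set bit, so the
# expression above is zero exactly when at most one bit is set:
#   8 (0b1000) & 7 (0b0111) == 0         -> power of two
#   6 (0b0110) & 5 (0b0101) == 0b0100    -> not a power of two
# Note that the check also returns True for 0; callers that consider 0 invalid
# may want `number != 0 and number & (number - 1) == 0`.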
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class lowerCAmelCase__ ( unittest.TestCase ):
def __init__( self : Optional[Any] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : List[str]=13 , lowerCamelCase__ : Optional[Any]=7 , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : Any=True , lowerCamelCase__ : List[Any]=True , lowerCamelCase__ : Any=True , lowerCamelCase__ : int=99 , lowerCamelCase__ : int=32 , lowerCamelCase__ : List[str]=5 , lowerCamelCase__ : Optional[Any]=4 , lowerCamelCase__ : Optional[int]=37 , lowerCamelCase__ : Tuple="gelu" , lowerCamelCase__ : Any=0.1 , lowerCamelCase__ : Union[str, Any]=0.1 , lowerCamelCase__ : Optional[int]=5_12 , lowerCamelCase__ : Optional[int]=16 , lowerCamelCase__ : str=2 , lowerCamelCase__ : Union[str, Any]=0.0_2 , lowerCamelCase__ : Tuple=4 , ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = parent
_UpperCAmelCase : List[Any] = batch_size
_UpperCAmelCase : Optional[int] = seq_length
_UpperCAmelCase : int = is_training
_UpperCAmelCase : Dict = use_attention_mask
_UpperCAmelCase : Optional[Any] = use_token_type_ids
_UpperCAmelCase : int = use_labels
_UpperCAmelCase : Optional[int] = vocab_size
_UpperCAmelCase : Any = hidden_size
_UpperCAmelCase : Any = num_hidden_layers
_UpperCAmelCase : List[Any] = num_attention_heads
_UpperCAmelCase : Tuple = intermediate_size
_UpperCAmelCase : int = hidden_act
_UpperCAmelCase : int = hidden_dropout_prob
_UpperCAmelCase : Union[str, Any] = attention_probs_dropout_prob
_UpperCAmelCase : Union[str, Any] = max_position_embeddings
_UpperCAmelCase : Tuple = type_vocab_size
_UpperCAmelCase : List[Any] = type_sequence_label_size
_UpperCAmelCase : Optional[int] = initializer_range
_UpperCAmelCase : Dict = num_choices
def lowerCAmelCase__ ( self : List[Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase : Dict = None
if self.use_attention_mask:
_UpperCAmelCase : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase : Union[str, Any] = None
if self.use_token_type_ids:
_UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase : int = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCAmelCase__ ( self : Any ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Tuple = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : List[Any] = config_and_inputs
_UpperCAmelCase : str = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class lowerCAmelCase__ ( UpperCAmelCase__ , unittest.TestCase ):
lowerCAmelCase : Optional[int] = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
)
if is_flax_available()
else ()
)
def lowerCAmelCase__ ( self : Optional[int] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : int = FlaxAlbertModelTester(self )
@slow
def lowerCAmelCase__ ( self : Any ) ->List[str]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
_UpperCAmelCase : List[str] = model_class_name.from_pretrained("albert-base-v2" )
_UpperCAmelCase : Optional[int] = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCamelCase__ )
@require_flax
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def lowerCAmelCase__ ( self : Tuple ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : str = FlaxAlbertModel.from_pretrained("albert-base-v2" )
_UpperCAmelCase : List[Any] = np.array([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
_UpperCAmelCase : Optional[int] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_UpperCAmelCase : Dict = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ )[0]
_UpperCAmelCase : List[Any] = (1, 11, 7_68)
self.assertEqual(output.shape , lowerCamelCase__ )
_UpperCAmelCase : str = np.array(
[[[-0.6_5_1_3, 1.5_0_3_5, -0.2_7_6_6], [-0.6_5_1_5, 1.5_0_4_6, -0.2_7_8_0], [-0.6_5_1_2, 1.5_0_4_9, -0.2_7_8_4]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , lowerCamelCase__ , atol=1E-4 ) )
'''simple docstring'''
import math
from numpy import inf
from scipy.integrate import quad
def __lowerCAmelCase (__lowerCAmelCase ):
if num <= 0:
raise ValueError("math domain error" )
return quad(__lowerCAmelCase , 0 , __lowerCAmelCase , args=(__lowerCAmelCase) )[0]
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
return math.pow(__lowerCAmelCase , z - 1 ) * math.exp(-x )
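# Hedged numerical check (added; not in the original file): the quadrature-based
# Gamma should agree with math.gamma to well within quad's default tolerance.
def _gamma_quad(z: float) -> float:
    return quad(lambda x: math.pow(x, z - 1) * math.exp(-x), 0, inf)[0]

for _z in (1.0, 2.5, 5.0):
    assert abs(_gamma_quad(_z) - math.gamma(_z)) < 1e-6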
if __name__ == "__main__":
from doctest import testmod
testmod()
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
lowerCamelCase__ = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risk.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-classification/requirements.txt')
lowerCamelCase__ = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
lowerCamelCase__ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def __lowerCAmelCase (__lowerCAmelCase ):
with open(__lowerCAmelCase , "rb" ) as f:
_UpperCAmelCase : List[str] = Image.open(__lowerCAmelCase )
return im.convert("RGB" )
@dataclass
class lowerCAmelCase__ :
lowerCAmelCase : Optional[str] = field(
default=UpperCAmelCase__ , metadata={
"help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."
} , )
lowerCAmelCase : Optional[str] = field(
default=UpperCAmelCase__ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
lowerCAmelCase : Optional[str] = field(default=UpperCAmelCase__ , metadata={"help": "A folder containing the training data."} )
lowerCAmelCase : Optional[str] = field(default=UpperCAmelCase__ , metadata={"help": "A folder containing the validation data."} )
lowerCAmelCase : Optional[float] = field(
default=0.15 , metadata={"help": "Percent to split off of train for validation."} )
lowerCAmelCase : Optional[int] = field(
default=UpperCAmelCase__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
lowerCAmelCase : Optional[int] = field(
default=UpperCAmelCase__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def lowerCAmelCase__ ( self : int ) ->List[str]:
'''simple docstring'''
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError(
"You must specify either a dataset name from the hub or a train and/or validation directory." )
@dataclass
class lowerCAmelCase__ :
lowerCAmelCase : str = field(
default="google/vit-base-patch16-224-in21k" , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} , )
lowerCAmelCase : Optional[str] = field(
default=UpperCAmelCase__ , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(UpperCAmelCase__ )} , )
lowerCAmelCase : Optional[str] = field(
default=UpperCAmelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
lowerCAmelCase : Optional[str] = field(
default=UpperCAmelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} )
lowerCAmelCase : str = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
lowerCAmelCase : str = field(default=UpperCAmelCase__ , metadata={"help": "Name or path of preprocessor config."} )
lowerCAmelCase : bool = field(
default=UpperCAmelCase__ , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
lowerCAmelCase : bool = field(
default=UpperCAmelCase__ , metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , )
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : str = torch.stack([example["pixel_values"] for example in examples] )
_UpperCAmelCase : Tuple = torch.tensor([example["labels"] for example in examples] )
return {"pixel_values": pixel_values, "labels": labels}
def __lowerCAmelCase ():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_UpperCAmelCase : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Dict = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_image_classification" , __lowerCAmelCase , __lowerCAmelCase )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_UpperCAmelCase : Optional[Any] = training_args.get_process_log_level()
logger.setLevel(__lowerCAmelCase )
transformers.utils.logging.set_verbosity(__lowerCAmelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
_UpperCAmelCase : List[Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_UpperCAmelCase : Dict = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
if data_args.dataset_name is not None:
_UpperCAmelCase : str = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task="image-classification" , use_auth_token=True if model_args.use_auth_token else None , )
else:
_UpperCAmelCase : List[Any] = {}
if data_args.train_dir is not None:
_UpperCAmelCase : str = os.path.join(data_args.train_dir , "**" )
if data_args.validation_dir is not None:
_UpperCAmelCase : Optional[Any] = os.path.join(data_args.validation_dir , "**" )
_UpperCAmelCase : Any = load_dataset(
"imagefolder" , data_files=__lowerCAmelCase , cache_dir=model_args.cache_dir , task="image-classification" , )
# If we don't have a validation split, split off a percentage of train as validation.
_UpperCAmelCase : int = None if "validation" in dataset.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , __lowerCAmelCase ) and data_args.train_val_split > 0.0:
_UpperCAmelCase : List[Any] = dataset["train"].train_test_split(data_args.train_val_split )
_UpperCAmelCase : List[str] = split["train"]
_UpperCAmelCase : Union[str, Any] = split["test"]
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
_UpperCAmelCase : Optional[int] = dataset["train"].features["labels"].names
_UpperCAmelCase , _UpperCAmelCase : int = {}, {}
for i, label in enumerate(__lowerCAmelCase ):
_UpperCAmelCase : int = str(__lowerCAmelCase )
_UpperCAmelCase : str = label
# Load the accuracy metric from the datasets package
_UpperCAmelCase : int = evaluate.load("accuracy" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(__lowerCAmelCase ):
return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids )
_UpperCAmelCase : Dict = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(__lowerCAmelCase ) , labelaid=__lowerCAmelCase , idalabel=__lowerCAmelCase , finetuning_task="image-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_UpperCAmelCase : List[str] = AutoModelForImageClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=__lowerCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
_UpperCAmelCase : Dict = AutoImageProcessor.from_pretrained(
model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
_UpperCAmelCase : int = image_processor.size["shortest_edge"]
else:
_UpperCAmelCase : int = (image_processor.size["height"], image_processor.size["width"])
_UpperCAmelCase : str = Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
_UpperCAmelCase : Optional[int] = Compose(
[
RandomResizedCrop(__lowerCAmelCase ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
_UpperCAmelCase : Union[str, Any] = Compose(
[
Resize(__lowerCAmelCase ),
CenterCrop(__lowerCAmelCase ),
ToTensor(),
normalize,
] )
def train_transforms(__lowerCAmelCase ):
_UpperCAmelCase : Optional[Any] = [
_train_transforms(pil_img.convert("RGB" ) ) for pil_img in example_batch["image"]
]
return example_batch
def val_transforms(__lowerCAmelCase ):
_UpperCAmelCase : Union[str, Any] = [_val_transforms(pil_img.convert("RGB" ) ) for pil_img in example_batch["image"]]
return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError("--do_train requires a train dataset" )
if data_args.max_train_samples is not None:
_UpperCAmelCase : Dict = (
dataset["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
dataset["train"].set_transform(__lowerCAmelCase )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError("--do_eval requires a validation dataset" )
if data_args.max_eval_samples is not None:
_UpperCAmelCase : Optional[Any] = (
dataset["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
dataset["validation"].set_transform(__lowerCAmelCase )
# Initialize our trainer
_UpperCAmelCase : Union[str, Any] = Trainer(
model=__lowerCAmelCase , args=__lowerCAmelCase , train_dataset=dataset["train"] if training_args.do_train else None , eval_dataset=dataset["validation"] if training_args.do_eval else None , compute_metrics=__lowerCAmelCase , tokenizer=__lowerCAmelCase , data_collator=__lowerCAmelCase , )
# Training
if training_args.do_train:
_UpperCAmelCase : Any = None
if training_args.resume_from_checkpoint is not None:
_UpperCAmelCase : List[str] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_UpperCAmelCase : int = last_checkpoint
_UpperCAmelCase : Dict = trainer.train(resume_from_checkpoint=__lowerCAmelCase )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
_UpperCAmelCase : Dict = trainer.evaluate()
trainer.log_metrics("eval" , __lowerCAmelCase )
trainer.save_metrics("eval" , __lowerCAmelCase )
# Write model card and (optionally) push to hub
_UpperCAmelCase : int = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "image-classification",
"dataset": data_args.dataset_name,
"tags": ["image-classification", "vision"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**__lowerCAmelCase )
else:
trainer.create_model_card(**__lowerCAmelCase )
if __name__ == "__main__":
main()
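# Hedged invocation sketch (added): a typical fine-tuning run; dataset choice
# and hyperparameters below are illustrative only.
#   python run_image_classification.py \
#       --dataset_name beans \
#       --output_dir ./beans_outputs/ \
#       --do_train \
#       --do_eval \
#       --learning_rate 2e-5 \
#       --num_train_epochs 5 \
#       --per_device_train_batch_size 8 \
#       --seed 1337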
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCAmelCase__ :
def __init__( self : Tuple , lowerCamelCase__ : Dict , lowerCamelCase__ : Tuple=3 , lowerCamelCase__ : Dict=32 , lowerCamelCase__ : Optional[Any]=3 , lowerCamelCase__ : str=10 , lowerCamelCase__ : int=[10, 20, 30, 40] , lowerCamelCase__ : Tuple=[1, 1, 2, 1] , lowerCamelCase__ : Tuple=True , lowerCamelCase__ : str=True , lowerCamelCase__ : str="relu" , lowerCamelCase__ : Tuple=3 , lowerCamelCase__ : int=None , ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = parent
_UpperCAmelCase : Optional[int] = batch_size
_UpperCAmelCase : int = image_size
_UpperCAmelCase : Dict = num_channels
_UpperCAmelCase : Any = embeddings_size
_UpperCAmelCase : Tuple = hidden_sizes
_UpperCAmelCase : Union[str, Any] = depths
_UpperCAmelCase : Dict = is_training
_UpperCAmelCase : Optional[int] = use_labels
_UpperCAmelCase : Optional[Any] = hidden_act
_UpperCAmelCase : Dict = num_labels
_UpperCAmelCase : List[str] = scope
_UpperCAmelCase : str = len(lowerCamelCase__ )
def lowerCAmelCase__ ( self : int ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase : Dict = None
if self.use_labels:
_UpperCAmelCase : str = ids_tensor([self.batch_size] , self.num_labels )
_UpperCAmelCase : Dict = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase__ ( self : List[Any] ) ->Any:
'''simple docstring'''
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : int , lowerCamelCase__ : Dict ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = RegNetModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_UpperCAmelCase : str = model(lowerCamelCase__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowerCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : str , lowerCamelCase__ : List[Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : str = self.num_labels
_UpperCAmelCase : Dict = RegNetForImageClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_UpperCAmelCase : List[Any] = model(lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase__ ( self : str ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase : int = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : List[str] = config_and_inputs
_UpperCAmelCase : str = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
lowerCAmelCase : Optional[int] = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
lowerCAmelCase : List[Any] = (
{"feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase : Dict = False
lowerCAmelCase : List[Any] = False
lowerCAmelCase : Optional[int] = False
lowerCAmelCase : str = False
def lowerCAmelCase__ ( self : List[str] ) ->int:
'''simple docstring'''
_UpperCAmelCase : str = RegNetModelTester(self )
_UpperCAmelCase : List[Any] = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ )
def lowerCAmelCase__ ( self : Any ) ->Dict:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCAmelCase__ ( self : Dict ) ->Optional[int]:
'''simple docstring'''
return
@unittest.skip(reason="RegNet does not use inputs_embeds" )
def lowerCAmelCase__ ( self : int ) ->str:
'''simple docstring'''
pass
@unittest.skip(reason="RegNet does not support input and output embeddings" )
def lowerCAmelCase__ ( self : Tuple ) ->Union[str, Any]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self : Dict ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : Dict = model_class(lowerCamelCase__ )
_UpperCAmelCase : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase : Optional[int] = [*signature.parameters.keys()]
_UpperCAmelCase : Any = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def lowerCAmelCase__ ( self : List[Any] ) ->int:
'''simple docstring'''
_UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def lowerCAmelCase__ ( self : int ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : str = model_class(config=lowerCamelCase__ )
for name, module in model.named_modules():
if isinstance(lowerCamelCase__ , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
def lowerCAmelCase__ ( self : Dict ) ->Tuple:
'''simple docstring'''
def check_hidden_states_output(lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : str ):
_UpperCAmelCase : Optional[Any] = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
with torch.no_grad():
_UpperCAmelCase : Tuple = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
_UpperCAmelCase : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_UpperCAmelCase : str = self.model_tester.num_stages
self.assertEqual(len(lowerCamelCase__ ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase : Union[str, Any] = ["basic", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
_UpperCAmelCase : Tuple = layer_type
_UpperCAmelCase : Union[str, Any] = True
check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase : Optional[Any] = True
check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def lowerCAmelCase__ ( self : List[Any] ) ->int:
'''simple docstring'''
_UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ )
@slow
def lowerCAmelCase__ ( self : str ) ->Any:
'''simple docstring'''
for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase : int = RegNetModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def __lowerCAmelCase ():
_UpperCAmelCase : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
@cached_property
def lowerCAmelCase__ ( self : Optional[Any] ) ->int:
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def lowerCAmelCase__ ( self : Any ) ->int:
'''simple docstring'''
_UpperCAmelCase : int = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowerCamelCase__ )
_UpperCAmelCase : Any = self.default_image_processor
_UpperCAmelCase : Optional[int] = prepare_img()
_UpperCAmelCase : List[str] = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
_UpperCAmelCase : Any = model(**lowerCamelCase__ )
# verify the logits
_UpperCAmelCase : Optional[int] = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = torch.tensor([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1E-4 ) )
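# A hedged, self-contained sketch (illustrative only, not part of the test file above)
# of the same slow integration flow run by hand: load the checkpoint used in the test,
# preprocess one image, and compare the first three logits within a tolerance.
import torch
from PIL import Image
from transformers import AutoImageProcessor, RegNetForImageClassification

def run_regnet_integration_check(image_path="./tests/fixtures/tests_samples/COCO/000000039769.png"):
    processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
    model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040").eval()
    inputs = processor(images=Image.open(image_path), return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits  # shape (1, 1000) for the ImageNet-1k head
    expected = torch.tensor([-0.4180, -1.5051, -3.4836])  # reference values from the test above
    assert torch.allclose(logits[0, :3], expected, atol=1e-4)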
| 322
|
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
lowerCamelCase__ = logging.get_logger(__name__)
# General docstring
lowerCamelCase__ = 'RegNetConfig'
# Base docstring
lowerCamelCase__ = 'facebook/regnet-y-040'
lowerCamelCase__ = [1, 1_088, 7, 7]
# Image classification docstring
lowerCamelCase__ = 'facebook/regnet-y-040'
lowerCamelCase__ = 'tabby, tabby cat'
lowerCamelCase__ = [
'facebook/regnet-y-040',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class lowerCAmelCase__ ( tf.keras.layers.Layer ):
def __init__( self : int , lowerCamelCase__ : int , lowerCamelCase__ : int = 3 , lowerCamelCase__ : int = 1 , lowerCamelCase__ : int = 1 , lowerCamelCase__ : Optional[str] = "relu" , **lowerCamelCase__ : Tuple , ) ->Optional[Any]:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
_UpperCAmelCase : Optional[Any] = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
_UpperCAmelCase : Dict = tf.keras.layers.ConvaD(
filters=lowerCamelCase__ , kernel_size=lowerCamelCase__ , strides=lowerCamelCase__ , padding="VALID" , groups=lowerCamelCase__ , use_bias=lowerCamelCase__ , name="convolution" , )
_UpperCAmelCase : List[Any] = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" )
_UpperCAmelCase : int = ACTaFN[activation] if activation is not None else tf.identity
def lowerCAmelCase__ ( self : int , lowerCamelCase__ : Tuple ) ->Any:
'''simple docstring'''
_UpperCAmelCase : List[str] = self.convolution(self.padding(lowerCamelCase__ ) )
_UpperCAmelCase : Optional[Any] = self.normalization(lowerCamelCase__ )
_UpperCAmelCase : List[Any] = self.activation(lowerCamelCase__ )
return hidden_state
class lowerCAmelCase__ ( tf.keras.layers.Layer ):
def __init__( self : str , lowerCamelCase__ : RegNetConfig , **lowerCamelCase__ : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
_UpperCAmelCase : List[str] = config.num_channels
_UpperCAmelCase : Any = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="embedder" , )
def lowerCAmelCase__ ( self : Any , lowerCamelCase__ : Optional[Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : List[str] = shape_list(lowerCamelCase__ )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
_UpperCAmelCase : Optional[Any] = tf.transpose(lowerCamelCase__ , perm=(0, 2, 3, 1) )
_UpperCAmelCase : List[Any] = self.embedder(lowerCamelCase__ )
return hidden_state
class lowerCAmelCase__ ( tf.keras.layers.Layer ):
def __init__( self : int , lowerCamelCase__ : int , lowerCamelCase__ : int = 2 , **lowerCamelCase__ : int ) ->Union[str, Any]:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
_UpperCAmelCase : int = tf.keras.layers.ConvaD(
filters=lowerCamelCase__ , kernel_size=1 , strides=lowerCamelCase__ , use_bias=lowerCamelCase__ , name="convolution" )
_UpperCAmelCase : Any = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" )
def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : tf.Tensor , lowerCamelCase__ : bool = False ) ->tf.Tensor:
'''simple docstring'''
return self.normalization(self.convolution(lowerCamelCase__ ) , training=lowerCamelCase__ )
class lowerCAmelCase__ ( tf.keras.layers.Layer ):
def __init__( self : Any , lowerCamelCase__ : int , lowerCamelCase__ : int , **lowerCamelCase__ : Optional[int] ) ->Dict:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
_UpperCAmelCase : List[str] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowerCamelCase__ , name="pooler" )
_UpperCAmelCase : int = [
tf.keras.layers.ConvaD(filters=lowerCamelCase__ , kernel_size=1 , activation="relu" , name="attention.0" ),
tf.keras.layers.ConvaD(filters=lowerCamelCase__ , kernel_size=1 , activation="sigmoid" , name="attention.2" ),
]
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : Optional[int] ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.pooler(lowerCamelCase__ )
for layer_module in self.attention:
_UpperCAmelCase : str = layer_module(lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = hidden_state * pooled
return hidden_state
class lowerCAmelCase__ ( tf.keras.layers.Layer ):
def __init__( self : Dict , lowerCamelCase__ : RegNetConfig , lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : int = 1 , **lowerCamelCase__ : Any ) ->List[str]:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
_UpperCAmelCase : List[str] = in_channels != out_channels or stride != 1
_UpperCAmelCase : List[str] = max(1 , out_channels // config.groups_width )
_UpperCAmelCase : List[str] = (
TFRegNetShortCut(lowerCamelCase__ , stride=lowerCamelCase__ , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
_UpperCAmelCase : Optional[int] = [
TFRegNetConvLayer(lowerCamelCase__ , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
lowerCamelCase__ , stride=lowerCamelCase__ , groups=lowerCamelCase__ , activation=config.hidden_act , name="layer.1" ),
TFRegNetConvLayer(lowerCamelCase__ , kernel_size=1 , activation=lowerCamelCase__ , name="layer.2" ),
]
_UpperCAmelCase : Union[str, Any] = ACTaFN[config.hidden_act]
def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : Union[str, Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Any = hidden_state
for layer_module in self.layers:
_UpperCAmelCase : List[Any] = layer_module(lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = self.shortcut(lowerCamelCase__ )
hidden_state += residual
_UpperCAmelCase : List[Any] = self.activation(lowerCamelCase__ )
return hidden_state
class lowerCAmelCase__ ( tf.keras.layers.Layer ):
def __init__( self : List[Any] , lowerCamelCase__ : RegNetConfig , lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : int = 1 , **lowerCamelCase__ : str ) ->Optional[int]:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = in_channels != out_channels or stride != 1
_UpperCAmelCase : Optional[int] = max(1 , out_channels // config.groups_width )
_UpperCAmelCase : Union[str, Any] = (
TFRegNetShortCut(lowerCamelCase__ , stride=lowerCamelCase__ , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
_UpperCAmelCase : List[Any] = [
TFRegNetConvLayer(lowerCamelCase__ , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
lowerCamelCase__ , stride=lowerCamelCase__ , groups=lowerCamelCase__ , activation=config.hidden_act , name="layer.1" ),
TFRegNetSELayer(lowerCamelCase__ , reduced_channels=int(round(in_channels / 4 ) ) , name="layer.2" ),
TFRegNetConvLayer(lowerCamelCase__ , kernel_size=1 , activation=lowerCamelCase__ , name="layer.3" ),
]
_UpperCAmelCase : int = ACTaFN[config.hidden_act]
def lowerCAmelCase__ ( self : Any , lowerCamelCase__ : str ) ->Any:
'''simple docstring'''
_UpperCAmelCase : int = hidden_state
for layer_module in self.layers:
_UpperCAmelCase : Tuple = layer_module(lowerCamelCase__ )
_UpperCAmelCase : List[Any] = self.shortcut(lowerCamelCase__ )
hidden_state += residual
_UpperCAmelCase : Tuple = self.activation(lowerCamelCase__ )
return hidden_state
class lowerCAmelCase__ ( tf.keras.layers.Layer ):
def __init__( self : str , lowerCamelCase__ : RegNetConfig , lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : int = 2 , lowerCamelCase__ : int = 2 , **lowerCamelCase__ : Union[str, Any] ) ->Optional[int]:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
_UpperCAmelCase : str = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
_UpperCAmelCase : List[str] = [
# downsampling is done in the first layer with stride of 2
layer(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , stride=lowerCamelCase__ , name="layers.0" ),
*[layer(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , name=F"""layers.{i+1}""" ) for i in range(depth - 1 )],
]
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : List[str] ) ->List[str]:
'''simple docstring'''
for layer_module in self.layers:
_UpperCAmelCase : Optional[int] = layer_module(lowerCamelCase__ )
return hidden_state
class lowerCAmelCase__ ( tf.keras.layers.Layer ):
def __init__( self : Dict , lowerCamelCase__ : RegNetConfig , **lowerCamelCase__ : int ) ->Dict:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
lowerCamelCase__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="stages.0" , ) )
_UpperCAmelCase : Dict = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(lowerCamelCase__ , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , depth=lowerCamelCase__ , name=F"""stages.{i+1}""" ) )
def lowerCAmelCase__ ( self : str , lowerCamelCase__ : tf.Tensor , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = True ) ->TFBaseModelOutputWithNoAttention:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_UpperCAmelCase : Optional[Any] = hidden_states + (hidden_state,)
_UpperCAmelCase : Dict = stage_module(lowerCamelCase__ )
if output_hidden_states:
_UpperCAmelCase : Tuple = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=lowerCamelCase__ , hidden_states=lowerCamelCase__ )
@keras_serializable
class lowerCAmelCase__ ( tf.keras.layers.Layer ):
lowerCAmelCase : Optional[Any] = RegNetConfig
def __init__( self : Union[str, Any] , lowerCamelCase__ : Any , **lowerCamelCase__ : str ) ->int:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = config
_UpperCAmelCase : Union[str, Any] = TFRegNetEmbeddings(lowerCamelCase__ , name="embedder" )
_UpperCAmelCase : Union[str, Any] = TFRegNetEncoder(lowerCamelCase__ , name="encoder" )
_UpperCAmelCase : Union[str, Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowerCamelCase__ , name="pooler" )
@unpack_inputs
def lowerCAmelCase__ ( self : Any , lowerCamelCase__ : tf.Tensor , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : bool = False , ) ->TFBaseModelOutputWithPoolingAndNoAttention:
'''simple docstring'''
_UpperCAmelCase : Tuple = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCAmelCase : List[str] = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCAmelCase : Union[str, Any] = self.embedder(lowerCamelCase__ , training=lowerCamelCase__ )
_UpperCAmelCase : str = self.encoder(
lowerCamelCase__ , output_hidden_states=lowerCamelCase__ , return_dict=lowerCamelCase__ , training=lowerCamelCase__ )
_UpperCAmelCase : Dict = encoder_outputs[0]
_UpperCAmelCase : Dict = self.pooler(lowerCamelCase__ )
# Change to NCHW output format to have uniformity in the modules
_UpperCAmelCase : Union[str, Any] = tf.transpose(lowerCamelCase__ , perm=(0, 3, 1, 2) )
_UpperCAmelCase : Tuple = tf.transpose(lowerCamelCase__ , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
_UpperCAmelCase : List[str] = tuple([tf.transpose(lowerCamelCase__ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowerCamelCase__ , pooler_output=lowerCamelCase__ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : Tuple = RegNetConfig
lowerCAmelCase : Tuple = "regnet"
lowerCAmelCase : Union[str, Any] = "pixel_values"
@property
def lowerCAmelCase__ ( self : Optional[Any] ) ->Optional[int]:
'''simple docstring'''
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_24, 2_24) , dtype=tf.floataa )}
lowerCamelCase__ = r'\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n'
lowerCamelCase__ = r'\n    Args:\n        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`ConvNextImageProcessor.__call__`] for details.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." , UpperCAmelCase__ , )
class lowerCAmelCase__ ( UpperCAmelCase__ ):
def __init__( self : Any , lowerCamelCase__ : RegNetConfig , *lowerCamelCase__ : Any , **lowerCamelCase__ : List[str] ) ->Optional[int]:
'''simple docstring'''
super().__init__(lowerCamelCase__ , *lowerCamelCase__ , **lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = TFRegNetMainLayer(lowerCamelCase__ , name="regnet" )
@unpack_inputs
@add_start_docstrings_to_model_forward(lowerCamelCase__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCamelCase__ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowerCAmelCase__ ( self : str , lowerCamelCase__ : tf.Tensor , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : Any=False , ) ->Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCAmelCase : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCAmelCase : Union[str, Any] = self.regnet(
pixel_values=lowerCamelCase__ , output_hidden_states=lowerCamelCase__ , return_dict=lowerCamelCase__ , training=lowerCamelCase__ , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , UpperCAmelCase__ , )
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ ):
def __init__( self : str , lowerCamelCase__ : RegNetConfig , *lowerCamelCase__ : List[Any] , **lowerCamelCase__ : Union[str, Any] ) ->Any:
'''simple docstring'''
super().__init__(lowerCamelCase__ , *lowerCamelCase__ , **lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = config.num_labels
_UpperCAmelCase : Dict = TFRegNetMainLayer(lowerCamelCase__ , name="regnet" )
# classification head
_UpperCAmelCase : str = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name="classifier.1" ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(lowerCamelCase__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCamelCase__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowerCAmelCase__ ( self : str , lowerCamelCase__ : tf.Tensor = None , lowerCamelCase__ : tf.Tensor = None , lowerCamelCase__ : bool = None , lowerCamelCase__ : bool = None , lowerCamelCase__ : Dict=False , ) ->Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
'''simple docstring'''
_UpperCAmelCase : str = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCAmelCase : str = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCAmelCase : Union[str, Any] = self.regnet(
lowerCamelCase__ , output_hidden_states=lowerCamelCase__ , return_dict=lowerCamelCase__ , training=lowerCamelCase__ )
_UpperCAmelCase : int = outputs.pooler_output if return_dict else outputs[1]
_UpperCAmelCase : Dict = self.classifier[0](lowerCamelCase__ )
_UpperCAmelCase : str = self.classifier[1](lowerCamelCase__ )
_UpperCAmelCase : Tuple = None if labels is None else self.hf_compute_loss(labels=lowerCamelCase__ , logits=lowerCamelCase__ )
if not return_dict:
_UpperCAmelCase : int = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=lowerCamelCase__ , logits=lowerCamelCase__ , hidden_states=outputs.hidden_states )
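# Hedged usage sketch for the TF port above (illustrative only; the checkpoint id is the
# doc constant used in this file). Inputs follow the PyTorch NCHW layout: the embedder
# transposes to NHWC for tf.keras.layers.Conv2D and the outputs are transposed back.
import tensorflow as tf
from transformers import TFRegNetForImageClassification

tf_model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
pixel_values = tf.random.uniform((1, 3, 224, 224))  # (batch, channels, height, width)
tf_logits = tf_model(pixel_values=pixel_values, training=False).logits  # (1, 1000)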
| 322
| 1
|
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCAmelCase__ :
def __init__( self : Optional[Any] , lowerCamelCase__ : str , lowerCamelCase__ : List[Any]=2 , lowerCamelCase__ : List[Any]=3 , lowerCamelCase__ : Any=4 , lowerCamelCase__ : int=2 , lowerCamelCase__ : int=7 , lowerCamelCase__ : Union[str, Any]=True , lowerCamelCase__ : List[Any]=True , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : int=99 , lowerCamelCase__ : int=36 , lowerCamelCase__ : Any=2 , lowerCamelCase__ : List[Any]=4 , lowerCamelCase__ : Any=37 , lowerCamelCase__ : Optional[Any]="gelu" , lowerCamelCase__ : Dict=0.1 , lowerCamelCase__ : Union[str, Any]=0.1 , lowerCamelCase__ : Any=5_12 , lowerCamelCase__ : List[Any]=16 , lowerCamelCase__ : int=2 , lowerCamelCase__ : List[Any]=0.0_2 , lowerCamelCase__ : Dict=6 , lowerCamelCase__ : Union[str, Any]=6 , lowerCamelCase__ : Any=3 , lowerCamelCase__ : Union[str, Any]=4 , lowerCamelCase__ : int=None , lowerCamelCase__ : Dict=10_00 , ) ->str:
'''simple docstring'''
_UpperCAmelCase : List[Any] = parent
_UpperCAmelCase : List[str] = batch_size
_UpperCAmelCase : Union[str, Any] = num_channels
_UpperCAmelCase : Dict = image_size
_UpperCAmelCase : List[str] = patch_size
_UpperCAmelCase : str = is_training
_UpperCAmelCase : Union[str, Any] = use_input_mask
_UpperCAmelCase : str = use_token_type_ids
_UpperCAmelCase : Optional[int] = use_labels
_UpperCAmelCase : Optional[int] = vocab_size
_UpperCAmelCase : Optional[int] = hidden_size
_UpperCAmelCase : int = num_hidden_layers
_UpperCAmelCase : Optional[Any] = num_attention_heads
_UpperCAmelCase : str = intermediate_size
_UpperCAmelCase : Tuple = hidden_act
_UpperCAmelCase : Any = hidden_dropout_prob
_UpperCAmelCase : Optional[int] = attention_probs_dropout_prob
_UpperCAmelCase : List[str] = max_position_embeddings
_UpperCAmelCase : Dict = type_vocab_size
_UpperCAmelCase : int = type_sequence_label_size
_UpperCAmelCase : Union[str, Any] = initializer_range
_UpperCAmelCase : Any = coordinate_size
_UpperCAmelCase : Optional[int] = shape_size
_UpperCAmelCase : int = num_labels
_UpperCAmelCase : Tuple = num_choices
_UpperCAmelCase : str = scope
_UpperCAmelCase : Optional[int] = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
_UpperCAmelCase : Tuple = text_seq_length
_UpperCAmelCase : List[Any] = (image_size // patch_size) ** 2 + 1
_UpperCAmelCase : int = self.text_seq_length + self.image_seq_length
def lowerCAmelCase__ ( self : int ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : str = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
_UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
_UpperCAmelCase : List[Any] = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
    tmp_coordinate = bbox[i, j, 3]
    bbox[i, j, 3] = bbox[i, j, 1]
    bbox[i, j, 1] = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
    tmp_coordinate = bbox[i, j, 2]
    bbox[i, j, 2] = bbox[i, j, 0]
    bbox[i, j, 0] = tmp_coordinate
_UpperCAmelCase : Union[str, Any] = tf.constant(lowerCamelCase__ )
_UpperCAmelCase : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase : str = None
if self.use_input_mask:
_UpperCAmelCase : Dict = random_attention_mask([self.batch_size, self.text_seq_length] )
_UpperCAmelCase : List[str] = None
if self.use_token_type_ids:
_UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
_UpperCAmelCase : Any = None
_UpperCAmelCase : Tuple = None
if self.use_labels:
_UpperCAmelCase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase : Any = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
_UpperCAmelCase : List[Any] = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def lowerCAmelCase__ ( self : Any , lowerCamelCase__ : List[str] , lowerCamelCase__ : List[str] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : str , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Dict ) ->Any:
'''simple docstring'''
_UpperCAmelCase : Dict = TFLayoutLMvaModel(config=lowerCamelCase__ )
# text + image
_UpperCAmelCase : str = model(lowerCamelCase__ , pixel_values=lowerCamelCase__ , training=lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = model(
lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , training=lowerCamelCase__ , )
_UpperCAmelCase : Optional[int] = model(lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , training=lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
_UpperCAmelCase : List[str] = model(lowerCamelCase__ , training=lowerCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
_UpperCAmelCase : Optional[Any] = model({"pixel_values": pixel_values} , training=lowerCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self : str , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : List[str] , lowerCamelCase__ : int , lowerCamelCase__ : Dict , lowerCamelCase__ : List[str] , lowerCamelCase__ : Dict , lowerCamelCase__ : Optional[Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Any = self.num_labels
_UpperCAmelCase : Any = TFLayoutLMvaForSequenceClassification(config=lowerCamelCase__ )
_UpperCAmelCase : Tuple = model(
lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ , training=lowerCamelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase__ ( self : Dict , lowerCamelCase__ : Any , lowerCamelCase__ : List[Any] , lowerCamelCase__ : List[str] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[Any] ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : int = self.num_labels
_UpperCAmelCase : Tuple = TFLayoutLMvaForTokenClassification(config=lowerCamelCase__ )
_UpperCAmelCase : Dict = model(
lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ , training=lowerCamelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : str , lowerCamelCase__ : int , lowerCamelCase__ : Dict , lowerCamelCase__ : Tuple , lowerCamelCase__ : Union[str, Any] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Dict = 2
_UpperCAmelCase : Union[str, Any] = TFLayoutLMvaForQuestionAnswering(config=lowerCamelCase__ )
_UpperCAmelCase : Dict = model(
lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , start_positions=lowerCamelCase__ , end_positions=lowerCamelCase__ , training=lowerCamelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase__ ( self : Dict ) ->str:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs()
((_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase)) : Union[str, Any] = config_and_inputs
_UpperCAmelCase : List[str] = {
"input_ids": input_ids,
"bbox": bbox,
"pixel_values": pixel_values,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_tf
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
lowerCAmelCase : Optional[Any] = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
lowerCAmelCase : List[str] = (
{"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel}
if is_tf_available()
else {}
)
lowerCAmelCase : Tuple = False
lowerCAmelCase : List[Any] = False
lowerCAmelCase : Optional[Any] = False
def lowerCAmelCase__ ( self : Any , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Any , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Dict , lowerCamelCase__ : int ) ->Tuple:
'''simple docstring'''
return True
def lowerCAmelCase__ ( self : int , lowerCamelCase__ : List[Any] , lowerCamelCase__ : int , lowerCamelCase__ : Dict=False ) ->dict:
'''simple docstring'''
_UpperCAmelCase : List[str] = copy.deepcopy(lowerCamelCase__ )
if model_class in get_values(lowerCamelCase__ ):
_UpperCAmelCase : Optional[Any] = {
k: tf.tile(tf.expand_dims(lowerCamelCase__ , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(lowerCamelCase__ , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(lowerCamelCase__ ):
_UpperCAmelCase : List[str] = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(lowerCamelCase__ ):
_UpperCAmelCase : Optional[int] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
_UpperCAmelCase : str = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(lowerCamelCase__ ):
_UpperCAmelCase : Optional[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(lowerCamelCase__ ):
_UpperCAmelCase : Optional[Any] = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
return inputs_dict
def lowerCAmelCase__ ( self : Any ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Dict = TFLayoutLMvaModelTester(self )
_UpperCAmelCase : Tuple = ConfigTester(self , config_class=lowerCamelCase__ , hidden_size=37 )
def lowerCAmelCase__ ( self : Any ) ->str:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self : Tuple ) ->str:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : List[Any] = model_class(lowerCamelCase__ )
if getattr(lowerCamelCase__ , "hf_compute_loss" , lowerCamelCase__ ):
# The number of elements in the loss should be the same as the number of elements in the label
_UpperCAmelCase : Optional[int] = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ )
_UpperCAmelCase : int = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=lowerCamelCase__ )[0]
]
_UpperCAmelCase : str = added_label.shape.as_list()[:1]
# Test that the model correctly computes the loss with kwargs
_UpperCAmelCase : List[Any] = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = prepared_for_class.pop("input_ids" )
_UpperCAmelCase : Dict = model(lowerCamelCase__ , **lowerCamelCase__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that the model correctly computes the loss when we mask some positions
_UpperCAmelCase : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ )
_UpperCAmelCase : List[Any] = prepared_for_class.pop("input_ids" )
if "labels" in prepared_for_class:
_UpperCAmelCase : List[str] = prepared_for_class["labels"].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
_UpperCAmelCase : Union[str, Any] = -1_00
_UpperCAmelCase : List[str] = tf.convert_to_tensor(lowerCamelCase__ )
_UpperCAmelCase : List[Any] = model(lowerCamelCase__ , **lowerCamelCase__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that the model correctly computes the loss with a dict
_UpperCAmelCase : int = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ )
_UpperCAmelCase : Any = model(lowerCamelCase__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that the model correctly computes the loss with a tuple
_UpperCAmelCase : Any = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ )
# Get keys that were added with the _prepare_for_class function
_UpperCAmelCase : Dict = prepared_for_class.keys() - inputs_dict.keys()
_UpperCAmelCase : List[str] = inspect.signature(model.call ).parameters
_UpperCAmelCase : Optional[Any] = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
_UpperCAmelCase : Optional[Any] = {0: "input_ids"}
for label_key in label_keys:
_UpperCAmelCase : Any = signature_names.index(lowerCamelCase__ )
_UpperCAmelCase : Dict = label_key
_UpperCAmelCase : Optional[Any] = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
_UpperCAmelCase : int = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
_UpperCAmelCase : Union[str, Any] = prepared_for_class[value]
_UpperCAmelCase : str = tuple(lowerCamelCase__ )
# Send to model
_UpperCAmelCase : Any = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def lowerCAmelCase__ ( self : int ) ->str:
'''simple docstring'''
((_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase)) : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->int:
'''simple docstring'''
((_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase)) : str = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_UpperCAmelCase : Any = type
self.model_tester.create_and_check_model(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def lowerCAmelCase__ ( self : Tuple ) ->Optional[Any]:
'''simple docstring'''
((_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase)) : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def lowerCAmelCase__ ( self : Tuple ) ->Any:
'''simple docstring'''
((_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase)) : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def lowerCAmelCase__ ( self : Any ) ->Tuple:
'''simple docstring'''
((_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase)) : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
@slow
def lowerCAmelCase__ ( self : Tuple ) ->List[Any]:
'''simple docstring'''
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase : Any = TFLayoutLMvaModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def __lowerCAmelCase ():
_UpperCAmelCase : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
class lowerCAmelCase__ ( unittest.TestCase ):
@cached_property
def lowerCAmelCase__ ( self : Tuple ) ->Dict:
'''simple docstring'''
return LayoutLMvaImageProcessor(apply_ocr=lowerCamelCase__ ) if is_vision_available() else None
@slow
def lowerCAmelCase__ ( self : Dict ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : str = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" )
_UpperCAmelCase : List[Any] = self.default_image_processor
_UpperCAmelCase : Optional[int] = prepare_img()
_UpperCAmelCase : int = image_processor(images=lowerCamelCase__ , return_tensors="tf" ).pixel_values
_UpperCAmelCase : Optional[int] = tf.constant([[1, 2]] )
_UpperCAmelCase : List[str] = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
# forward pass
_UpperCAmelCase : List[Any] = model(input_ids=lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , training=lowerCamelCase__ )
# verify the logits
_UpperCAmelCase : Dict = (1, 1_99, 7_68)
self.assertEqual(outputs.last_hidden_state.shape , lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = tf.constant(
[[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCamelCase__ , atol=1E-4 ) )
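# Hedged sketch (illustrative only) of the multimodal input contract exercised by the
# integration test above: token ids, one (x0, y0, x1, y1) box per token, and an image.
# Sequence length = 2 text tokens + (224 / 16)**2 = 196 patches + 1 CLS token = 199.
import tensorflow as tf
from transformers import TFLayoutLMv3Model

lv3 = TFLayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base")
demo_input_ids = tf.constant([[1, 2]])
demo_bbox = tf.constant([[[1, 2, 3, 4], [5, 6, 7, 8]]])
demo_pixels = tf.random.uniform((1, 3, 224, 224))
demo_out = lv3(input_ids=demo_input_ids, bbox=demo_bbox, pixel_values=demo_pixels, training=False)
print(demo_out.last_hidden_state.shape)  # (1, 199, 768)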
| 322
|
'''simple docstring'''
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module ):
    if is_torch_version("<" , "2.0.0" ) or not hasattr(torch , "_dynamo" ):
        return False
    return isinstance(module , torch._dynamo.eval_frame.OptimizedModule )
def extract_model_from_parallel(model , keep_fp32_wrapper = True ):
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
    is_compiled = is_compiled_module(model )
    if is_compiled:
        compiled_model = model
        model = model._orig_mod
    if is_deepspeed_available():
        options += (DeepSpeedEngine,)
    while isinstance(model , options ):
        model = model.module
    if not keep_fp32_wrapper:
        forward = getattr(model , "forward" )
        original_forward = model.__dict__.pop("_original_forward" , None )
        if original_forward is not None:
            while hasattr(forward , "__wrapped__" ):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
    if getattr(model , "_converted_to_transformer_engine" , False ):
        convert_model(model , to_transformer_engine=False )
    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model
    return model
def wait_for_everyone():
    PartialState().wait_for_everyone()


def save(obj , f ):
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj , f )
    elif PartialState().local_process_index == 0:
        torch.save(obj , f )
@contextmanager
def patch_environment(**kwargs ):
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value )
    yield
    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]
def get_pretty_name(obj ):
    if not hasattr(obj , "__qualname__" ) and not hasattr(obj , "__name__" ):
        obj = getattr(obj , "__class__" , obj )
    if hasattr(obj , "__qualname__" ):
        return obj.__qualname__
    if hasattr(obj , "__name__" ):
        return obj.__name__
    return str(obj )
def merge_dicts(source , destination ):
    for key, value in source.items():
        if isinstance(value , dict ):
            node = destination.setdefault(key , {} )
            merge_dicts(value , node )
        else:
            destination[key] = value
    return destination
def is_port_in_use(port = None ):
    if port is None:
        port = 29_500
    with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
        return s.connect_ex(("localhost", port) ) == 0
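# Hedged usage sketch for two of the helpers above (illustrative only; assumes
# MASTER_PORT is not already set). patch_environment temporarily exports upper-cased
# environment variables; merge_dicts deep-merges `source` into `destination` in place.
import os

with patch_environment(master_port="29501"):
    assert os.environ["MASTER_PORT"] == "29501"
assert "MASTER_PORT" not in os.environ

defaults = {"optim": {"lr": 1e-3, "betas": (0.9, 0.999)}}
overrides = {"optim": {"lr": 5e-4}}
merge_dicts(overrides, defaults)  # defaults["optim"]["lr"] is now 5e-4; betas untouched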
| 322
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase__ = {'configuration_deit': ['DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DeiTConfig', 'DeiTOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ['DeiTFeatureExtractor']
lowerCamelCase__ = ['DeiTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DeiTForImageClassification',
'DeiTForImageClassificationWithTeacher',
'DeiTForMaskedImageModeling',
'DeiTModel',
'DeiTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDeiTForImageClassification',
'TFDeiTForImageClassificationWithTeacher',
'TFDeiTForMaskedImageModeling',
'TFDeiTModel',
'TFDeiTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
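# Hedged sketch (illustrative only) of what the lazy structure above buys: importing
# the package is cheap because submodules are loaded on first attribute access, so the
# heavy torch/TF modeling files are only imported when a symbol is actually used.
from transformers.models.deit import DeiTConfig  # resolved through _LazyModule on access

deit_config = DeiTConfig()
print(deit_config.model_type)  # "deit"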
| 322
|
'''simple docstring'''
def is_power_of_two(number ):
    if number < 0:
        raise ValueError("number must not be negative" )
    return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
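# Worked illustration of the bitwise identity used above: a positive power of two has
# exactly one set bit, so subtracting 1 flips every lower bit and the AND is zero
# (e.g. 8 & 7 == 0b1000 & 0b0111 == 0). Note that 0 is also reported as a power of
# two here, since 0 & -1 == 0.
for n in (1, 2, 3, 8, 12, 1024):
    print(f"{n:5d} {n:012b} -> {is_power_of_two(n)}")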
| 322
| 1
|
'''simple docstring'''
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
lowerCamelCase__ = 'scheduler_config.json'
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : Optional[int] = 1
lowerCAmelCase : Any = 2
lowerCAmelCase : Union[str, Any] = 3
lowerCAmelCase : Union[str, Any] = 4
lowerCAmelCase : Union[str, Any] = 5
@dataclass
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : jnp.ndarray
class lowerCAmelCase__ :
lowerCAmelCase : Any = SCHEDULER_CONFIG_NAME
lowerCAmelCase : int = ["dtype"]
lowerCAmelCase : Dict = []
lowerCAmelCase : Optional[Any] = True
@classmethod
def lowerCAmelCase__ ( cls : List[Any] , lowerCamelCase__ : Dict[str, Any] = None , lowerCamelCase__ : Optional[str] = None , lowerCamelCase__ : List[str]=False , **lowerCamelCase__ : str , ) ->Any:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : int = cls.load_config(
pretrained_model_name_or_path=lowerCamelCase__ , subfolder=lowerCamelCase__ , return_unused_kwargs=lowerCamelCase__ , **lowerCamelCase__ , )
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = cls.from_config(lowerCamelCase__ , return_unused_kwargs=lowerCamelCase__ , **lowerCamelCase__ )
if hasattr(lowerCamelCase__ , "create_state" ) and getattr(lowerCamelCase__ , "has_state" , lowerCamelCase__ ):
_UpperCAmelCase : List[Any] = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def lowerCAmelCase__ ( self : Dict , lowerCamelCase__ : Union[str, os.PathLike] , lowerCamelCase__ : bool = False , **lowerCamelCase__ : Dict ) ->Optional[int]:
'''simple docstring'''
self.save_config(save_directory=lowerCamelCase__ , push_to_hub=lowerCamelCase__ , **lowerCamelCase__ )
@property
def lowerCAmelCase__ ( self : Optional[Any] ) ->Tuple:
'''simple docstring'''
return self._get_compatibles()
@classmethod
def lowerCAmelCase__ ( cls : Dict ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : List[str] = list(set([cls.__name__] + cls._compatibles ) )
_UpperCAmelCase : Optional[int] = importlib.import_module(__name__.split("." )[0] )
_UpperCAmelCase : Union[str, Any] = [
getattr(lowerCamelCase__ , lowerCamelCase__ ) for c in compatible_classes_str if hasattr(lowerCamelCase__ , lowerCamelCase__ )
]
return compatible_classes
def broadcast_to_shape_from_left(x , shape ):
    assert len(shape ) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape ) - x.ndim) ) , shape )
def betas_for_alpha_bar(num_diffusion_timesteps , max_beta=0.999 , dtype=jnp.float32 ):
    def alpha_bar(time_step ):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2 ) ** 2

    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2 ) / alpha_bar(t1 ) , max_beta ) )
    return jnp.array(betas , dtype=dtype )
@flax.struct.dataclass
class lowerCAmelCase__ :
lowerCAmelCase : jnp.ndarray
lowerCAmelCase : jnp.ndarray
lowerCAmelCase : jnp.ndarray
@classmethod
def lowerCAmelCase__ ( cls : Tuple , lowerCamelCase__ : List[Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase : int = scheduler.config
if config.trained_betas is not None:
_UpperCAmelCase : Dict = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
elif config.beta_schedule == "linear":
_UpperCAmelCase : Any = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_UpperCAmelCase : int = (
jnp.linspace(
config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_UpperCAmelCase : Any = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
else:
raise NotImplementedError(
F"""beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}""" )
_UpperCAmelCase : Tuple = 1.0 - betas
_UpperCAmelCase : Tuple = jnp.cumprod(lowerCamelCase__ , axis=0 )
return cls(
alphas=lowerCamelCase__ , betas=lowerCamelCase__ , alphas_cumprod=lowerCamelCase__ , )
def get_sqrt_alpha_prod(state , original_samples , noise , timesteps ):
    alphas_cumprod = state.alphas_cumprod
    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod , original_samples.shape )
    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod , original_samples.shape )
    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def add_noise_common(state , original_samples , noise , timesteps ):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state , original_samples , noise , timesteps )
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples
def get_velocity_common(state , sample , noise , timesteps ):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state , sample , noise , timesteps )
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
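# Hedged numerical sketch (illustrative only) of the forward-diffusion identity the
# helpers above implement: x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps,
# here with a plain linear beta schedule and zero noise, so x_t is just a scaled x_0.
import jax.numpy as jnp

demo_betas = jnp.linspace(1e-4, 2e-2, 1000)
demo_alphas_cumprod = jnp.cumprod(1.0 - demo_betas, axis=0)
t = 500
x0 = jnp.ones((1, 3, 8, 8))
eps = jnp.zeros_like(x0)
xt = (demo_alphas_cumprod[t] ** 0.5) * x0 + ((1 - demo_alphas_cumprod[t]) ** 0.5) * eps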
| 322
|
'''simple docstring'''
def perfect(number: int) -> bool:
    return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print('Program to check whether a number is a Perfect number or not...')
lowerCamelCase__ = int(input('Enter number: ').strip())
print(F'''{number} is {"" if perfect(number) else "not "}a Perfect Number.''')
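# Quick illustration: 6 and 28 are perfect (6 = 1 + 2 + 3, 28 = 1 + 2 + 4 + 7 + 14),
# while 12 is not (1 + 2 + 3 + 4 + 6 = 16 != 12).
for n in (6, 12, 28, 496):
    print(n, perfect(n))  # True, False, True, True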
| 322
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'kssteven/ibert-roberta-base': 'https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json',
'kssteven/ibert-roberta-large': 'https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json',
'kssteven/ibert-roberta-large-mnli': (
'https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'
),
}
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : Optional[Any] = "ibert"
def __init__( self : List[Any] , lowerCamelCase__ : Dict=3_05_22 , lowerCamelCase__ : Dict=7_68 , lowerCamelCase__ : Union[str, Any]=12 , lowerCamelCase__ : Tuple=12 , lowerCamelCase__ : List[Any]=30_72 , lowerCamelCase__ : Any="gelu" , lowerCamelCase__ : int=0.1 , lowerCamelCase__ : List[Any]=0.1 , lowerCamelCase__ : str=5_12 , lowerCamelCase__ : Dict=2 , lowerCamelCase__ : List[str]=0.0_2 , lowerCamelCase__ : Optional[Any]=1E-12 , lowerCamelCase__ : List[str]=1 , lowerCamelCase__ : Tuple=0 , lowerCamelCase__ : Tuple=2 , lowerCamelCase__ : Tuple="absolute" , lowerCamelCase__ : Optional[Any]=False , lowerCamelCase__ : Dict="none" , **lowerCamelCase__ : str , ) ->int:
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ , **lowerCamelCase__ )
_UpperCAmelCase : Tuple = vocab_size
_UpperCAmelCase : Dict = hidden_size
_UpperCAmelCase : int = num_hidden_layers
_UpperCAmelCase : str = num_attention_heads
_UpperCAmelCase : Optional[int] = hidden_act
_UpperCAmelCase : List[str] = intermediate_size
_UpperCAmelCase : List[Any] = hidden_dropout_prob
_UpperCAmelCase : Optional[Any] = attention_probs_dropout_prob
_UpperCAmelCase : List[Any] = max_position_embeddings
_UpperCAmelCase : Dict = type_vocab_size
_UpperCAmelCase : List[Any] = initializer_range
_UpperCAmelCase : Optional[Any] = layer_norm_eps
_UpperCAmelCase : Optional[int] = position_embedding_type
_UpperCAmelCase : Dict = quant_mode
_UpperCAmelCase : str = force_dequant
class lowerCAmelCase__ ( UpperCAmelCase__ ):
@property
def lowerCAmelCase__ ( self : Dict ) ->Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
_UpperCAmelCase : List[Any] = {0: "batch", 1: "choice", 2: "sequence"}
else:
_UpperCAmelCase : List[str] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
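# Hedged sketch (illustrative only): instantiating the config with its
# quantization-specific knobs. quant_mode=True switches the model to the
# integer-only I-BERT mode; force_dequant="none" keeps every op quantized.
from transformers import IBertConfig

ibert_config = IBertConfig(quant_mode=True, force_dequant="none")
print(ibert_config.hidden_size, ibert_config.num_hidden_layers)  # 768 12 by default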
| 322
|
'''simple docstring'''
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float] , x: float ) -> float:
    return sum(c * (x**i) for i, c in enumerate(poly ) )


def horner(poly: Sequence[float] , x: float ) -> float:
    result = 0.0
    for coeff in reversed(poly ):
        result = result * x + coeff
    return result
if __name__ == "__main__":
lowerCamelCase__ = (0.0, 0.0, 5.0, 9.3, 7.0)
lowerCamelCase__ = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
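# Why Horner's rule: it evaluates a degree-n polynomial with n multiplications and n
# additions by nesting, e.g. 5x^2 + 9.3x^3 + 7x^4 = x^2 * (5 + x * (9.3 + 7 * x)),
# instead of recomputing each power of x as the naive evaluation above does.
demo_poly, demo_x = (0.0, 0.0, 5.0, 9.3, 7.0), 10.0
assert abs(evaluate_poly(demo_poly, demo_x) - horner(demo_poly, demo_x)) < 1e-6  # both ~79800.0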
| 322
| 1
|
'''simple docstring'''
import re
def is_sri_lankan_phone_number(phone: str ) -> bool:
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$" )
    return bool(re.search(pattern , phone ) )
if __name__ == "__main__":
lowerCamelCase__ = '0094702343221'
print(is_sri_lankan_phone_number(phone))
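# Illustration of the prefixes the pattern above accepts: local "0", bare "94",
# international "+94", and "0094", each followed by a 7x mobile operator code
# (x in {0, 1, 2, 4, 5, 6, 7, 8}), an optional "-" or " " separator, and seven digits.
for candidate in ("0094702343221", "+94713427589", "0112345678"):
    print(candidate, is_sri_lankan_phone_number(candidate))  # True, True, False (landline)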
| 322
|
'''simple docstring'''
def find_min(arr ):
    n = len(arr )
    s = sum(arr )
    dp = [[False for x in range(s + 1 )] for y in range(n + 1 )]
    for i in range(1 , n + 1 ):
        dp[i][0] = True
    for i in range(1 , s + 1 ):
        dp[0][i] = False
    for i in range(1 , n + 1 ):
        for j in range(1 , s + 1 ):
            # an item is either skipped (row above) or taken (subtract its value)
            dp[i][j] = dp[i - 1][j]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    for j in range(int(s / 2 ) , -1 , -1 ):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff
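# Worked example for the subset-sum DP above: arr = [1, 6, 11, 5] sums to 23, and the
# largest reachable subset sum not exceeding half the total is 11 (e.g. {6, 5} or {11}),
# so the minimum partition difference is 23 - 2 * 11 = 1.
print(find_min([1, 6, 11, 5]))  # 1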
| 322
| 1
|
'''simple docstring'''
import argparse
import torch
from transformers import GPTaLMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser(
description=(
'Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='roberta', choices=['roberta', 'gpt2'])
parser.add_argument('--model_name', default='roberta-large', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_roberta_048131723.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
lowerCamelCase__ = parser.parse_args()
if args.model_type == "roberta":
lowerCamelCase__ = RobertaForMaskedLM.from_pretrained(args.model_name)
lowerCamelCase__ = 'roberta'
elif args.model_type == "gpt2":
lowerCamelCase__ = GPTaLMHeadModel.from_pretrained(args.model_name)
lowerCamelCase__ = 'transformer'
lowerCamelCase__ = model.state_dict()
lowerCamelCase__ = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
lowerCamelCase__ = state_dict[F'''{prefix}.{param_name}''']
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
lowerCamelCase__ = F'''{prefix}.embeddings.{w}.weight'''
lowerCamelCase__ = state_dict[param_name]
for w in ["weight", "bias"]:
lowerCamelCase__ = F'''{prefix}.embeddings.LayerNorm.{w}'''
lowerCamelCase__ = state_dict[param_name]
# Transformer Blocks #
lowerCamelCase__ = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
lowerCamelCase__ = state_dict[
F'''{prefix}.h.{teacher_idx}.{layer}.{w}'''
]
lowerCamelCase__ = state_dict[F'''{prefix}.h.{teacher_idx}.attn.bias''']
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}'''
]
std_idx += 1
    # Language Modeling Head #
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
lowerCamelCase__ = state_dict[F'''{layer}''']
if args.vocab_transform:
for w in ["weight", "bias"]:
lowerCamelCase__ = state_dict[F'''lm_head.dense.{w}''']
lowerCamelCase__ = state_dict[F'''lm_head.layer_norm.{w}''']
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
lowerCamelCase__ = state_dict[F'''{prefix}.ln_f.{w}''']
lowerCamelCase__ = state_dict['lm_head.weight']
print(F'''N layers selected for distillation: {std_idx}''')
print(F'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(F'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
| 322
|
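The script above transfers a fixed subset of teacher layers (0, 2, 4, 7, 9, 11) into a student checkpoint, renumbering them 0-5. A hedged generic sketch of that selection step (the state-dict layout here is a toy assumption):

import torch

def extract_layers(teacher_sd: dict, teacher_layers: list[int], prefix: str = "encoder.layer") -> dict:
    # Copy every parameter of the chosen teacher layers, renumbering the layer index.
    student_sd = {}
    for std_idx, teacher_idx in enumerate(teacher_layers):
        old, new = f"{prefix}.{teacher_idx}.", f"{prefix}.{std_idx}."
        for name, tensor in teacher_sd.items():
            if name.startswith(old):
                student_sd[new + name[len(old):]] = tensor
    return student_sd

teacher_sd = {f"encoder.layer.{i}.weight": torch.randn(2) for i in range(12)}
student_sd = extract_layers(teacher_sd, [0, 2, 4, 7, 9, 11])
assert sorted(student_sd) == [f"encoder.layer.{i}.weight" for i in range(6)]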
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'microsoft/resnet-50': 'https://huggingface.co/microsoft/resnet-50/blob/main/config.json',
}
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ ):
lowerCAmelCase : int = "resnet"
lowerCAmelCase : Union[str, Any] = ["basic", "bottleneck"]
def __init__( self : Dict , lowerCamelCase__ : Tuple=3 , lowerCamelCase__ : Any=64 , lowerCamelCase__ : Optional[int]=[2_56, 5_12, 10_24, 20_48] , lowerCamelCase__ : int=[3, 4, 6, 3] , lowerCamelCase__ : Dict="bottleneck" , lowerCamelCase__ : Dict="relu" , lowerCamelCase__ : List[Any]=False , lowerCamelCase__ : Any=None , lowerCamelCase__ : int=None , **lowerCamelCase__ : Tuple , ) ->List[str]:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
if layer_type not in self.layer_types:
raise ValueError(F"""layer_type={layer_type} is not one of {','.join(self.layer_types )}""" )
_UpperCAmelCase : str = num_channels
_UpperCAmelCase : List[str] = embedding_size
_UpperCAmelCase : Tuple = hidden_sizes
_UpperCAmelCase : Dict = depths
_UpperCAmelCase : List[Any] = layer_type
_UpperCAmelCase : Optional[int] = hidden_act
_UpperCAmelCase : Tuple = downsample_in_first_stage
_UpperCAmelCase : str = ["stem"] + [F"""stage{idx}""" for idx in range(1 , len(lowerCamelCase__ ) + 1 )]
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = get_aligned_output_features_output_indices(
out_features=lowerCamelCase__ , out_indices=lowerCamelCase__ , stage_names=self.stage_names )
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : Optional[Any] = version.parse("1.11" )
@property
def lowerCAmelCase__ ( self : Optional[Any] ) ->Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def lowerCAmelCase__ ( self : str ) ->float:
'''simple docstring'''
return 1E-3
| 322
| 1
|
'''simple docstring'''
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
lowerCamelCase__ = version.parse(version.parse(torch.__version__).base_version) < version.parse('1.11')
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=False , ):
output_path.parent.mkdir(parents=__lowerCAmelCase , exist_ok=__lowerCAmelCase )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
__lowerCAmelCase , __lowerCAmelCase , f=output_path.as_posix() , input_names=__lowerCAmelCase , output_names=__lowerCAmelCase , dynamic_axes=__lowerCAmelCase , do_constant_folding=__lowerCAmelCase , use_external_data_format=__lowerCAmelCase , enable_onnx_checker=__lowerCAmelCase , opset_version=__lowerCAmelCase , )
else:
export(
__lowerCAmelCase , __lowerCAmelCase , f=output_path.as_posix() , input_names=__lowerCAmelCase , output_names=__lowerCAmelCase , dynamic_axes=__lowerCAmelCase , do_constant_folding=__lowerCAmelCase , opset_version=__lowerCAmelCase , )
@torch.no_grad()
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = False ):
    _UpperCAmelCase : int = torch.float16 if fpaa else torch.float32
if fpaa and torch.cuda.is_available():
_UpperCAmelCase : Optional[int] = "cuda"
elif fpaa and not torch.cuda.is_available():
raise ValueError("`float16` model export is only supported on GPUs with CUDA" )
else:
_UpperCAmelCase : Tuple = "cpu"
_UpperCAmelCase : Any = StableDiffusionPipeline.from_pretrained(__lowerCAmelCase , torch_dtype=__lowerCAmelCase ).to(__lowerCAmelCase )
_UpperCAmelCase : Optional[int] = Path(__lowerCAmelCase )
# TEXT ENCODER
_UpperCAmelCase : Union[str, Any] = pipeline.text_encoder.config.max_position_embeddings
_UpperCAmelCase : Optional[int] = pipeline.text_encoder.config.hidden_size
_UpperCAmelCase : Optional[int] = pipeline.tokenizer(
"A sample prompt" , padding="max_length" , max_length=pipeline.tokenizer.model_max_length , truncation=__lowerCAmelCase , return_tensors="pt" , )
onnx_export(
pipeline.text_encoder , model_args=(text_input.input_ids.to(device=__lowerCAmelCase , dtype=torch.intaa )) , output_path=output_path / "text_encoder" / "model.onnx" , ordered_input_names=["input_ids"] , output_names=["last_hidden_state", "pooler_output"] , dynamic_axes={
"input_ids": {0: "batch", 1: "sequence"},
} , opset=__lowerCAmelCase , )
del pipeline.text_encoder
# UNET
_UpperCAmelCase : Optional[int] = pipeline.unet.config.in_channels
_UpperCAmelCase : Any = pipeline.unet.config.sample_size
_UpperCAmelCase : Optional[int] = output_path / "unet" / "model.onnx"
onnx_export(
pipeline.unet , model_args=(
torch.randn(2 , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ).to(device=__lowerCAmelCase , dtype=__lowerCAmelCase ),
torch.randn(2 ).to(device=__lowerCAmelCase , dtype=__lowerCAmelCase ),
torch.randn(2 , __lowerCAmelCase , __lowerCAmelCase ).to(device=__lowerCAmelCase , dtype=__lowerCAmelCase ),
False,
) , output_path=__lowerCAmelCase , ordered_input_names=["sample", "timestep", "encoder_hidden_states", "return_dict"] , output_names=["out_sample"] , dynamic_axes={
"sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
"timestep": {0: "batch"},
"encoder_hidden_states": {0: "batch", 1: "sequence"},
} , opset=__lowerCAmelCase , use_external_data_format=__lowerCAmelCase , )
_UpperCAmelCase : List[str] = str(unet_path.absolute().as_posix() )
_UpperCAmelCase : int = os.path.dirname(__lowerCAmelCase )
_UpperCAmelCase : Any = onnx.load(__lowerCAmelCase )
# clean up existing tensor files
shutil.rmtree(__lowerCAmelCase )
os.mkdir(__lowerCAmelCase )
# collate external tensor files into one
onnx.save_model(
__lowerCAmelCase , __lowerCAmelCase , save_as_external_data=__lowerCAmelCase , all_tensors_to_one_file=__lowerCAmelCase , location="weights.pb" , convert_attribute=__lowerCAmelCase , )
del pipeline.unet
# VAE ENCODER
_UpperCAmelCase : List[Any] = pipeline.vae
_UpperCAmelCase : Tuple = vae_encoder.config.in_channels
_UpperCAmelCase : List[Any] = vae_encoder.config.sample_size
# need to get the raw tensor output (sample) from the encoder
_UpperCAmelCase : Tuple = lambda __lowerCAmelCase , __lowerCAmelCase : vae_encoder.encode(__lowerCAmelCase , __lowerCAmelCase )[0].sample()
onnx_export(
__lowerCAmelCase , model_args=(
torch.randn(1 , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ).to(device=__lowerCAmelCase , dtype=__lowerCAmelCase ),
False,
) , output_path=output_path / "vae_encoder" / "model.onnx" , ordered_input_names=["sample", "return_dict"] , output_names=["latent_sample"] , dynamic_axes={
"sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
} , opset=__lowerCAmelCase , )
# VAE DECODER
_UpperCAmelCase : List[str] = pipeline.vae
_UpperCAmelCase : Dict = vae_decoder.config.latent_channels
_UpperCAmelCase : int = vae_decoder.config.out_channels
# forward only through the decoder part
    _UpperCAmelCase : str = vae_decoder.decode
onnx_export(
__lowerCAmelCase , model_args=(
torch.randn(1 , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ).to(device=__lowerCAmelCase , dtype=__lowerCAmelCase ),
False,
) , output_path=output_path / "vae_decoder" / "model.onnx" , ordered_input_names=["latent_sample", "return_dict"] , output_names=["sample"] , dynamic_axes={
"latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
} , opset=__lowerCAmelCase , )
del pipeline.vae
# SAFETY CHECKER
if pipeline.safety_checker is not None:
_UpperCAmelCase : Optional[Any] = pipeline.safety_checker
_UpperCAmelCase : List[Any] = safety_checker.config.vision_config.num_channels
_UpperCAmelCase : List[str] = safety_checker.config.vision_config.image_size
_UpperCAmelCase : Union[str, Any] = safety_checker.forward_onnx
onnx_export(
pipeline.safety_checker , model_args=(
torch.randn(
1 , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ).to(device=__lowerCAmelCase , dtype=__lowerCAmelCase ),
torch.randn(1 , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ).to(device=__lowerCAmelCase , dtype=__lowerCAmelCase ),
) , output_path=output_path / "safety_checker" / "model.onnx" , ordered_input_names=["clip_input", "images"] , output_names=["out_images", "has_nsfw_concepts"] , dynamic_axes={
"clip_input": {0: "batch", 1: "channels", 2: "height", 3: "width"},
"images": {0: "batch", 1: "height", 2: "width", 3: "channels"},
} , opset=__lowerCAmelCase , )
del pipeline.safety_checker
_UpperCAmelCase : Dict = OnnxRuntimeModel.from_pretrained(output_path / "safety_checker" )
_UpperCAmelCase : List[str] = pipeline.feature_extractor
else:
_UpperCAmelCase : List[str] = None
_UpperCAmelCase : int = None
_UpperCAmelCase : List[str] = OnnxStableDiffusionPipeline(
vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_encoder" ) , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_decoder" ) , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / "text_encoder" ) , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / "unet" ) , scheduler=pipeline.scheduler , safety_checker=__lowerCAmelCase , feature_extractor=__lowerCAmelCase , requires_safety_checker=safety_checker is not None , )
onnx_pipeline.save_pretrained(__lowerCAmelCase )
print("ONNX pipeline saved to" , __lowerCAmelCase )
del pipeline
del onnx_pipeline
_UpperCAmelCase : Optional[Any] = OnnxStableDiffusionPipeline.from_pretrained(__lowerCAmelCase , provider="CPUExecutionProvider" )
print("ONNX pipeline is loadable" )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument(
'--model_path',
type=str,
required=True,
help='Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).',
)
parser.add_argument('--output_path', type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--opset',
default=14,
type=int,
help='The version of the ONNX operator set to use.',
)
parser.add_argument('--fp16', action='store_true', default=False, help='Export the models in `float16` mode')
lowerCamelCase__ = parser.parse_args()
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
| 322
|
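Each component above goes through the same `torch.onnx.export` call pattern: example inputs, named inputs/outputs, and dynamic axes for the batch and spatial dimensions. A minimal sketch of that pattern with a toy model standing in for the pipeline components:

import torch

class TinyNet(torch.nn.Module):
    def forward(self, sample: torch.Tensor) -> torch.Tensor:
        return torch.relu(sample)

model = TinyNet().eval()
dummy = torch.randn(1, 4, 8, 8)
torch.onnx.export(
    model,
    (dummy,),
    "tiny.onnx",
    input_names=["sample"],
    output_names=["out_sample"],
    dynamic_axes={"sample": {0: "batch", 2: "height", 3: "width"}},
    do_constant_folding=True,
    opset_version=14,  # matches the script's default opset
)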
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
lowerCamelCase__ = transforms.Compose(
[
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def __lowerCAmelCase (__lowerCAmelCase ):
if isinstance(__lowerCAmelCase , torch.Tensor ):
return image
elif isinstance(__lowerCAmelCase , PIL.Image.Image ):
_UpperCAmelCase : int = [image]
_UpperCAmelCase : str = [trans(img.convert("RGB" ) ) for img in image]
_UpperCAmelCase : Optional[Any] = torch.stack(__lowerCAmelCase )
return image
class lowerCAmelCase__ ( UpperCAmelCase__ ):
def __init__( self : Tuple , lowerCamelCase__ : int , lowerCamelCase__ : int ) ->int:
'''simple docstring'''
super().__init__()
# make sure scheduler can always be converted to DDIM
_UpperCAmelCase : Tuple = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=lowerCamelCase__ , scheduler=lowerCamelCase__ )
def lowerCAmelCase__ ( self : Dict , lowerCamelCase__ : str ) ->Union[str, Any]:
'''simple docstring'''
if strength < 0 or strength > 1:
raise ValueError(F"""The value of strength should in [0.0, 1.0] but is {strength}""" )
def lowerCAmelCase__ ( self : Dict , lowerCamelCase__ : Dict , lowerCamelCase__ : List[str] , lowerCamelCase__ : int ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : str = min(int(num_inference_steps * strength ) , lowerCamelCase__ )
_UpperCAmelCase : str = max(num_inference_steps - init_timestep , 0 )
_UpperCAmelCase : List[str] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : List[str] , lowerCamelCase__ : Any , lowerCamelCase__ : str , lowerCamelCase__ : str , lowerCamelCase__ : Dict , lowerCamelCase__ : Optional[Any]=None ) ->str:
'''simple docstring'''
if not isinstance(lowerCamelCase__ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(lowerCamelCase__ )}""" )
_UpperCAmelCase : Union[str, Any] = image.to(device=lowerCamelCase__ , dtype=lowerCamelCase__ )
if isinstance(lowerCamelCase__ , lowerCamelCase__ ) and len(lowerCamelCase__ ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(lowerCamelCase__ )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
_UpperCAmelCase : List[str] = init_latents.shape
_UpperCAmelCase : Optional[int] = randn_tensor(lowerCamelCase__ , generator=lowerCamelCase__ , device=lowerCamelCase__ , dtype=lowerCamelCase__ )
# get latents
print("add noise to latents at timestep" , lowerCamelCase__ )
_UpperCAmelCase : List[Any] = self.scheduler.add_noise(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
_UpperCAmelCase : List[Any] = init_latents
return latents
@torch.no_grad()
def __call__( self : Any , lowerCamelCase__ : Union[torch.FloatTensor, PIL.Image.Image] = None , lowerCamelCase__ : float = 0.8 , lowerCamelCase__ : int = 1 , lowerCamelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCamelCase__ : float = 0.0 , lowerCamelCase__ : int = 50 , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : Optional[str] = "pil" , lowerCamelCase__ : bool = True , ) ->Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
self.check_inputs(lowerCamelCase__ )
# 2. Preprocess image
_UpperCAmelCase : Dict = preprocess(lowerCamelCase__ )
# 3. set timesteps
self.scheduler.set_timesteps(lowerCamelCase__ , device=self.device )
_UpperCAmelCase , _UpperCAmelCase : Any = self.get_timesteps(lowerCamelCase__ , lowerCamelCase__ , self.device )
_UpperCAmelCase : List[Any] = timesteps[:1].repeat(lowerCamelCase__ )
# 4. Prepare latent variables
_UpperCAmelCase : Optional[int] = self.prepare_latents(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , self.unet.dtype , self.device , lowerCamelCase__ )
_UpperCAmelCase : Any = latents
# 5. Denoising loop
for t in self.progress_bar(lowerCamelCase__ ):
# 1. predict noise model_output
_UpperCAmelCase : Union[str, Any] = self.unet(lowerCamelCase__ , lowerCamelCase__ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
_UpperCAmelCase : int = self.scheduler.step(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , eta=lowerCamelCase__ , use_clipped_model_output=lowerCamelCase__ , generator=lowerCamelCase__ , ).prev_sample
_UpperCAmelCase : Dict = (image / 2 + 0.5).clamp(0 , 1 )
_UpperCAmelCase : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_UpperCAmelCase : str = self.numpy_to_pil(lowerCamelCase__ )
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=lowerCamelCase__ )
| 322
| 1
|
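In the pipeline above, `strength` decides how far into the noise schedule denoising starts: the initial image is noised to that point and only the remaining steps are run. A plain-Python sketch of the `get_timesteps` arithmetic (no diffusers dependency):

def get_timesteps(timesteps: list[int], num_inference_steps: int, strength: float):
    init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
    t_start = max(num_inference_steps - init_timestep, 0)
    return timesteps[t_start:], num_inference_steps - t_start

schedule = list(range(49, -1, -1))  # 50 DDIM steps: 49, 48, ..., 0
kept, n = get_timesteps(schedule, num_inference_steps=50, strength=0.8)
assert n == 40 and kept[0] == 39  # strength=0.8 skips the 10 noisiest steps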
'''simple docstring'''
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
lowerCamelCase__ = {
'tiny.en': 'https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt',
'tiny': 'https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt',
'base.en': 'https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt',
'base': 'https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt',
'small.en': 'https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt',
'small': 'https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt',
'medium.en': 'https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt',
'medium': 'https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt',
'large': 'https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt',
'large-v2': 'https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt',
}
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : List[Any] = ["layers", "blocks"]
for k in ignore_keys:
state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )
lowerCamelCase__ = {
'blocks': 'layers',
'mlp.0': 'fc1',
'mlp.2': 'fc2',
'mlp_ln': 'final_layer_norm',
'.attn.query': '.self_attn.q_proj',
'.attn.key': '.self_attn.k_proj',
'.attn.value': '.self_attn.v_proj',
'.attn_ln': '.self_attn_layer_norm',
'.attn.out': '.self_attn.out_proj',
'.cross_attn.query': '.encoder_attn.q_proj',
'.cross_attn.key': '.encoder_attn.k_proj',
'.cross_attn.value': '.encoder_attn.v_proj',
'.cross_attn_ln': '.encoder_attn_layer_norm',
'.cross_attn.out': '.encoder_attn.out_proj',
'decoder.ln.': 'decoder.layer_norm.',
'encoder.ln.': 'encoder.layer_norm.',
'token_embedding': 'embed_tokens',
'encoder.positional_embedding': 'encoder.embed_positions.weight',
'decoder.positional_embedding': 'decoder.embed_positions.weight',
'ln_post': 'layer_norm',
}
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : int = list(s_dict.keys() )
for key in keys:
_UpperCAmelCase : Union[str, Any] = key
for k, v in WHISPER_MAPPING.items():
if k in key:
_UpperCAmelCase : Dict = new_key.replace(__lowerCAmelCase , __lowerCAmelCase )
print(F"""{key} -> {new_key}""" )
_UpperCAmelCase : int = s_dict.pop(__lowerCAmelCase )
return s_dict
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase , _UpperCAmelCase : str = emb.weight.shape
_UpperCAmelCase : List[Any] = nn.Linear(__lowerCAmelCase , __lowerCAmelCase , bias=__lowerCAmelCase )
_UpperCAmelCase : Optional[Any] = emb.weight.data
return lin_layer
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
_UpperCAmelCase : str = os.path.basename(__lowerCAmelCase )
_UpperCAmelCase : int = url.split("/" )[-2]
_UpperCAmelCase : List[Any] = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
if os.path.exists(__lowerCAmelCase ) and not os.path.isfile(__lowerCAmelCase ):
raise RuntimeError(F"""{download_target} exists and is not a regular file""" )
if os.path.isfile(__lowerCAmelCase ):
_UpperCAmelCase : Any = open(__lowerCAmelCase , "rb" ).read()
        if hashlib.sha256(__lowerCAmelCase ).hexdigest() == expected_shaaaa:
return model_bytes
else:
warnings.warn(F"""{download_target} exists, but the SHA256 checksum does not match; re-downloading the file""" )
with urllib.request.urlopen(__lowerCAmelCase ) as source, open(__lowerCAmelCase , "wb" ) as output:
with tqdm(
total=int(source.info().get("Content-Length" ) ) , ncols=80 , unit="iB" , unit_scale=__lowerCAmelCase , unit_divisor=1_024 ) as loop:
while True:
_UpperCAmelCase : Tuple = source.read(8_192 )
if not buffer:
break
output.write(__lowerCAmelCase )
loop.update(len(__lowerCAmelCase ) )
_UpperCAmelCase : Union[str, Any] = open(__lowerCAmelCase , "rb" ).read()
    if hashlib.sha256(__lowerCAmelCase ).hexdigest() != expected_shaaaa:
raise RuntimeError(
"Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model." )
return model_bytes
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
if ".pt" not in checkpoint_path:
_UpperCAmelCase : int = _download(_MODELS[checkpoint_path] )
else:
_UpperCAmelCase : int = torch.load(__lowerCAmelCase , map_location="cpu" )
_UpperCAmelCase : List[str] = original_checkpoint["dims"]
_UpperCAmelCase : Dict = original_checkpoint["model_state_dict"]
_UpperCAmelCase : int = state_dict["decoder.token_embedding.weight"]
remove_ignore_keys_(__lowerCAmelCase )
rename_keys(__lowerCAmelCase )
_UpperCAmelCase : Tuple = True
_UpperCAmelCase : str = state_dict["decoder.layers.0.fc1.weight"].shape[0]
_UpperCAmelCase : Dict = WhisperConfig(
        vocab_size=dimensions["n_vocab"] , encoder_ffn_dim=__lowerCAmelCase , decoder_ffn_dim=__lowerCAmelCase , num_mel_bins=dimensions["n_mels"] , d_model=dimensions["n_audio_state"] , max_target_positions=dimensions["n_text_ctx"] , encoder_layers=dimensions["n_audio_layer"] , encoder_attention_heads=dimensions["n_audio_head"] , decoder_layers=dimensions["n_text_layer"] , decoder_attention_heads=dimensions["n_text_head"] , max_source_positions=dimensions["n_audio_ctx"] , )
_UpperCAmelCase : Any = WhisperForConditionalGeneration(__lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = model.model.load_state_dict(__lowerCAmelCase , strict=__lowerCAmelCase )
if len(__lowerCAmelCase ) > 0 and not set(__lowerCAmelCase ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
"Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
F""" but all the following weights are missing {missing}""" )
if tie_embeds:
_UpperCAmelCase : str = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
_UpperCAmelCase : Any = proj_out_weights
model.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# # Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Patht to the downloaded checkpoints')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
lowerCamelCase__ = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 322
|
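The `rename_keys` pass above applies every substring replacement in WHISPER_MAPPING to each state-dict key. A standalone sketch with a toy mapping:

MAPPING = {"blocks": "layers", "mlp.0": "fc1", "token_embedding": "embed_tokens"}

def rename_keys(s_dict: dict) -> dict:
    for key in list(s_dict):
        new_key = key
        for old, new in MAPPING.items():
            if old in new_key:
                new_key = new_key.replace(old, new)
        s_dict[new_key] = s_dict.pop(key)
    return s_dict

sd = {"encoder.blocks.0.mlp.0.weight": 1, "decoder.token_embedding.weight": 2}
print(rename_keys(sd))
# {'encoder.layers.0.fc1.weight': 1, 'decoder.embed_tokens.weight': 2}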
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
lowerCamelCase__ = list[list[float | int]]
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : int = len(__lowerCAmelCase )
_UpperCAmelCase : Matrix = [[0 for _ in range(size + 1 )] for _ in range(__lowerCAmelCase )]
_UpperCAmelCase : int
_UpperCAmelCase : int
_UpperCAmelCase : int
_UpperCAmelCase : int
_UpperCAmelCase : int
_UpperCAmelCase : float
for row in range(__lowerCAmelCase ):
for col in range(__lowerCAmelCase ):
_UpperCAmelCase : Optional[Any] = matrix[row][col]
_UpperCAmelCase : Optional[int] = vector[row][0]
_UpperCAmelCase : int = 0
_UpperCAmelCase : Union[str, Any] = 0
while row < size and col < size:
# pivoting
_UpperCAmelCase : Optional[Any] = max((abs(augmented[rowa][col] ), rowa) for rowa in range(__lowerCAmelCase , __lowerCAmelCase ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
_UpperCAmelCase , _UpperCAmelCase : str = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , __lowerCAmelCase ):
_UpperCAmelCase : Dict = augmented[rowa][col] / augmented[row][col]
_UpperCAmelCase : Optional[Any] = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , __lowerCAmelCase ):
for row in range(__lowerCAmelCase ):
_UpperCAmelCase : Dict = augmented[row][col] / augmented[col][col]
for cola in range(__lowerCAmelCase , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(__lowerCAmelCase )
]
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : int = len(__lowerCAmelCase )
_UpperCAmelCase : Matrix = [[0 for _ in range(__lowerCAmelCase )] for _ in range(__lowerCAmelCase )]
_UpperCAmelCase : Matrix = [[0] for _ in range(__lowerCAmelCase )]
_UpperCAmelCase : Matrix
_UpperCAmelCase : int
_UpperCAmelCase : int
_UpperCAmelCase : int
for x_val, y_val in enumerate(__lowerCAmelCase ):
for col in range(__lowerCAmelCase ):
_UpperCAmelCase : Dict = (x_val + 1) ** (size - col - 1)
_UpperCAmelCase : int = y_val
_UpperCAmelCase : List[str] = solve(__lowerCAmelCase , __lowerCAmelCase )
def interpolated_func(__lowerCAmelCase ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(__lowerCAmelCase ) )
return interpolated_func
def __lowerCAmelCase (__lowerCAmelCase ):
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def __lowerCAmelCase (__lowerCAmelCase = question_function , __lowerCAmelCase = 10 ):
_UpperCAmelCase : list[int] = [func(__lowerCAmelCase ) for x_val in range(1 , order + 1 )]
_UpperCAmelCase : list[Callable[[int], int]] = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
_UpperCAmelCase : int = 0
_UpperCAmelCase : Callable[[int], int]
_UpperCAmelCase : int
for poly in polynomials:
_UpperCAmelCase : int = 1
while func(__lowerCAmelCase ) == poly(__lowerCAmelCase ):
x_val += 1
ret += poly(__lowerCAmelCase )
return ret
if __name__ == "__main__":
print(F'''{solution() = }''')
| 322
| 1
|
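The solver above performs Gaussian elimination with partial pivoting on a Vandermonde system to fit a polynomial through the first k terms, and Project Euler 101 then sums the first incorrect term of each fit. A worked example of one such fit, with numpy standing in for the hand-rolled elimination (an assumption for brevity):

import numpy as np

def u(n: int) -> int:
    return n**3  # toy generating function

k = 3  # fit a quadratic through the first three terms
xs = np.arange(1, k + 1)
A = np.vander(xs, k)  # columns x^2, x^1, x^0
coeffs = np.linalg.solve(A, np.array([u(int(x)) for x in xs], dtype=float))
op = lambda n: round(sum(c * n ** (k - i - 1) for i, c in enumerate(coeffs)))
first_incorrect = next(n for n in range(1, 10) if op(n) != u(n))
print(first_incorrect, op(first_incorrect))  # 4 58 -- the first incorrect term (true value is 64)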
'''simple docstring'''
from __future__ import annotations
lowerCamelCase__ = 10
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : Tuple = 1
_UpperCAmelCase : Dict = max(__lowerCAmelCase )
while placement <= max_digit:
# declare and initialize empty buckets
_UpperCAmelCase : list[list] = [[] for _ in range(__lowerCAmelCase )]
# split list_of_ints between the buckets
for i in list_of_ints:
_UpperCAmelCase : Optional[int] = int((i / placement) % RADIX )
buckets[tmp].append(__lowerCAmelCase )
# put each buckets' contents into list_of_ints
_UpperCAmelCase : List[str] = 0
for b in range(__lowerCAmelCase ):
for i in buckets[b]:
_UpperCAmelCase : str = i
a += 1
# move to next
placement *= RADIX
return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
| 322
|
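Above is LSD radix sort in base 10: bucket by each digit from least to most significant, then concatenate the buckets. A cleaned-up sketch (non-negative integers assumed):

RADIX = 10

def radix_sort(list_of_ints: list[int]) -> list[int]:
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        buckets: list[list[int]] = [[] for _ in range(RADIX)]
        for value in list_of_ints:
            buckets[(value // placement) % RADIX].append(value)
        a = 0
        for bucket in buckets:  # stable: bucket order preserves prior passes
            for value in bucket:
                list_of_ints[a] = value
                a += 1
        placement *= RADIX
    return list_of_ints

assert radix_sort([170, 45, 75, 90, 802, 24, 2, 66]) == [2, 24, 45, 66, 75, 90, 170, 802]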
'''simple docstring'''
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 322
| 1
|
'''simple docstring'''
from __future__ import annotations
lowerCamelCase__ = 'Muhammad Umer Farooq'
lowerCamelCase__ = 'MIT'
lowerCamelCase__ = '1.0.0'
lowerCamelCase__ = 'Muhammad Umer Farooq'
lowerCamelCase__ = 'contact@muhammadumerfarooq.me'
lowerCamelCase__ = 'Alpha'
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class lowerCAmelCase__ ( UpperCAmelCase__ ):
def __init__( self : Optional[int] , lowerCamelCase__ : str ) ->None:
'''simple docstring'''
super().__init__()
_UpperCAmelCase : list[str] = []
_UpperCAmelCase : List[str] = domain
def lowerCAmelCase__ ( self : Optional[Any] , lowerCamelCase__ : str , lowerCamelCase__ : list[tuple[str, str | None]] ) ->None:
'''simple docstring'''
if tag == "a":
# Check the list of defined attributes.
for name, value in attrs:
# If href is defined, and not empty nor # print it.
if name == "href" and value != "#" and value != "":
# If not already in urls.
if value not in self.urls:
_UpperCAmelCase : List[Any] = parse.urljoin(self.domain , lowerCamelCase__ )
self.urls.append(lowerCamelCase__ )
def __lowerCAmelCase (__lowerCAmelCase ):
return ".".join(get_sub_domain_name(__lowerCAmelCase ).split("." )[-2:] )
def __lowerCAmelCase (__lowerCAmelCase ):
return parse.urlparse(__lowerCAmelCase ).netloc
def __lowerCAmelCase (__lowerCAmelCase = "https://github.com" ):
_UpperCAmelCase : str = get_domain_name(__lowerCAmelCase )
# Initialize the parser
_UpperCAmelCase : Any = Parser(__lowerCAmelCase )
try:
# Open URL
_UpperCAmelCase : List[Any] = requests.get(__lowerCAmelCase )
# pass the raw HTML to the parser to get links
parser.feed(r.text )
# Get links and loop through
_UpperCAmelCase : Optional[Any] = set()
for link in parser.urls:
# open URL.
# read = requests.get(link)
try:
_UpperCAmelCase : Any = requests.get(__lowerCAmelCase )
# Get the valid email.
_UpperCAmelCase : List[Any] = re.findall("[a-zA-Z0-9]+@" + domain , read.text )
# If not in list then append it.
for email in emails:
valid_emails.add(__lowerCAmelCase )
except ValueError:
pass
except ValueError:
raise SystemExit(1 )
# Finally return a sorted list of email addresses with no duplicates.
return sorted(__lowerCAmelCase )
if __name__ == "__main__":
lowerCamelCase__ = emails_from_url('https://github.com')
print(F'''{len(emails)} emails found:''')
print('\n'.join(sorted(emails)))
| 322
|
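The crawler above boils down to two steps: harvest anchor hrefs with an HTMLParser subclass, then regex-match addresses on the target domain. An offline sketch of both steps against an inline HTML string (no network calls):

import re
from html.parser import HTMLParser

class LinkParser(HTMLParser):
    def __init__(self) -> None:
        super().__init__()
        self.urls: list[str] = []

    def handle_starttag(self, tag, attrs) -> None:
        if tag == "a":
            for name, value in attrs:
                if name == "href" and value not in (None, "", "#"):
                    self.urls.append(value)

html = '<a href="/about">About</a> contact us at support@github.com'
parser = LinkParser()
parser.feed(html)
print(parser.urls)  # ['/about']
print(re.findall(r"[a-zA-Z0-9._+-]+@github\.com", html))  # ['support@github.com']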
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
lowerCamelCase__ = TypeVar('T')
class lowerCAmelCase__ ( Generic[T] ):
def __init__( self : Union[str, Any] , lowerCamelCase__ : T ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase : Dict = data
_UpperCAmelCase : Node[T] | None = None
def __str__( self : Any ) ->str:
'''simple docstring'''
return F"""{self.data}"""
class lowerCAmelCase__ ( Generic[T] ):
def __init__( self : Tuple ) ->None:
'''simple docstring'''
_UpperCAmelCase : Node[T] | None = None
def __iter__( self : List[str] ) ->Iterator[T]:
'''simple docstring'''
_UpperCAmelCase : Any = self.top
while node:
yield node.data
_UpperCAmelCase : Dict = node.next
def __str__( self : Dict ) ->str:
'''simple docstring'''
return "->".join([str(lowerCamelCase__ ) for item in self] )
def __len__( self : Optional[int] ) ->int:
'''simple docstring'''
return len(tuple(iter(self ) ) )
def lowerCAmelCase__ ( self : List[Any] ) ->bool:
'''simple docstring'''
return self.top is None
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : T ) ->None:
'''simple docstring'''
_UpperCAmelCase : List[Any] = Node(lowerCamelCase__ )
if not self.is_empty():
_UpperCAmelCase : Tuple = self.top
_UpperCAmelCase : List[str] = node
def lowerCAmelCase__ ( self : Union[str, Any] ) ->T:
'''simple docstring'''
if self.is_empty():
raise IndexError("pop from empty stack" )
assert isinstance(self.top , lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = self.top
_UpperCAmelCase : Optional[Any] = self.top.next
return pop_node.data
def lowerCAmelCase__ ( self : Union[str, Any] ) ->T:
'''simple docstring'''
if self.is_empty():
raise IndexError("peek from empty stack" )
assert self.top is not None
return self.top.data
def lowerCAmelCase__ ( self : List[Any] ) ->None:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = None
if __name__ == "__main__":
from doctest import testmod
testmod()
| 322
| 1
|
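A quick usage sketch of the linked-list stack above, with readable names assumed for the obfuscated class and methods:

from __future__ import annotations
from typing import Generic, TypeVar

T = TypeVar("T")

class Node(Generic[T]):
    def __init__(self, data: T) -> None:
        self.data = data
        self.next: Node[T] | None = None

class Stack(Generic[T]):
    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def push(self, item: T) -> None:  # O(1): new node becomes the top
        node = Node(item)
        node.next = self.top
        self.top = node

    def pop(self) -> T:  # O(1): unlink and return the old top
        if self.top is None:
            raise IndexError("pop from empty stack")
        node, self.top = self.top, self.top.next
        return node.data

stack: Stack[int] = Stack()
for value in (1, 2, 3):
    stack.push(value)
assert [stack.pop(), stack.pop(), stack.pop()] == [3, 2, 1]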