"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def _snake_case ( snake_case__ : Dict ):
A = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'_float_tensor',
'decoder.output_projection.weight',
]
for k in ignore_keys:
state_dict.pop(snake_case__ , snake_case__ )
def _snake_case ( snake_case__ : int ):
A , A = emb.weight.shape
A = nn.Linear(snake_case__ , snake_case__ , bias=snake_case__ )
A = emb.weight.data
return lin_layer
def _snake_case ( snake_case__ : List[str] , snake_case__ : Any="facebook/mbart-large-en-ro" , snake_case__ : Optional[int]=False , snake_case__ : List[str]=False ):
A = torch.load(snake_case__ , map_location='cpu' )['model']
remove_ignore_keys_(snake_case__ )
A = state_dict['encoder.embed_tokens.weight'].shape[0]
A = MBartConfig.from_pretrained(snake_case__ , vocab_size=snake_case__ )
if mbart_aa and finetuned:
A = 'relu'
A = state_dict['decoder.embed_tokens.weight']
A = MBartForConditionalGeneration(snake_case__ )
model.model.load_state_dict(snake_case__ )
if finetuned:
A = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''',
default='''facebook/mbart-large-cc25''',
type=str,
help='''Which huggingface architecture to use: mbart-large''',
)
parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is mMART-50 checkpoint''')
parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''')
_lowercase = parser.parse_args()
_lowercase = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
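# Example invocation (the script name and both paths are hypothetical placeholders):
#
#   python convert_mbart_original_checkpoint_to_pytorch.py model.pt ./mbart-converted \
#       --hf_config facebook/mbart-large-cc25
#
# The converted weights can then be reloaded with
# `MBartForConditionalGeneration.from_pretrained("./mbart-converted")`.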
# ============================================================================
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
_lowercase = 16
_lowercase = 32
def _snake_case ( snake_case__ : Accelerator , snake_case__ : int = 16 , snake_case__ : str = "bert-base-cased" ):
A = AutoTokenizer.from_pretrained(snake_case__ )
A = load_dataset('glue' , 'mrpc' )
def tokenize_function(snake_case__ : Dict ):
# max_length=None => use the model max length (it's actually the default)
A = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=snake_case__ , max_length=snake_case__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
A = datasets.map(
snake_case__ , batched=snake_case__ , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=snake_case__ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
A = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(snake_case__ : int ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(snake_case__ , padding='max_length' , max_length=128 , return_tensors='pt' )
return tokenizer.pad(snake_case__ , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
A = DataLoader(
tokenized_datasets['train'] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ )
A = DataLoader(
tokenized_datasets['validation'] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ )
return train_dataloader, eval_dataloader
def _snake_case ( snake_case__ : Optional[int] , snake_case__ : Optional[int] ):
# Initialize accelerator
A = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
A = config['lr']
A = int(config['num_epochs'] )
A = int(config['seed'] )
A = int(config['batch_size'] )
A = args.model_name_or_path
set_seed(snake_case__ )
A , A = get_dataloaders(snake_case__ , snake_case__ , snake_case__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
A = AutoModelForSequenceClassification.from_pretrained(snake_case__ , return_dict=snake_case__ )
# Instantiate optimizer
A = (
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
A = optimizer_cls(params=model.parameters() , lr=snake_case__ )
if accelerator.state.deepspeed_plugin is not None:
A = accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
A = 1
A = (len(snake_case__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
A = get_linear_schedule_with_warmup(
optimizer=snake_case__ , num_warmup_steps=0 , num_training_steps=snake_case__ , )
else:
A = DummyScheduler(snake_case__ , total_num_steps=snake_case__ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
A , A , A , A , A = accelerator.prepare(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# We need to keep track of how many total steps we have iterated over
A = 0
# We also need to keep track of the stating epoch so files are named properly
A = 0
# Now we train the model
A = evaluate.load('glue' , 'mrpc' )
A = 0
A = {}
for epoch in range(snake_case__ , snake_case__ ):
model.train()
for step, batch in enumerate(snake_case__ ):
A = model(**snake_case__ )
A = outputs.loss
A = loss / gradient_accumulation_steps
accelerator.backward(snake_case__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
A = 0
for step, batch in enumerate(snake_case__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
A = model(**snake_case__ )
A = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
A , A = accelerator.gather(
(predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(snake_case__ ) - 1:
A = predictions[: len(eval_dataloader.dataset ) - samples_seen]
A = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=snake_case__ , references=snake_case__ , )
A = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'epoch {epoch}:' , snake_case__ )
A = eval_metric['accuracy']
if best_performance < eval_metric["accuracy"]:
A = eval_metric['accuracy']
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), F'Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , 'all_results.json' ) , 'w' ) as f:
json.dump(snake_case__ , snake_case__ )
def _snake_case ( ):
A = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
parser.add_argument(
'--model_name_or_path' , type=snake_case__ , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=snake_case__ , )
parser.add_argument(
'--output_dir' , type=snake_case__ , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--performance_lower_bound' , type=snake_case__ , default=snake_case__ , help='Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.' , )
parser.add_argument(
'--num_epochs' , type=snake_case__ , default=3 , help='Number of train epochs.' , )
A = parser.parse_args()
A = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
training_function(snake_case__ , snake_case__ )
if __name__ == "__main__":
main()
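# Example launch, assuming `accelerate config` has been run beforehand (optionally
# selecting a DeepSpeed config); the script filename is a placeholder:
#
#   accelerate launch performance_tracking.py --model_name_or_path bert-base-cased --num_epochs 3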
# ============================================================================
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
lowercase__ : Dict = logging.get_logger(__name__)
def __lowercase ( _a , _a ):
"""simple docstring"""
def run_func(_a ):
@wraps(_a )
def run_in_eager_mode(*_a , **_a ):
return func(*_a , **_a )
@wraps(_a )
@tf.function(experimental_compile=_a )
def run_in_graph_mode(*_a , **_a ):
return func(*_a , **_a )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
'''Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.''' )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
def __lowercase ( _a , _a , _a ):
"""simple docstring"""
snake_case_ : Tuple = random.Random()
snake_case_ : Union[str, Any] = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
return tf.constant(_a , shape=(batch_size, sequence_length) , dtype=tf.intaa )
class _UpperCAmelCase ( lowerCAmelCase__):
_lowerCAmelCase : TensorFlowBenchmarkArguments
_lowerCAmelCase : PretrainedConfig
_lowerCAmelCase : str = "TensorFlow"
@property
def _snake_case ( self : Optional[int] ):
return tf.__version__
def _snake_case ( self : Tuple , lowercase_ : str , lowercase_ : int , lowercase_ : int ):
# initialize GPU on separate process
snake_case_ : Optional[int] = self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' )
snake_case_ : Tuple = self._prepare_inference_func(lowercase_ , lowercase_ , lowercase_ )
return self._measure_speed(_inference )
def _snake_case ( self : List[str] , lowercase_ : str , lowercase_ : int , lowercase_ : int ):
snake_case_ : Dict = self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' )
snake_case_ : int = self._prepare_train_func(lowercase_ , lowercase_ , lowercase_ )
return self._measure_speed(_train )
def _snake_case ( self : int , lowercase_ : str , lowercase_ : int , lowercase_ : int ):
# initialize GPU on separate process
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , lowercase_ )
snake_case_ : Any = self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' )
snake_case_ : str = self._prepare_inference_func(lowercase_ , lowercase_ , lowercase_ )
return self._measure_memory(_inference )
def _snake_case ( self : Optional[Any] , lowercase_ : str , lowercase_ : int , lowercase_ : int ):
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , lowercase_ )
snake_case_ : Union[str, Any] = self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' )
snake_case_ : List[str] = self._prepare_train_func(lowercase_ , lowercase_ , lowercase_ )
return self._measure_memory(_train )
def _snake_case ( self : Dict , lowercase_ : str , lowercase_ : int , lowercase_ : int ):
snake_case_ : str = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError('''Mixed precision is currently not supported.''' )
snake_case_ : Optional[Any] = (
hasattr(lowercase_ , '''architectures''' )
and isinstance(config.architectures , lowercase_ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
snake_case_ : int = '''TF''' + config.architectures[0] # prepend 'TF' for tensorflow model
snake_case_ : Union[str, Any] = __import__('''transformers''' , fromlist=[model_class] )
snake_case_ : int = getattr(lowercase_ , lowercase_ )
snake_case_ : str = model_cls(lowercase_ )
except ImportError:
raise ImportError(
f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
''' set `--only_pretrain_model` or `args.only_pretrain_model=True`.''' )
else:
snake_case_ : Tuple = TF_MODEL_MAPPING[config.__class__](lowercase_ )
# encoder-decoder has vocab size saved differently
snake_case_ : Union[str, Any] = config.vocab_size if hasattr(lowercase_ , '''vocab_size''' ) else config.encoder.vocab_size
snake_case_ : str = random_input_ids(lowercase_ , lowercase_ , lowercase_ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(lowercase_ , decoder_input_ids=lowercase_ , training=lowercase_ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(lowercase_ , training=lowercase_ )
snake_case_ : List[Any] = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def _snake_case ( self : List[Any] , lowercase_ : str , lowercase_ : int , lowercase_ : int ):
snake_case_ : Dict = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError('''Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.''' )
if self.args.fpaa:
raise NotImplementedError('''Mixed precision is currently not supported.''' )
snake_case_ : List[Any] = (
hasattr(lowercase_ , '''architectures''' )
and isinstance(config.architectures , lowercase_ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
snake_case_ : str = '''TF''' + config.architectures[0] # prepend 'TF' for tensorflow model
snake_case_ : int = __import__('''transformers''' , fromlist=[model_class] )
snake_case_ : Dict = getattr(lowercase_ , lowercase_ )
snake_case_ : Tuple = model_cls(lowercase_ )
except ImportError:
raise ImportError(
f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
''' set `--only_pretrain_model` or `args.only_pretrain_model=True`.''' )
else:
snake_case_ : Union[str, Any] = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](lowercase_ )
# encoder-decoder has vocab size saved differently
snake_case_ : List[Any] = config.vocab_size if hasattr(lowercase_ , '''vocab_size''' ) else config.encoder.vocab_size
snake_case_ : Dict = random_input_ids(lowercase_ , lowercase_ , lowercase_ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
snake_case_ : List[str] = model(lowercase_ , decoder_input_ids=lowercase_ , labels=lowercase_ , training=lowercase_ )[0]
snake_case_ : List[str] = tf.gradients(lowercase_ , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
snake_case_ : Optional[Any] = model(lowercase_ , labels=lowercase_ , training=lowercase_ )[0]
snake_case_ : List[Any] = tf.gradients(lowercase_ , model.trainable_variables )
return gradients
snake_case_ : Optional[int] = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def _snake_case ( self : Tuple , lowercase_ : str ):
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
# run additional 10 times to stabilize compilation for tpu
logger.info('''Do inference on TPU. Running model 5 times to stabilize compilation''' )
timeit.repeat(lowercase_ , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
snake_case_ : str = timeit.repeat(
lowercase_ , repeat=self.args.repeat , number=10 , )
return min(lowercase_ ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(f"Doesn't fit on GPU. {e}" )
def _snake_case ( self : int , lowercase_ : Callable[[], None] ):
logger.info(
'''Note that TensorFlow allocates more memory than '''
'''it might need to speed up computation. '''
'''The memory reported here corresponds to the memory '''
'''reported by `nvidia-smi`, which can vary depending '''
'''on total available memory on the GPU that is used.''' )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
'''`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory'''
''' consumption line by line.''' )
snake_case_ : List[Any] = start_memory_tracing('''transformers''' )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
'''Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking'''
''' with `args.memory=False`''' )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
'''py3nvml not installed, we won\'t log GPU memory usage. '''
'''Install py3nvml (pip install py3nvml) to log information about GPU.''' )
snake_case_ : str = '''N/A'''
else:
logger.info(
'''Measuring total GPU usage on GPU device. Make sure to not have additional processes'''
''' running on the same GPU.''' )
# init nvml
nvml.nvmlInit()
func()
snake_case_ : Optional[Any] = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
snake_case_ : Dict = nvml.nvmlDeviceGetMemoryInfo(lowercase_ )
snake_case_ : Dict = meminfo.used
snake_case_ : Tuple = Memory(lowercase_ )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
'''When enabling line by line tracing, the max peak memory for CPU is inaccurate in'''
''' TensorFlow.''' )
snake_case_ : int = None
else:
snake_case_ : Optional[int] = measure_peak_memory_cpu(lowercase_ )
snake_case_ : Union[str, Any] = Memory(lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else memory_bytes
if self.args.trace_memory_line_by_line:
snake_case_ : Tuple = stop_memory_tracing(lowercase_ )
if memory is None:
snake_case_ : Union[str, Any] = summary.total
else:
snake_case_ : Any = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(f"Doesn't fit on GPU. {e}" )
return "N/A", None
# ============================================================================
"""simple docstring"""
from math import pow
def __lowercase ( _a , _a , _a , _a , _a , ):
if current_sum == needed_sum:
# If the sum of the powers is equal to needed_sum, then we have a solution.
solutions_count += 1
return current_sum, solutions_count
snake_case_ : int = int(pow(_a , _a ) )
if current_sum + i_to_n <= needed_sum:
# If the sum of the powers is less than needed_sum, then continue adding powers.
current_sum += i_to_n
snake_case_, snake_case_ : List[Any] = backtrack(
_a , _a , current_number + 1 , _a , _a )
current_sum -= i_to_n
if i_to_n < needed_sum:
# If the power of i is less than needed_sum, then try with the next power.
snake_case_, snake_case_ : str = backtrack(
_a , _a , current_number + 1 , _a , _a )
return current_sum, solutions_count
def __lowercase ( _a , _a ):
if not (1 <= needed_sum <= 1_000 and 2 <= power <= 10):
raise ValueError(
'''Invalid input\n'''
'''needed_sum must be between 1 and 1000, power between 2 and 10.''' )
return backtrack(_a , _a , 1 , 0 , 0 )[1] # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod()
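# Example: 13 can be written as a sum of distinct squares in exactly one way
# (2**2 + 3**2), so:
#
#   print(solve(13, 2))  # 1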
# ============================================================================
"""simple docstring"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def UpperCamelCase_ ( lowerCAmelCase__ : dict ) -> str:
"""simple docstring"""
return (data["data"], data["target"])
def UpperCamelCase_ ( lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : np.ndarray ) -> Any:
"""simple docstring"""
lowerCAmelCase_ : str = XGBClassifier()
classifier.fit(__lowerCamelCase , __lowerCamelCase )
return classifier
def UpperCamelCase_ ( ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase_ : str = load_iris()
lowerCAmelCase_ : List[str] = data_handling(__lowerCamelCase )
lowerCAmelCase_ : Optional[Any] = train_test_split(
__lowerCamelCase , __lowerCamelCase , test_size=0.25 )
lowerCAmelCase_ : str = iris['''target_names''']
# Create an XGBoost Classifier from the training data
lowerCAmelCase_ : List[str] = xgboost(__lowerCamelCase , __lowerCamelCase )
# Display the confusion matrix of the classifier with both training and test sets
ConfusionMatrixDisplay.from_estimator(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , display_labels=__lowerCamelCase , cmap='Blues' , normalize='true' , )
plt.title('Normalized Confusion Matrix - IRIS Dataset' )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
# ============================================================================
"""simple docstring"""
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
_lowercase : Tuple = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
'''simple docstring'''
_a = ['pixel_values']
def __init__( self : Optional[Any], lowerCamelCase : bool = True, lowerCamelCase : Union[int, float] = 1 / 255, lowerCamelCase : bool = True, lowerCamelCase : int = 8, **lowerCamelCase : Tuple, )-> None:
super().__init__(**lowerCamelCase )
lowerCamelCase__ : int =do_rescale
lowerCamelCase__ : Dict =rescale_factor
lowerCamelCase__ : Union[str, Any] =do_pad
lowerCamelCase__ : Union[str, Any] =pad_size
def snake_case ( self : int, lowerCamelCase : np.ndarray, lowerCamelCase : float, lowerCamelCase : Optional[Union[str, ChannelDimension]] = None, **lowerCamelCase : int )-> np.ndarray:
return rescale(lowerCamelCase, scale=lowerCamelCase, data_format=lowerCamelCase, **lowerCamelCase )
def snake_case ( self : Optional[Any], lowerCamelCase : np.ndarray, lowerCamelCase : int, lowerCamelCase : Optional[Union[str, ChannelDimension]] = None )-> List[Any]:
lowerCamelCase__ , lowerCamelCase__ : Optional[int] =get_image_size(lowerCamelCase )
lowerCamelCase__ : List[str] =(old_height // size + 1) * size - old_height
lowerCamelCase__ : List[str] =(old_width // size + 1) * size - old_width
return pad(lowerCamelCase, ((0, pad_height), (0, pad_width)), mode='''symmetric''', data_format=lowerCamelCase )
def snake_case ( self : List[Any], lowerCamelCase : ImageInput, lowerCamelCase : Optional[bool] = None, lowerCamelCase : Optional[float] = None, lowerCamelCase : Optional[bool] = None, lowerCamelCase : Optional[int] = None, lowerCamelCase : Optional[Union[str, TensorType]] = None, lowerCamelCase : Union[str, ChannelDimension] = ChannelDimension.FIRST, **lowerCamelCase : Union[str, Any], )-> Dict:
lowerCamelCase__ : List[str] =do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase__ : Tuple =rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCamelCase__ : str =do_pad if do_pad is not None else self.do_pad
lowerCamelCase__ : int =pad_size if pad_size is not None else self.pad_size
lowerCamelCase__ : Optional[int] =make_list_of_images(lowerCamelCase )
if not valid_images(lowerCamelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
# All transformations expect numpy arrays.
lowerCamelCase__ : Tuple =[to_numpy_array(lowerCamelCase ) for image in images]
if do_rescale:
lowerCamelCase__ : Tuple =[self.rescale(image=lowerCamelCase, scale=lowerCamelCase ) for image in images]
if do_pad:
lowerCamelCase__ : Tuple =[self.pad(lowerCamelCase, size=lowerCamelCase ) for image in images]
lowerCamelCase__ : int =[to_channel_dimension_format(lowerCamelCase, lowerCamelCase ) for image in images]
lowerCamelCase__ : Dict ={'''pixel_values''': images}
return BatchFeature(data=lowerCamelCase, tensor_type=lowerCamelCase )
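# A minimal usage sketch (shape arithmetic assumes a channels-first input; note
# that `pad` as written above always adds at least one row/column to reach the
# next multiple of `pad_size`):
#
#   import numpy as np
#   processor = Swin2SRImageProcessor()
#   image = np.random.randint(0, 256, (3, 250, 200), dtype=np.uint8)
#   batch = processor(image, return_tensors="np")
#   print(batch["pixel_values"].shape)  # (1, 3, 256, 208)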
# ============================================================================
"""simple docstring"""
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
__lowercase = data_utils.TransfoXLTokenizer
__lowercase = data_utils.TransfoXLCorpus
__lowercase = data_utils
__lowercase = data_utils
def lowercase ( A_ , A_ , A_ , A_ )-> Optional[Any]:
'''simple docstring'''
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(a__ , "rb" ) as fp:
a : Tuple = pickle.load(a__ , encoding="latin1" )
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
a : Optional[int] = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
print(F'''Save vocabulary to {pytorch_vocab_dump_path}''' )
a : Optional[Any] = corpus.vocab.__dict__
torch.save(a__ , a__ )
a : int = corpus.__dict__
corpus_dict_no_vocab.pop("vocab" , a__ )
a : List[str] = pytorch_dump_folder_path + "/" + CORPUS_NAME
print(F'''Save dataset to {pytorch_dataset_dump_path}''' )
torch.save(a__ , a__ )
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
a : Optional[int] = os.path.abspath(a__ )
a : Optional[int] = os.path.abspath(a__ )
print(F'''Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.''' )
# Initialise PyTorch model
if transfo_xl_config_file == "":
a : Union[str, Any] = TransfoXLConfig()
else:
a : List[Any] = TransfoXLConfig.from_json_file(a__ )
print(F'''Building PyTorch model from configuration: {config}''' )
a : int = TransfoXLLMHeadModel(a__ )
a : List[Any] = load_tf_weights_in_transfo_xl(a__ , a__ , a__ )
# Save pytorch-model
a : int = os.path.join(a__ , a__ )
a : Optional[int] = os.path.join(a__ , a__ )
print(F'''Save PyTorch model to {os.path.abspath(a__ )}''' )
torch.save(model.state_dict() , a__ )
print(F'''Save configuration file to {os.path.abspath(a__ )}''' )
with open(a__ , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__lowercase = argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the folder to store the PyTorch model or dataset/vocab.""",
)
parser.add_argument(
"""--tf_checkpoint_path""",
default="""""",
type=str,
help="""An optional path to a TensorFlow checkpoint path to be converted.""",
)
parser.add_argument(
"""--transfo_xl_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--transfo_xl_dataset_file""",
default="""""",
type=str,
help="""An optional dataset file to be converted in a vocabulary.""",
)
__lowercase = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
# ============================================================================
"""simple docstring"""
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def lowercase ( A_ , A_ , A_ , A_ , A_ )-> Optional[Any]:
'''simple docstring'''
a : Optional[int] = TapasConfig.from_json_file(A_ )
# set absolute/relative position embeddings parameter
a : str = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
a : Dict = TapasForQuestionAnswering(config=A_ )
elif task == "WTQ":
# run_task_main.py hparams
a : Any = 4
a : Dict = True
# hparam_utils.py hparams
a : str = 0.6_6_4_6_9_4
a : Optional[int] = 0.2_0_7_9_5_1
a : Optional[Any] = 0.1_2_1_1_9_4
a : Union[str, Any] = True
a : int = True
a : Tuple = False
a : Dict = 0.0_3_5_2_5_1_3
a : List[str] = TapasForQuestionAnswering(config=A_ )
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
a : Union[str, Any] = 4
a : List[Any] = False
# hparam_utils.py hparams
a : Dict = 3_6.4_5_1_9
a : List[str] = 0.9_0_3_4_2_1
a : Optional[Any] = 2_2_2.0_8_8
a : Dict = True
a : Union[str, Any] = True
a : List[str] = True
a : List[str] = 0.7_6_3_1_4_1
a : Any = TapasForQuestionAnswering(config=A_ )
elif task == "TABFACT":
a : int = TapasForSequenceClassification(config=A_ )
elif task == "MLM":
a : int = TapasForMaskedLM(config=A_ )
elif task == "INTERMEDIATE_PRETRAINING":
a : List[Any] = TapasModel(config=A_ )
else:
raise ValueError(F'''Task {task} not supported.''' )
print(F'''Building PyTorch model from configuration: {config}''' )
# Load weights from tf checkpoint
load_tf_weights_in_tapas(A_ , A_ , A_ )
# Save pytorch-model (weights and configuration)
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(A_ )
# Save tokenizer files
print(F'''Save tokenizer files to {pytorch_dump_path}''' )
a : Optional[int] = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt" , model_max_length=512 )
tokenizer.save_pretrained(A_ )
print("Used relative position embeddings:" , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
__lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA."""
)
parser.add_argument(
"""--reset_position_index_per_cell""",
default=False,
action="""store_true""",
help="""Whether to use relative position embeddings or not. Defaults to True.""",
)
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--tapas_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained TAPAS model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__lowercase = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
# ============================================================================
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
a_ = logging.get_logger(__name__)
class UpperCAmelCase_ ( snake_case ):
UpperCamelCase =["pixel_values"]
def __init__( self , UpperCamelCase_ = True , UpperCamelCase_ = None , UpperCamelCase_ = PILImageResampling.BICUBIC , UpperCamelCase_ = True , UpperCamelCase_ = 1 / 2_55 , UpperCamelCase_ = True , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = True , **UpperCamelCase_ , ) -> None:
super().__init__(**UpperCamelCase_ )
__lowercase : Dict = size if size is not None else {'''height''': 3_84, '''width''': 3_84}
__lowercase : int = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ )
__lowercase : Tuple = do_resize
__lowercase : List[str] = size
__lowercase : Dict = resample
__lowercase : Optional[Any] = do_rescale
__lowercase : List[str] = rescale_factor
__lowercase : Any = do_normalize
__lowercase : List[str] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
__lowercase : str = image_std if image_std is not None else OPENAI_CLIP_STD
__lowercase : Any = do_convert_rgb
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = PILImageResampling.BICUBIC , UpperCamelCase_ = None , **UpperCamelCase_ , ) -> np.ndarray:
__lowercase : Dict = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
__lowercase : Union[str, Any] = (size['''height'''], size['''width'''])
return resize(UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , **UpperCamelCase_ , ) -> List[str]:
return rescale(UpperCamelCase_ , scale=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , **UpperCamelCase_ , ) -> np.ndarray:
return normalize(UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = ChannelDimension.FIRST , **UpperCamelCase_ , ) -> PIL.Image.Image:
__lowercase : int = do_resize if do_resize is not None else self.do_resize
__lowercase : Union[str, Any] = resample if resample is not None else self.resample
__lowercase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
__lowercase : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowercase : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
__lowercase : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
__lowercase : Any = image_std if image_std is not None else self.image_std
__lowercase : str = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__lowercase : List[Any] = size if size is not None else self.size
__lowercase : List[str] = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ )
__lowercase : str = make_list_of_images(UpperCamelCase_ )
if not valid_images(UpperCamelCase_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__lowercase : List[Any] = [convert_to_rgb(UpperCamelCase_ ) for image in images]
# All transformations expect numpy arrays.
__lowercase : Optional[int] = [to_numpy_array(UpperCamelCase_ ) for image in images]
if do_resize:
__lowercase : Any = [self.resize(image=UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ ) for image in images]
if do_rescale:
__lowercase : List[Any] = [self.rescale(image=UpperCamelCase_ , scale=UpperCamelCase_ ) for image in images]
if do_normalize:
__lowercase : Any = [self.normalize(image=UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ ) for image in images]
__lowercase : List[Any] = [to_channel_dimension_format(UpperCamelCase_ , UpperCamelCase_ ) for image in images]
__lowercase : List[Any] = BatchFeature(data={'''pixel_values''': images} , tensor_type=UpperCamelCase_ )
return encoded_outputs
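# A minimal usage sketch (input size is illustrative; BLIP resizes to 384x384
# by default):
#
#   from PIL import Image
#   processor = BlipImageProcessor()
#   image = Image.new("RGB", (500, 400))
#   batch = processor(image, return_tensors="np")
#   print(batch["pixel_values"].shape)  # (1, 3, 384, 384)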
# ============================================================================
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
a_ = '\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n'
class UpperCAmelCase_ ( unittest.TestCase , snake_case ):
def _lowerCamelCase ( self ) -> int:
__lowercase : List[Any] = load_tool('''text-question-answering''' )
self.tool.setup()
__lowercase : Any = load_tool('''text-question-answering''' , remote=UpperCamelCase_ )
def _lowerCamelCase ( self ) -> Union[str, Any]:
__lowercase : Dict = self.tool(UpperCamelCase_ , '''What did Hugging Face do in April 2021?''' )
self.assertEqual(UpperCamelCase_ , '''launched the BigScience Research Workshop''' )
def _lowerCamelCase ( self ) -> List[str]:
__lowercase : Optional[int] = self.remote_tool(UpperCamelCase_ , '''What did Hugging Face do in April 2021?''' )
self.assertEqual(UpperCamelCase_ , '''launched the BigScience Research Workshop''' )
def _lowerCamelCase ( self ) -> Any:
__lowercase : Union[str, Any] = self.tool(text=UpperCamelCase_ , question='''What did Hugging Face do in April 2021?''' )
self.assertEqual(UpperCamelCase_ , '''launched the BigScience Research Workshop''' )
def _lowerCamelCase ( self ) -> Optional[Any]:
__lowercase : Optional[int] = self.remote_tool(text=UpperCamelCase_ , question='''What did Hugging Face do in April 2021?''' )
self.assertEqual(UpperCamelCase_ , '''launched the BigScience Research Workshop''' )
# ============================================================================
"""Tokenization classes for GPT-NeoX-Japanese."""
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple

import numpy as np

from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
    },
    "emoji_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "abeja/gpt-neox-japanese-2.7b": 2048,
}


def load_vocab_and_emoji(vocab_file, emoji_file):
    """Loads a vocabulary file and an emoji file into dictionaries."""
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx

    return vocab, raw_vocab, ids_to_tokens, emoji


class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    """Tokenizer for GPT-NeoX-Japanese, backed by a sub-word Japanese tokenizer with emoji support."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        do_clean_text=False,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            pad_token=pad_token,
            bos_token=bos_token,
            eos_token=eos_token,
            do_clean_text=do_clean_text,
            **kwargs,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find an emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji
        )

    @property
    def vocab_size(self):
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        out_string = "".join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """This corresponds to DialoGPT variants of models."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"]
            )
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file, "w", encoding="utf-8") as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file


class SubWordJapaneseTokenizer(object):
    """Sub-word tokenizer that normalizes URLs/emails/dates/prices and falls back to bytes for unknown characters."""

    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter5 = re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter6 = re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*"
        )
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})

    def __len__(self):
        return len(self.ids_to_tokens)

    def clean_text(self, content):
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content

    def tokenize(self, text, clean=False):
        text = text.replace(" ", "<SP>")
        text = text.replace("　", "<SP>")  # full-width space
        text = text.replace("\r\n", "<BR>")
        text = text.replace("\n", "<BR>")
        text = text.replace("\r", "<BR>")
        text = text.replace("\t", "<TAB>")
        text = text.replace("—", "ー")
        text = text.replace("−", "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)

        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checku2e(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checku2e(wd):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result

    def convert_id_to_token(self, index, breakline="\n"):
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word])
            elif word == "<SP>":
                words.append(" ")
            elif word == "<BR>":
                words.append(breakline)
            elif word == "<TAB>":
                words.append("\t")
            elif word == "<BLOCK>":
                words.append("▀")
            elif word == "<KIGOU>":
                words.append("ǀ")
            elif word == "<U2000U2BFF>":
                words.append("‖")
            else:
                words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
        text = "".join(words)
        return text
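# A minimal usage sketch (downloads the vocab/emoji files from the Hub):
#
#   tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
#   ids = tokenizer("こんにちは、世界。")["input_ids"]
#   print(tokenizer.decode(ids))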
# ============================================================================
"""Borůvka's algorithm for finding a minimum spanning tree."""
from __future__ import annotations

from typing import Any


class Graph:
    def __init__(self, num_of_nodes: int) -> None:
        """
        Arguments:
            num_of_nodes - the number of nodes in the graph
        Attributes:
            m_num_of_nodes - the number of nodes in the graph.
            m_edges - the list of edges.
            m_component - the dictionary which stores the index of the component
            that a node belongs to.
        """
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        """Adds an edge in the format [first, second, edge weight] to the graph."""
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        """Finds the component index of a given node."""
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        """Propagates the component index of a given node throughout the graph."""
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        """Finds the roots of the components of two nodes, compares the components
        in terms of size, and attaches the smaller one to the larger one to form
        a single component."""
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        """Performs Borůvka's algorithm to find the MST."""
        # Initialize additional lists required for the algorithm.
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes

        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)

        num_of_components = self.m_num_of_nodes

        while num_of_components > 1:
            # For each component, find its cheapest outgoing edge.
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]

                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]

            # Add the cheapest edges and merge the components they connect.
            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]

                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1

            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"The total weight of the minimal spanning tree is: {mst_weight}")


def test_vector() -> None:
    """Doctest-based smoke test; see the usage sketch below."""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
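# A minimal usage sketch (edge weights are illustrative; the MST here has total
# weight 4 + 5 + 10 = 19):
#
#   g = Graph(4)
#   for u, v, w in ((0, 1, 10), (0, 2, 6), (0, 3, 5), (1, 3, 15), (2, 3, 4)):
#       g.add_edge(u, v, w)
#   g.boruvka()  # prints each added edge, then the total MST weight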
# ============================================================================
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# ============================================================================
import gc
import random
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

import diffusers
from diffusers import (
    AutoencoderKL,
    EulerDiscreteScheduler,
    StableDiffusionLatentUpscalePipeline,
    StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


def check_same_shape(tensor_list):
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])


class StableDiffusionLatentUpscalePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "height",
        "width",
        "cross_attention_kwargs",
        "negative_prompt_embeds",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    test_cpu_offload = True

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    def get_dummy_components(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            act_fn="gelu",
            attention_head_dim=8,
            norm_num_groups=None,
            block_out_channels=[32, 32, 64, 64],
            time_cond_proj_dim=160,
            conv_in_kernel=1,
            conv_out_kernel=1,
            cross_attention_dim=32,
            down_block_types=(
                "KDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
            ),
            in_channels=8,
            mid_block_type=None,
            only_cross_attention=False,
            out_channels=5,
            resnet_time_scale_shift="scale_shift",
            time_embedding_type="fourier",
            timestep_post_act="gelu",
            up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"),
        )
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=[
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
            ],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        scheduler = EulerDiscreteScheduler(prediction_type="sample")
        text_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="quick_gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": model.eval(),
            "vae": vae.eval(),
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": self.dummy_image.cpu(),
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
UpperCamelCase : List[Any] = pipe(**SCREAMING_SNAKE_CASE_ ).images
UpperCamelCase : str = image[0, -3:, -3:, -1]
self.assertEqual(image.shape, (1, 256, 256, 3) )
UpperCamelCase : List[Any] = np.array(
[0.47_22_24_12, 0.41_92_16_33, 0.44_71_74_34, 0.46_87_41_92, 0.42_58_82_58, 0.46_15_07_26, 0.4_67_75_34, 0.45_58_38_32, 0.48_57_90_55] )
UpperCamelCase : Dict = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(SCREAMING_SNAKE_CASE_, 1e-3 )
def snake_case_ ( self ) -> List[Any]:
super().test_attention_slicing_forward_pass(expected_max_diff=7e-3 )
def snake_case_ ( self ) -> Tuple:
super().test_cpu_offload_forward_pass(expected_max_diff=3e-3 )
def snake_case_ ( self ) -> Dict:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def snake_case_ ( self ) -> Any:
super().test_inference_batch_single_identical(expected_max_diff=7e-3 )
def snake_case_ ( self ) -> List[str]:
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3 )
def snake_case_ ( self ) -> Optional[int]:
super().test_save_load_local(expected_max_difference=3e-3 )
def snake_case_ ( self ) -> int:
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def snake_case_ ( self ) -> Optional[Any]:
UpperCamelCase : Optional[Any] = [
'DDIMScheduler',
'DDPMScheduler',
'PNDMScheduler',
'HeunDiscreteScheduler',
'EulerAncestralDiscreteScheduler',
'KDPM2DiscreteScheduler',
'KDPM2AncestralDiscreteScheduler',
'DPMSolverSDEScheduler',
]
UpperCamelCase : Tuple = self.get_dummy_components()
UpperCamelCase : Tuple = self.pipeline_class(**SCREAMING_SNAKE_CASE_ )
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=SCREAMING_SNAKE_CASE_ )
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : int = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[Any] = 2
UpperCamelCase : Optional[Any] = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
                # skip schedulers that have no sigma schedule or are otherwise
                # unsupported by this pipeline
continue
UpperCamelCase : List[Any] = getattr(SCREAMING_SNAKE_CASE_, scheduler_enum.name )
UpperCamelCase : List[str] = scheduler_cls.from_config(pipe.scheduler.config )
UpperCamelCase : List[str] = pipe(**SCREAMING_SNAKE_CASE_ )[0]
outputs.append(SCREAMING_SNAKE_CASE_ )
assert check_same_shape(SCREAMING_SNAKE_CASE_ )
@require_torch_gpu
@slow
class lowerCAmelCase_ ( unittest.TestCase ):
def snake_case_ ( self ) -> Union[str, Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_ ( self ) -> List[Any]:
UpperCamelCase : Dict = torch.manual_seed(33 )
UpperCamelCase : Union[str, Any] = StableDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4', torch_dtype=torch.floataa )
pipe.to('cuda' )
UpperCamelCase : Tuple = StableDiffusionLatentUpscalePipeline.from_pretrained(
'stabilityai/sd-x2-latent-upscaler', torch_dtype=torch.floataa )
upscaler.to('cuda' )
UpperCamelCase : Union[str, Any] = 'a photo of an astronaut high resolution, unreal engine, ultra realistic'
UpperCamelCase : int = pipe(SCREAMING_SNAKE_CASE_, generator=SCREAMING_SNAKE_CASE_, output_type='latent' ).images
UpperCamelCase : List[str] = upscaler(
prompt=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, num_inference_steps=20, guidance_scale=0, generator=SCREAMING_SNAKE_CASE_, output_type='np', ).images[0]
UpperCamelCase : Optional[int] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy' )
assert np.abs((expected_image - image).mean() ) < 5e-2
def snake_case_ ( self ) -> int:
UpperCamelCase : List[Any] = torch.manual_seed(33 )
UpperCamelCase : Union[str, Any] = StableDiffusionLatentUpscalePipeline.from_pretrained(
'stabilityai/sd-x2-latent-upscaler', torch_dtype=torch.floataa )
upscaler.to('cuda' )
UpperCamelCase : Dict = 'the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas'
UpperCamelCase : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png' )
UpperCamelCase : str = upscaler(
prompt=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, num_inference_steps=20, guidance_scale=0, generator=SCREAMING_SNAKE_CASE_, output_type='np', ).images[0]
UpperCamelCase : Optional[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy' )
assert np.abs((expected_image - image).max() ) < 5e-2
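
# A minimal sketch of the two-stage flow the slow tests above exercise:
# generate half-resolution latents with a base pipeline, then hand them to
# the x2 latent upscaler. Checkpoint ids mirror the tests; running this needs
# a CUDA GPU and network access to download the weights.
import torch
from diffusers import StableDiffusionLatentUpscalePipeline, StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16
).to("cuda")
upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
    "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
).to("cuda")

generator = torch.manual_seed(33)
prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"
low_res_latents = pipe(prompt, generator=generator, output_type="latent").images
upscaled = upscaler(
    prompt=prompt,
    image=low_res_latents,
    num_inference_steps=20,
    guidance_scale=0,
    generator=generator,
).images[0]
upscaled.save("astronaut_1024.png")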
| 119
| 1
|
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase_ ( _UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
__snake_case = MgpstrTokenizer
__snake_case = False
__snake_case = {}
__snake_case = False
def UpperCamelCase__ ( self ):
super().setUp()
# fmt: off
snake_case_ = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
# fmt: on
snake_case_ = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) )
snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_UpperCAmelCase ) + '''\n''' )
def UpperCamelCase__ ( self , **_UpperCAmelCase ):
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def UpperCamelCase__ ( self , _UpperCAmelCase ):
snake_case_ = '''tester'''
snake_case_ = '''tester'''
return input_text, output_text
@unittest.skip('''MGP-STR always lower cases letters.''' )
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
snake_case_ = self.get_tokenizers(do_lower_case=_UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
snake_case_ = '''[SPECIAL_TOKEN]'''
tokenizer.add_special_tokens({'''cls_token''': special_token} )
snake_case_ = tokenizer.encode([special_token] , add_special_tokens=_UpperCAmelCase )
self.assertEqual(len(_UpperCAmelCase ) , 1 )
snake_case_ = tokenizer.decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
self.assertTrue(special_token not in decoded )
def UpperCamelCase__ ( self ):
snake_case_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
snake_case_ = self.get_input_output_texts(_UpperCAmelCase )
snake_case_ = tokenizer.tokenize(_UpperCAmelCase )
snake_case_ = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
snake_case_ = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
snake_case_ = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertNotEqual(len(_UpperCAmelCase ) , 0 )
snake_case_ = tokenizer.decode(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(text_a.replace(''' ''' , '''''' ) , _UpperCAmelCase )
@unittest.skip('''MGP-STR tokenizer only handles one sequence.''' )
def UpperCamelCase__ ( self ):
pass
@unittest.skip('''inputs cannot be pretokenized in MgpstrTokenizer''' )
def UpperCamelCase__ ( self ):
pass
| 352
|
from __future__ import annotations
import time
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right


class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent


class BreadthFirstSearch:
    def __init__(self, start, goal):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)
        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            successors = self.get_successors(current_node)
            for node in successors:
                self.node_queue.append(node)
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node: Node) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalBreadthFirstSearch:
    def __init__(self, start, goal):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node
                )
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node
            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }
            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)
        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        return fwd_path + bwd_path


if __name__ == "__main__":
    # all coordinates are given in format [y, x]
    import doctest

    doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    bfs_path = bfs.search()
    bfs_time = time.time() - start_bfs_time

    print("Unidirectional BFS computation time : ", bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time

    print("Bidirectional BFS computation time : ", bd_bfs_time)
| 267
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_pix2struct''': [
'''PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Pix2StructConfig''',
'''Pix2StructTextConfig''',
'''Pix2StructVisionConfig''',
],
'''processing_pix2struct''': ['''Pix2StructProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''image_processing_pix2struct'''] = ['''Pix2StructImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_pix2struct'''] = [
'''PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Pix2StructPreTrainedModel''',
'''Pix2StructForConditionalGeneration''',
'''Pix2StructVisionModel''',
'''Pix2StructTextModel''',
]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_pix2struct import Pix2StructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 214
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name


if __name__ == "__main__":
    for i, job in enumerate(fetch_jobs("Bangalore"), 1):
        print(f"Job {i:>2} is {job[0]} at {job[1]}")
| 155
| 0
|
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
@add_end_docstrings(_a )
class _lowerCamelCase( _a ):
def __init__( self, **lowerCamelCase) -> Any:
"""simple docstring"""
super().__init__(**lowerCamelCase)
requires_backends(self, 'vision')
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING)
def __call__( self, lowerCamelCase, **lowerCamelCase) -> int:
"""simple docstring"""
return super().__call__(lowerCamelCase, **lowerCamelCase)
    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
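
# Typical end-user invocation of this pipeline through the factory function.
# The checkpoint id is illustrative -- any CLIP-style dual encoder works:
#
#     from transformers import pipeline
#
#     classifier = pipeline(
#         "zero-shot-image-classification", model="openai/clip-vit-base-patch32"
#     )
#     predictions = classifier(
#         "http://images.cocodataset.org/val2017/000000039769.jpg",
#         candidate_labels=["two cats", "a dog", "an airplane"],
#         hypothesis_template="This is a photo of {}.",
#     )
#     # `predictions` is sorted best-first: [{"score": ..., "label": ...}, ...]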
| 361
|
import argparse
from collections import defaultdict
import yaml
SCREAMING_SNAKE_CASE : Tuple = "docs/source/en/_toctree.yml"
def UpperCamelCase_( lowerCamelCase_ ) -> Tuple:
_lowercase : Tuple = defaultdict(lowerCamelCase_ )
_lowercase : int = []
_lowercase : str = []
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({'local': doc['local'], 'title': doc['title']} )
else:
new_doc_list.append(lowerCamelCase_ )
_lowercase : Optional[int] = new_doc_list
_lowercase : str = [key for key, value in counts.items() if value > 1]
_lowercase : Union[str, Any] = []
for duplicate_key in duplicates:
_lowercase : Optional[Any] = list({doc['title'] for doc in doc_list if doc['local'] == duplicate_key} )
if len(lowerCamelCase_ ) > 1:
raise ValueError(
F'''{duplicate_key} is present several times in the documentation table of content at '''
'`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
'others.' )
# Only add this once
new_doc.append({'local': duplicate_key, 'title': titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in doc_list if 'local' not in counts or counts[doc['local']] == 1] )
    _lowercase : Optional[int] = sorted(lowerCamelCase_ , key=lambda s : s["title"].lower() )
# "overview" gets special treatment and is always first
if len(lowerCamelCase_ ) > 1:
raise ValueError('{doc_list} has two \'overview\' docs which is not allowed.' )
overview_doc.extend(lowerCamelCase_ )
# Sort
return overview_doc
def UpperCamelCase_( lowerCamelCase_=False ) -> Any:
with open(lowerCamelCase_ , encoding='utf-8' ) as f:
_lowercase : Optional[Any] = yaml.safe_load(f.read() )
# Get to the API doc
_lowercase : Tuple = 0
while content[api_idx]["title"] != "API":
api_idx += 1
_lowercase : Tuple = content[api_idx]['sections']
# Then to the model doc
_lowercase : int = 0
while api_doc[scheduler_idx]["title"] != "Schedulers":
scheduler_idx += 1
_lowercase : Optional[int] = api_doc[scheduler_idx]['sections']
_lowercase : List[Any] = clean_doc_toc(lowerCamelCase_ )
_lowercase : Optional[Any] = False
if new_scheduler_doc != scheduler_doc:
_lowercase : Optional[Any] = True
if overwrite:
_lowercase : str = new_scheduler_doc
if diff:
if overwrite:
_lowercase : Any = api_doc
with open(lowerCamelCase_ , 'w' , encoding='utf-8' ) as f:
f.write(yaml.dump(lowerCamelCase_ , allow_unicode=lowerCamelCase_ ) )
else:
raise ValueError(
'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
def UpperCamelCase_( lowerCamelCase_=False ) -> List[str]:
with open(lowerCamelCase_ , encoding='utf-8' ) as f:
_lowercase : int = yaml.safe_load(f.read() )
# Get to the API doc
_lowercase : Optional[Any] = 0
while content[api_idx]["title"] != "API":
api_idx += 1
_lowercase : str = content[api_idx]['sections']
# Then to the model doc
_lowercase : Tuple = 0
while api_doc[pipeline_idx]["title"] != "Pipelines":
pipeline_idx += 1
_lowercase : Tuple = False
_lowercase : Dict = api_doc[pipeline_idx]['sections']
_lowercase : Optional[Any] = []
# sort sub pipeline docs
for pipeline_doc in pipeline_docs:
if "section" in pipeline_doc:
_lowercase : Union[str, Any] = pipeline_doc['section']
_lowercase : List[str] = clean_doc_toc(lowerCamelCase_ )
if overwrite:
_lowercase : str = new_sub_pipeline_doc
new_pipeline_docs.append(lowerCamelCase_ )
# sort overall pipeline doc
_lowercase : int = clean_doc_toc(lowerCamelCase_ )
if new_pipeline_docs != pipeline_docs:
_lowercase : Tuple = True
if overwrite:
_lowercase : str = new_pipeline_docs
if diff:
if overwrite:
_lowercase : List[str] = api_doc
with open(lowerCamelCase_ , 'w' , encoding='utf-8' ) as f:
f.write(yaml.dump(lowerCamelCase_ , allow_unicode=lowerCamelCase_ ) )
else:
raise ValueError(
'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Any = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
SCREAMING_SNAKE_CASE : Any = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
| 84
| 0
|
'''simple docstring'''
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
lowercase_ = HUGGINGFACE_HUB_CACHE
lowercase_ = """config.json"""
lowercase_ = """diffusion_pytorch_model.bin"""
lowercase_ = """diffusion_flax_model.msgpack"""
lowercase_ = """model.onnx"""
lowercase_ = """diffusion_pytorch_model.safetensors"""
lowercase_ = """weights.pb"""
lowercase_ = """https://huggingface.co"""
lowercase_ = default_cache_path
lowercase_ = """diffusers_modules"""
lowercase_ = os.getenv("""HF_MODULES_CACHE""", os.path.join(hf_cache_home, """modules"""))
lowercase_ = ["""fp16""", """non-ema"""]
lowercase_ = """.self_attn"""
| 58
|
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
__A =""
__A =""
__A =""
__A =1 # (0 is vertical, 1 is horizontal)
def a ( ):
'''simple docstring'''
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = get_dataset(_UpperCAmelCase , _UpperCAmelCase )
print('''Processing...''' )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[Any] = update_image_and_anno(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
for index, image in enumerate(_UpperCAmelCase ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
__UpperCAmelCase : Any = random_chars(32 )
__UpperCAmelCase : List[str] = paths[index].split(os.sep )[-1].rsplit('''.''' , 1 )[0]
__UpperCAmelCase : Optional[Any] = f'{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}'
cva.imwrite(f'/{file_root}.jpg' , _UpperCAmelCase , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(f'Success {index+1}/{len(_UpperCAmelCase )} with {file_name}' )
__UpperCAmelCase : Optional[Any] = []
for anno in new_annos[index]:
__UpperCAmelCase : Union[str, Any] = f'{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}'
annos_list.append(_UpperCAmelCase )
with open(f'/{file_root}.txt' , '''w''' ) as outfile:
outfile.write('''\n'''.join(line for line in annos_list ) )
def a ( _UpperCAmelCase : str , _UpperCAmelCase : str ):
'''simple docstring'''
__UpperCAmelCase : Any = []
__UpperCAmelCase : Any = []
for label_file in glob.glob(os.path.join(_UpperCAmelCase , '''*.txt''' ) ):
__UpperCAmelCase : Optional[Any] = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
with open(_UpperCAmelCase ) as in_file:
__UpperCAmelCase : List[str] = in_file.readlines()
__UpperCAmelCase : Optional[Any] = os.path.join(_UpperCAmelCase , f'{label_name}.jpg' )
__UpperCAmelCase : str = []
for obj_list in obj_lists:
__UpperCAmelCase : str = obj_list.rstrip('''\n''' ).split(''' ''' )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(_UpperCAmelCase )
labels.append(_UpperCAmelCase )
return img_paths, labels
def a ( _UpperCAmelCase : list , _UpperCAmelCase : list , _UpperCAmelCase : int = 1 ):
'''simple docstring'''
__UpperCAmelCase : Dict = []
__UpperCAmelCase : Optional[Any] = []
__UpperCAmelCase : Any = []
for idx in range(len(_UpperCAmelCase ) ):
__UpperCAmelCase : Tuple = []
__UpperCAmelCase : List[Any] = img_list[idx]
path_list.append(_UpperCAmelCase )
__UpperCAmelCase : str = anno_list[idx]
__UpperCAmelCase : str = cva.imread(_UpperCAmelCase )
if flip_type == 1:
__UpperCAmelCase : Any = cva.flip(_UpperCAmelCase , _UpperCAmelCase )
for bbox in img_annos:
__UpperCAmelCase : List[str] = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
__UpperCAmelCase : Any = cva.flip(_UpperCAmelCase , _UpperCAmelCase )
for bbox in img_annos:
__UpperCAmelCase : Union[str, Any] = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(_UpperCAmelCase )
new_imgs_list.append(_UpperCAmelCase )
return new_imgs_list, new_annos_lists, path_list
def a ( _UpperCAmelCase : int = 32 ):
'''simple docstring'''
assert number_char > 1, "The number of character should greater than 1"
__UpperCAmelCase : Union[str, Any] = ascii_lowercase + digits
return "".join(random.choice(_UpperCAmelCase ) for _ in range(_UpperCAmelCase ) )
if __name__ == "__main__":
main()
print("DONE ✅")
| 226
| 0
|
import sys
N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def solution(n: str = N) -> int:
    """Return the greatest product of thirteen adjacent digits in n."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product


if __name__ == "__main__":
    print(f"{solution() = }")
| 169
|
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
__snake_case = False
try:
__snake_case = _is_package_available("""google.colab""")
except ModuleNotFoundError:
pass
@input.register
class lowercase__ :
def __init__( self : Optional[Any] , UpperCAmelCase_ : str = None , UpperCAmelCase_ : list = [] ):
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = choices
SCREAMING_SNAKE_CASE__ = prompt
if sys.platform == "win32":
SCREAMING_SNAKE_CASE__ = '*'
else:
SCREAMING_SNAKE_CASE__ = '➔ '
def A_ ( self : Union[str, Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : str = "" ):
if sys.platform != "win32":
writeColor(self.choices[index] , 32 , UpperCAmelCase_ )
else:
forceWrite(self.choices[index] , UpperCAmelCase_ )
def A_ ( self : List[str] , UpperCAmelCase_ : int ):
if index == self.position:
forceWrite(F' {self.arrow_char} ' )
self.write_choice(UpperCAmelCase_ )
else:
forceWrite(F' {self.choices[index]}' )
reset_cursor()
def A_ ( self : List[Any] , UpperCAmelCase_ : Direction , UpperCAmelCase_ : int = 1 ):
SCREAMING_SNAKE_CASE__ = self.position
if direction == Direction.DOWN:
if self.position + 1 >= len(self.choices ):
return
self.position += num_spaces
else:
if self.position - 1 < 0:
return
self.position -= num_spaces
clear_line()
self.print_choice(UpperCAmelCase_ )
move_cursor(UpperCAmelCase_ , direction.name )
self.print_choice(self.position )
@input.mark(KEYMAP['up'] )
def A_ ( self : Optional[Any] ):
self.move_direction(Direction.UP )
@input.mark(KEYMAP['down'] )
def A_ ( self : List[Any] ):
self.move_direction(Direction.DOWN )
@input.mark(KEYMAP['newline'] )
def A_ ( self : Dict ):
move_cursor(len(self.choices ) - self.position , 'DOWN' )
return self.position
@input.mark(KEYMAP['interrupt'] )
def A_ ( self : Optional[Any] ):
move_cursor(len(self.choices ) - self.position , 'DOWN' )
raise KeyboardInterrupt
    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
def A_ ( self : Dict ):
SCREAMING_SNAKE_CASE__ = int(chr(self.current_selection ) )
SCREAMING_SNAKE_CASE__ = index - self.position
if index == self.position:
return
if index < len(self.choices ):
if self.position > index:
self.move_direction(Direction.UP , -movement )
elif self.position < index:
self.move_direction(Direction.DOWN , UpperCAmelCase_ )
else:
return
else:
return
def A_ ( self : Optional[int] , UpperCAmelCase_ : int = 0 ):
if self.prompt:
linebreak()
forceWrite(self.prompt , '\n' )
if in_colab:
forceWrite('Please input a choice index (starting from 0), and press enter' , '\n' )
else:
forceWrite('Please select a choice using the arrow or number keys, and selecting with enter' , '\n' )
SCREAMING_SNAKE_CASE__ = default_choice
for i in range(len(self.choices ) ):
self.print_choice(UpperCAmelCase_ )
forceWrite('\n' )
move_cursor(len(self.choices ) - self.position , 'UP' )
with cursor.hide():
while True:
if in_colab:
try:
SCREAMING_SNAKE_CASE__ = int(builtins.input() )
except ValueError:
SCREAMING_SNAKE_CASE__ = default_choice
else:
SCREAMING_SNAKE_CASE__ = self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices ) + 1 ):
move_cursor(1 , 'UP' )
clear_line()
self.write_choice(UpperCAmelCase_ , '\n' )
return choice
| 169
| 1
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__A : Optional[Any] = logging.get_logger(__name__)
__A : Optional[int] = {"tokenizer_file": "tokenizer.json"}
__A : Union[str, Any] = {
"tokenizer_file": {
"bigscience/tokenizer": "https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json",
"bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json",
"bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json",
"bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json",
"bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json",
"bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json",
"bigscience/bloom": "https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json",
},
}
class A_ (a_ ):
UpperCAmelCase__ = VOCAB_FILES_NAMES
UpperCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ = ['''input_ids''', '''attention_mask''']
UpperCAmelCase__ = None
def __init__( self , _A=None , _A=None , _A=None , _A="<unk>" , _A="<s>" , _A="</s>" , _A="<pad>" , _A=False , _A=False , **_A , ):
'''simple docstring'''
super().__init__(
_A , _A , tokenizer_file=_A , unk_token=_A , bos_token=_A , eos_token=_A , pad_token=_A , add_prefix_space=_A , clean_up_tokenization_spaces=_A , **_A , )
UpperCAmelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , _A ) != add_prefix_space:
UpperCAmelCase = getattr(_A , pre_tok_state.pop('''type''' ) )
UpperCAmelCase = add_prefix_space
UpperCAmelCase = pre_tok_class(**_A )
UpperCAmelCase = add_prefix_space
def _lowercase ( self , *_A , **_A ):
'''simple docstring'''
UpperCAmelCase = kwargs.get('''is_split_into_words''' , _A )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
''' pretokenized inputs.''' )
return super()._batch_encode_plus(*_A , **_A )
def _lowercase ( self , *_A , **_A ):
'''simple docstring'''
UpperCAmelCase = kwargs.get('''is_split_into_words''' , _A )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
''' pretokenized inputs.''' )
return super()._encode_plus(*_A , **_A )
def _lowercase ( self , _A , _A = None ):
'''simple docstring'''
UpperCAmelCase = self._tokenizer.model.save(_A , name=_A )
return tuple(_A )
def _lowercase ( self , _A ):
'''simple docstring'''
UpperCAmelCase = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(_A , add_special_tokens=_A ) + [self.eos_token_id] )
if len(_A ) > self.model_max_length:
UpperCAmelCase = input_ids[-self.model_max_length :]
return input_ids
| 273
|
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
if "model" in orig_key:
UpperCAmelCase = orig_key.replace('''model.''' , '''''' )
if "norm1" in orig_key:
UpperCAmelCase = orig_key.replace('''norm1''' , '''attention.output.LayerNorm''' )
if "norm2" in orig_key:
UpperCAmelCase = orig_key.replace('''norm2''' , '''output.LayerNorm''' )
if "norm" in orig_key:
UpperCAmelCase = orig_key.replace('''norm''' , '''LayerNorm''' )
if "transformer" in orig_key:
UpperCAmelCase = orig_key.split('''.''' )[0].split('''_''' )[-1]
UpperCAmelCase = orig_key.replace(F"""transformer_{layer_num}""" , F"""encoder.layer.{layer_num}""" )
if "mha.attn" in orig_key:
UpperCAmelCase = orig_key.replace('''mha.attn''' , '''attention.self''' )
if "mha" in orig_key:
UpperCAmelCase = orig_key.replace('''mha''' , '''attention''' )
if "W_q" in orig_key:
UpperCAmelCase = orig_key.replace('''W_q''' , '''self.query''' )
if "W_k" in orig_key:
UpperCAmelCase = orig_key.replace('''W_k''' , '''self.key''' )
if "W_v" in orig_key:
UpperCAmelCase = orig_key.replace('''W_v''' , '''self.value''' )
if "ff1" in orig_key:
UpperCAmelCase = orig_key.replace('''ff1''' , '''intermediate.dense''' )
if "ff2" in orig_key:
UpperCAmelCase = orig_key.replace('''ff2''' , '''output.dense''' )
if "ff" in orig_key:
UpperCAmelCase = orig_key.replace('''ff''' , '''output.dense''' )
if "mlm_class" in orig_key:
UpperCAmelCase = orig_key.replace('''mlm.mlm_class''' , '''cls.predictions.decoder''' )
if "mlm" in orig_key:
UpperCAmelCase = orig_key.replace('''mlm''' , '''cls.predictions.transform''' )
if "cls" not in orig_key:
UpperCAmelCase = '''yoso.''' + orig_key
return orig_key
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
UpperCAmelCase = orig_state_dict.pop(UpperCamelCase__ )
if ("pooler" in key) or ("sen_class" in key):
continue
else:
UpperCAmelCase = val
UpperCAmelCase = orig_state_dict['''cls.predictions.decoder.bias''']
UpperCAmelCase = torch.arange(UpperCamelCase__ ).expand((1, -1) ) + 2
return orig_state_dict
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> int:
'''simple docstring'''
UpperCAmelCase = torch.load(UpperCamelCase__ , map_location='''cpu''' )['''model_state_dict''']
UpperCAmelCase = YosoConfig.from_json_file(UpperCamelCase__ )
UpperCAmelCase = YosoForMaskedLM(UpperCamelCase__ )
UpperCAmelCase = convert_checkpoint_helper(config.max_position_embeddings , UpperCamelCase__ )
print(model.load_state_dict(UpperCamelCase__ ) )
model.eval()
model.save_pretrained(UpperCamelCase__ )
print(F"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""" )
if __name__ == "__main__":
__A : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for YOSO model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__A : List[str] = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 273
| 1
|
def one_pence() -> int:
    return 1


def two_pence(x: int) -> int:
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    return 0 if x < 0 else one_pound(x - 1_00) + fifty_pence(x)


def two_pound(x: int) -> int:
    return 0 if x < 0 else two_pound(x - 2_00) + one_pound(x)


def solution(x: int = 2_00) -> int:
    return two_pound(x)


if __name__ == "__main__":
    print(solution(int(input().strip())))
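
# The mutual recursion above re-solves overlapping subproblems; a bottom-up
# dynamic-programming sketch counts the same combinations in
# O(len(coins) * target) time (an illustrative alternative, not part of the
# original solution):
def count_ways(target: int = 2_00) -> int:
    coins = (1, 2, 5, 10, 20, 50, 1_00, 2_00)
    ways = [1] + [0] * target  # ways[0] = 1: the empty combination
    for coin in coins:
        for amount in range(coin, target + 1):
            ways[amount] += ways[amount - coin]
    return ways[target]


assert count_ways(2_00) == solution(2_00)  # both report 73682 for 2 pounds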
| 363
|
import cva
import numpy as np
class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self):
        return str(self.k)

    def detect(self, img_path: str):
        img = cva.imread(img_path, 0)
        h, w = img.shape
        corner_list = []
        color_img = img.copy()
        color_img = cva.cvtColor(color_img, cva.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = self.k  # use the validated constructor value instead of a hard-coded 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cva.imwrite("detect.png", color_img)
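
# The response computed above is the classic Harris measure
# R = det(M) - k * trace(M)**2 for the 2x2 structure tensor
# M = [[wxx, wxy], [wxy, wyy]]. A tiny numeric cross-check of the closed form
# against numpy's det/trace (illustrative values):
wxx, wyy, wxy, k = 4.0, 3.0, 1.0, 0.04
m = np.array([[wxx, wxy], [wxy, wyy]])
r = np.linalg.det(m) - k * np.trace(m) ** 2
assert abs(r - ((wxx * wyy - wxy**2) - k * (wxx + wyy) ** 2)) < 1e-9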
| 210
| 0
|
'''simple docstring'''
import math
def main() -> None:
    message = input("Enter message: ")
    key = int(input(f"Enter key [2-{len(message) - 1}]: "))
    mode = input("Encryption/Decryption [e/d]: ")

    if mode.lower().startswith("e"):
        text = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        text = decrypt_message(key, message)

    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f"Output:\n{text + '|'}")


def encrypt_message(key: int, message: str) -> str:
    cipher_text = [""] * key
    for col in range(key):
        pointer = col
        while pointer < len(message):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text)


def decrypt_message(key: int, message: str) -> str:
    num_cols = math.ceil(len(message) / key)
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message)
    plain_text = [""] * num_cols
    col = 0
    row = 0
    for symbol in message:
        plain_text[col] += symbol
        col += 1
        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1
    return "".join(plain_text)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
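
# Round-trip check of the two routines above, using the classic columnar
# transposition example with key 8:
assert encrypt_message(8, "Common sense is not so common.") == (
    "Cenoonommstmme oo snnio. s s c"
)
assert decrypt_message(8, "Cenoonommstmme oo snnio. s s c") == (
    "Common sense is not so common."
)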
| 267
|
'''simple docstring'''
def naive_cut_rod_recursive(n: int, prices: list):
    """Exhaustive recursion over every possible first-cut length."""
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revenue = float("-inf")
    for i in range(1, n + 1):
        max_revenue = max(
            max_revenue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices)
        )
    return max_revenue


def top_down_cut_rod(n: int, prices: list):
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )
        max_rev[n] = max_revenue
    return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0
    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i
    return max_rev[n]


def _enforce_args(n: int, prices: list):
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)
    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)

    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()
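
# The same top-down recurrence can be memoized with functools instead of the
# explicit max_rev table (an alternative sketch, not part of the module
# above):
from functools import lru_cache


def cut_rod_cached(n: int, prices: list) -> int:
    @lru_cache(maxsize=None)
    def best(length: int) -> int:
        if length == 0:
            return 0
        return max(prices[i - 1] + best(length - i) for i in range(1, length + 1))

    return best(n)


assert cut_rod_cached(6, [6, 10, 12, 15, 20, 23]) == 36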
| 267
| 1
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class __snake_case ( _lowercase , unittest.TestCase):
snake_case__ : Optional[int] = BlenderbotSmallTokenizer
snake_case__ : List[str] = False
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
super().setUp()
_lowerCamelCase : str = ['''__start__''', '''adapt''', '''act''', '''ap@@''', '''te''', '''__end__''', '''__unk__''']
_lowerCamelCase : Any = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) )
_lowerCamelCase : Any = ['''#version: 0.2''', '''a p''', '''t e</w>''', '''ap t</w>''', '''a d''', '''ad apt</w>''', '''a c''', '''ac t</w>''', '''''']
_lowerCamelCase : List[str] = {'''unk_token''': '''__unk__''', '''bos_token''': '''__start__''', '''eos_token''': '''__end__'''}
_lowerCamelCase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_lowerCamelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__lowerCAmelCase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Tuple , **__lowerCAmelCase : List[str] ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : List[str] ):
"""simple docstring"""
_lowerCamelCase : int = '''adapt act apte'''
_lowerCamelCase : Tuple = '''adapt act apte'''
return input_text, output_text
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : Tuple = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_lowerCamelCase : int = '''adapt act apte'''
_lowerCamelCase : Optional[Any] = ['''adapt''', '''act''', '''ap@@''', '''te''']
_lowerCamelCase : Optional[Any] = tokenizer.tokenize(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : List[Any] = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
_lowerCamelCase : Dict = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : str = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
assert tok('''sam''' ).input_ids == [1_3_8_4]
_lowerCamelCase : List[str] = '''I am a small frog.'''
_lowerCamelCase : str = tok([src_text] , padding=__lowerCAmelCase , truncation=__lowerCAmelCase )['''input_ids''']
_lowerCamelCase : Any = tok.batch_decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
_lowerCamelCase : Optional[Any] = '''I am a small frog .'''
_lowerCamelCase : str = '''.'''
_lowerCamelCase : str = tok(__lowerCAmelCase )['''input_ids''']
_lowerCamelCase : Dict = tok(__lowerCAmelCase )['''input_ids''']
assert encoded[-1] == encoded_dot[0]
| 175
|
"""simple docstring"""
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    """Check the Project Euler 43 substring-divisibility property."""
    if num[3] % 2 != 0:
        return False

    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 1_00 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    """Sum all 0-9 pandigital numbers with the substring-divisibility property."""
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
| 175
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'''configuration_deit''': ['''DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DeiTConfig''', '''DeiTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_deit'''] = ['''DeiTFeatureExtractor''']
    _import_structure['''image_processing_deit'''] = ['''DeiTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_deit'''] = [
'''DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DeiTForImageClassification''',
'''DeiTForImageClassificationWithTeacher''',
'''DeiTForMaskedImageModeling''',
'''DeiTModel''',
'''DeiTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_deit'''] = [
'''TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDeiTForImageClassification''',
'''TFDeiTForImageClassificationWithTeacher''',
'''TFDeiTForMaskedImageModeling''',
'''TFDeiTModel''',
'''TFDeiTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 244
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :str = "bert-generation"
def __init__( self , __A=5_0358 , __A=1024 , __A=24 , __A=16 , __A=4096 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=0.0_2 , __A=1E-12 , __A=0 , __A=2 , __A=1 , __A="absolute" , __A=True , **__A , ) -> Tuple:
super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A )
lowerCAmelCase_ :Any = vocab_size
lowerCAmelCase_ :List[Any] = hidden_size
lowerCAmelCase_ :Optional[int] = num_hidden_layers
lowerCAmelCase_ :int = num_attention_heads
lowerCAmelCase_ :List[Any] = hidden_act
lowerCAmelCase_ :Optional[Any] = intermediate_size
lowerCAmelCase_ :List[Any] = hidden_dropout_prob
lowerCAmelCase_ :int = attention_probs_dropout_prob
lowerCAmelCase_ :Tuple = max_position_embeddings
lowerCAmelCase_ :List[str] = initializer_range
lowerCAmelCase_ :Union[str, Any] = layer_norm_eps
lowerCAmelCase_ :List[str] = position_embedding_type
lowerCAmelCase_ :Optional[int] = use_cache
| 84
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A__ = logging.get_logger(__name__)
A__ = {
'''shi-labs/dinat-mini-in1k-224''': '''https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json''',
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class a ( __lowerCamelCase , __lowerCamelCase ):
__lowerCAmelCase : Any = """dinat"""
__lowerCAmelCase : Dict = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
    def __init__(self, patch_size=4, num_channels=3, embed_dim=64, depths=[3, 4, 6, 5], num_heads=[2, 4, 8, 16], kernel_size=7, dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]], mlp_ratio=3.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1e-5, layer_scale_init_value=0.0, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.dilations = dilations
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
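
# Worked example (added for clarity): with the defaults above (embed_dim=64
# and four stages), the derived hidden_size is int(64 * 2 ** 3) == 512.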
| 44
|
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    """simple docstring"""
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contain only {colors} values"
            raise ValueError(msg)
    return sequence
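
# Quick sanity check (added for illustration; not part of the original
# module): one pass groups the three colors in order.
assert dutch_national_flag_sort([2, 0, 1, 0, 2, 1]) == [0, 0, 1, 1, 2, 2]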
if __name__ == "__main__":
import doctest
doctest.testmod()
A__ = input('''Enter numbers separated by commas:\n''').strip()
A__ = [int(item.strip()) for item in user_input.split(''',''')]
print(f"""{dutch_national_flag_sort(unsorted)}""")
| 44
| 1
|
def price_plus_tax(price: float, tax_rate: float) -> float:
    """simple docstring"""
    return price * (1 + tax_rate)


if __name__ == "__main__":
    print(f"{price_plus_tax(100, 0.25) = }")
    print(f"{price_plus_tax(125.50, 0.05) = }")
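    # Expected output (added): 100 * 1.25 = 125.0 and 125.50 * 1.05 = 131.775.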
| 169
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    frames: Union[List[np.ndarray], torch.FloatTensor]


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_text_to_video_synth import TextToVideoSDPipeline
    from .pipeline_text_to_video_synth_img2img import VideoToVideoSDPipeline  # noqa: F401
    from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 169
| 1
|
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester:
    '''simple docstring'''
    def __init__(self, parent, batch_size=13, image_size=10, num_channels=3, patch_size=2, tubelet_size=2, num_frames=2, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, mask_ratio=0.9, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.tubelet_size = tubelet_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame

        # use this variable to define bool_masked_pos
        self.num_masks = int(mask_ratio * self.seq_length)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return VideoMAEConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_frames=self.num_frames, tubelet_size=self.tubelet_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = VideoMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = VideoMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        # important: each video needs to have the same number of masked patches
        # hence we define a single mask, which we then repeat for each example in the batch
        mask = torch.ones((self.num_masks,))
        mask = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0))])
        bool_masked_pos = mask.expand(self.batch_size, -1).bool()
        result = model(pixel_values, bool_masked_pos)
        # model only returns predictions for masked patches
        num_masked_patches = mask.sum().item()
        decoder_num_labels = 3 * self.tubelet_size * self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_masked_patches, decoder_num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class VideoMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''

    all_model_classes = (
        (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = VideoMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=VideoMAEConfig, has_text_modality=False, hidden_size=37)
def UpperCamelCase__ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False ):
snake_case_ = copy.deepcopy(_UpperCAmelCase )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
snake_case_ = torch.ones((self.model_tester.num_masks,) )
snake_case_ = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
snake_case_ = mask.expand(self.model_tester.batch_size , -1 ).bool()
snake_case_ = bool_masked_pos.to(_UpperCAmelCase )
if return_labels:
if model_class in [
*get_values(_UpperCAmelCase ),
]:
snake_case_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_UpperCAmelCase )
return inputs_dict
def UpperCamelCase__ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''VideoMAE does not use inputs_embeds''' )
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(_UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear ) )
def UpperCamelCase__ ( self ):
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(_UpperCAmelCase )
snake_case_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ = [*signature.parameters.keys()]
snake_case_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def UpperCamelCase__ ( self ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def UpperCamelCase__ ( self ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_UpperCAmelCase )
@slow
def UpperCamelCase__ ( self ):
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = VideoMAEModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def UpperCamelCase__ ( self ):
if not self.has_attentions:
pass
else:
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = True
for model_class in self.all_model_classes:
snake_case_ = self.model_tester.seq_length - self.model_tester.num_masks
snake_case_ = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
snake_case_ = True
snake_case_ = False
snake_case_ = True
snake_case_ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
snake_case_ = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
snake_case_ = outputs.attentions
self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
snake_case_ = True
snake_case_ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
snake_case_ = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
snake_case_ = outputs.attentions
self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
snake_case_ = len(_UpperCAmelCase )
# Check attention is always last and order is fine
snake_case_ = True
snake_case_ = True
snake_case_ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
snake_case_ = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(out_len + 1 , len(_UpperCAmelCase ) )
snake_case_ = outputs.attentions
self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def UpperCamelCase__ ( self ):
def check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
snake_case_ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
snake_case_ = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
snake_case_ = outputs.hidden_states
snake_case_ = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
snake_case_ = self.model_tester.seq_length - self.model_tester.num_masks
snake_case_ = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case_ = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def UpperCamelCase__ ( self ):
pass
def prepare_video() -> list:
    """simple docstring"""
    file = hf_hub_download(
        repo_id='''hf-internal-testing/spaghetti-video''', filename='''eating_spaghetti.npy''', repo_type='''dataset''' )
    video = np.load(file)
    return list(video)
@require_torch
@require_vision
class VideoMAEModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''
@cached_property
def UpperCamelCase__ ( self ):
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def UpperCamelCase__ ( self ):
snake_case_ = VideoMAEForVideoClassification.from_pretrained('''MCG-NJU/videomae-base-finetuned-kinetics''' ).to(
_UpperCAmelCase )
snake_case_ = self.default_image_processor
snake_case_ = prepare_video()
snake_case_ = image_processor(_UpperCAmelCase , return_tensors='''pt''' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
snake_case_ = model(**_UpperCAmelCase )
# verify the logits
snake_case_ = torch.Size((1, 4_00) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
snake_case_ = torch.tensor([0.3_669, -0.0_688, -0.2_421] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1E-4 ) )
@slow
def UpperCamelCase__ ( self ):
snake_case_ = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' ).to(_UpperCAmelCase )
snake_case_ = self.default_image_processor
snake_case_ = prepare_video()
snake_case_ = image_processor(_UpperCAmelCase , return_tensors='''pt''' ).to(_UpperCAmelCase )
# add boolean mask, indicating which patches to mask
snake_case_ = hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''' , filename='''bool_masked_pos.pt''' )
snake_case_ = torch.load(_UpperCAmelCase )
# forward pass
with torch.no_grad():
snake_case_ = model(**_UpperCAmelCase )
# verify the logits
snake_case_ = torch.Size([1, 14_08, 15_36] )
snake_case_ = torch.tensor(
[[0.7_994, 0.9_612, 0.8_508], [0.7_401, 0.8_958, 0.8_302], [0.5_862, 0.7_468, 0.7_325]] , device=_UpperCAmelCase )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , _UpperCAmelCase , atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
snake_case_ = torch.tensor([0.5_142] , device=_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.loss , _UpperCAmelCase , atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
snake_case_ = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' , norm_pix_loss=_UpperCAmelCase ).to(
_UpperCAmelCase )
with torch.no_grad():
snake_case_ = model(**_UpperCAmelCase )
snake_case_ = torch.tensor(torch.tensor([0.6_469] ) , device=_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.loss , _UpperCAmelCase , atol=1E-4 ) )
| 359
|
import numpy as np
import torch
import tqdm
from ...models.unet_1d import UNet1DModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler


class ValueGuidedRLPipeline(DiffusionPipeline):
    '''simple docstring'''

    def __init__(self, value_function: UNet1DModel, unet: UNet1DModel, scheduler: DDPMScheduler, env):
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except:  # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except:  # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]

    def normalize(self, x_in, key):
        return (x_in - self.means[key]) / self.stds[key]

    def de_normalize(self, x_in, key):
        return x_in * self.stds[key] + self.means[key]

    def to_torch(self, x_in):
        if type(x_in) is dict:
            return {k: self.to_torch(v) for k, v in x_in.items()}
        elif torch.is_tensor(x_in):
            return x_in.to(self.unet.device)
        return torch.tensor(x_in, device=self.unet.device)

    def reset_x0(self, x_in, cond, act_dim):
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in

    def run_diffusion(self, x, conditions, n_guide_steps, scale):
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long)
            for _ in range(n_guide_steps):
                with torch.enable_grad():
                    x.requires_grad_()

                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0, 2, 1), timesteps).sample
                    grad = torch.autograd.grad([y.sum()], [x])[0]

                    posterior_variance = self.scheduler._get_variance(i)
                    model_std = torch.exp(0.5 * posterior_variance)
                    grad = model_std * grad
                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_x0(x, conditions, self.action_dim)
            prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1)

            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"]

            # apply conditions to the trajectory (set the initial state)
            x = self.reset_x0(x, conditions, self.action_dim)
            x = self.to_torch(x)
        return x, y

    def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1):
        # normalize the observations and create batch dimension
        obs = self.normalize(obs, "observations")
        obs = obs[None].repeat(batch_size, axis=0)

        conditions = {0: self.to_torch(obs)}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)

        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        x1 = randn_tensor(shape, device=self.unet.device)
        x = self.reset_x0(x1, conditions, self.action_dim)
        x = self.to_torch(x)

        # run the diffusion process
        x, y = self.run_diffusion(x, conditions, n_guide_steps, scale)

        # sort output trajectories by value
        sorted_idx = y.argsort(0, descending=True).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions, key="actions")

        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0, batch_size)

        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
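
# Usage sketch (added; the env and checkpoint names below are illustrative
# assumptions, not part of this file):
# import gym
# env = gym.make("hopper-medium-v2")  # a d4rl-style env exposing get_dataset()
# pipeline = ValueGuidedRLPipeline.from_pretrained(
#     "bglick13/hopper-medium-v2-value-function-hor32", env=env
# )
# obs = env.reset()
# denorm_actions = pipeline(obs, planning_horizon=32)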
| 267
| 0
|
from math import sqrt


def sum_of_divisors(n: int) -> int:
    '''simple docstring'''
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n
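
# Sanity check (added; not part of the original solution): 220 and 284 form
# the classic amicable pair, so each proper-divisor sum yields the other.
assert sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220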

def solution(n: int = 10000) -> int:
    '''simple docstring'''
    total = sum(
        i
        for i in range(1, n)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
| 129
|
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)

DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)
def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    """simple docstring"""
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )

def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    """simple docstring"""
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
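
# Usage sketch (added; assumes two small in-memory map-style datasets):
# from datasets import Dataset
# d1 = Dataset.from_dict({"a": [0, 1, 2]})
# d2 = Dataset.from_dict({"a": [10, 11, 12]})
# interleave_datasets([d1, d2])["a"]   # alternates: [0, 10, 1, 11, 2, 12]
# concatenate_datasets([d1, d2])["a"]  # appends: [0, 1, 2, 10, 11, 12]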
| 210
| 0
|
'''simple docstring'''
from __future__ import annotations

from PIL import Image

# Define glider example
GLIDER = [
    [0, 1, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0, 0],
    [1, 1, 1, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
]

# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]


def new_generation(cells: list[list[int]]) -> list[list[int]]:
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]

            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (
                (alive and 2 <= neighbour_count <= 3)
                or not alive
                and neighbour_count == 3
            ):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)

        next_generation.append(next_generation_row)
    return next_generation
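
# Sanity check (added for illustration): by the rules commented above, a
# vertical blinker flips to a horizontal bar after one generation.
assert new_generation(BLINKER) == [[0, 0, 0], [1, 1, 1], [0, 0, 0]]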

def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()

        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)

        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images


if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
    images[0].save("out.gif", save_all=True, append_images=images[1:])
| 367
|
'''simple docstring'''
def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]
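
# Worked example (added; assumes the reconstructed signature above): four
# points on y = x**2 determine the quadratic exactly, so evaluating at
# x0 = 5 recovers 5 ** 2.
assert neville_interpolate([1, 2, 3, 4], [1, 4, 9, 16], 5)[0] == 25.0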
if __name__ == "__main__":
import doctest
doctest.testmod()
| 3
| 0
|
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger('transformers.models.speecht5')
MAPPING_SPEECH_ENCODER_PRENET = {
'speech_encoder_prenet.layer_norm': 'speecht5.encoder.prenet.feature_projection.layer_norm',
'speech_encoder_prenet.post_extract_proj': 'speecht5.encoder.prenet.feature_projection.projection',
'speech_encoder_prenet.pos_conv.0': 'speecht5.encoder.prenet.pos_conv_embed.conv',
'speech_encoder_prenet.mask_emb': 'speecht5.encoder.prenet.masked_spec_embed',
}
MAPPING_TEXT_ENCODER_PRENET = {
'text_encoder_prenet.encoder_prenet.0': 'speecht5.encoder.prenet.embed_tokens',
'text_encoder_prenet.encoder_prenet.1.alpha': 'speecht5.encoder.prenet.encode_positions.alpha',
}
MAPPING_SPEECH_DECODER_PRENET = {
'speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0': 'speecht5.decoder.prenet.layers.0',
'speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0': 'speecht5.decoder.prenet.layers.1',
'speech_decoder_prenet.decoder_prenet.0.1': 'speecht5.decoder.prenet.final_layer',
'speech_decoder_prenet.decoder_prenet.1.alpha': 'speecht5.decoder.prenet.encode_positions.alpha',
'speech_decoder_prenet.spkembs_layer.0': 'speecht5.decoder.prenet.speaker_embeds_layer',
}
MAPPING_SPEECH_DECODER_POSTNET = {
'speech_decoder_postnet.feat_out': 'speech_decoder_postnet.feat_out',
'speech_decoder_postnet.prob_out': 'speech_decoder_postnet.prob_out',
'speech_decoder_postnet.postnet.postnet.0.0': 'speech_decoder_postnet.layers.0.conv',
'speech_decoder_postnet.postnet.postnet.0.1': 'speech_decoder_postnet.layers.0.batch_norm',
'speech_decoder_postnet.postnet.postnet.1.0': 'speech_decoder_postnet.layers.1.conv',
'speech_decoder_postnet.postnet.postnet.1.1': 'speech_decoder_postnet.layers.1.batch_norm',
'speech_decoder_postnet.postnet.postnet.2.0': 'speech_decoder_postnet.layers.2.conv',
'speech_decoder_postnet.postnet.postnet.2.1': 'speech_decoder_postnet.layers.2.batch_norm',
'speech_decoder_postnet.postnet.postnet.3.0': 'speech_decoder_postnet.layers.3.conv',
'speech_decoder_postnet.postnet.postnet.3.1': 'speech_decoder_postnet.layers.3.batch_norm',
'speech_decoder_postnet.postnet.postnet.4.0': 'speech_decoder_postnet.layers.4.conv',
'speech_decoder_postnet.postnet.postnet.4.1': 'speech_decoder_postnet.layers.4.batch_norm',
}
MAPPING_TEXT_DECODER_PRENET = {
'text_decoder_prenet.embed_tokens': 'speecht5.decoder.prenet.embed_tokens',
}
MAPPING_TEXT_DECODER_POSTNET = {
'text_decoder_postnet.output_projection': 'text_decoder_postnet.lm_head',
}
MAPPING_ENCODER = {
'encoder.layers.*.self_attn.k_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj',
'encoder.layers.*.self_attn.v_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj',
'encoder.layers.*.self_attn.q_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj',
'encoder.layers.*.self_attn.out_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj',
'encoder.layers.*.self_attn_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.layer_norm',
'encoder.layers.*.fc1': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense',
'encoder.layers.*.fc2': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense',
'encoder.layers.*.final_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'speecht5.encoder.wrapped_encoder.layer_norm',
'encoder.pos_emb.pe_k': 'speecht5.encoder.wrapped_encoder.embed_positions.pe_k',
}
MAPPING_DECODER = {
'decoder.layers.*.self_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj',
'decoder.layers.*.self_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj',
'decoder.layers.*.self_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj',
'decoder.layers.*.self_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj',
'decoder.layers.*.self_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm',
'decoder.layers.*.encoder_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj',
'decoder.layers.*.encoder_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj',
'decoder.layers.*.encoder_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj',
'decoder.layers.*.encoder_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj',
'decoder.layers.*.encoder_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm',
'decoder.layers.*.fc1': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense',
'decoder.layers.*.fc2': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense',
'decoder.layers.*.final_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm',
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
'encoder.version',
'encoder.layers.*.norm_k.weight',
'encoder.layers.*.norm_k.bias',
'decoder.version',
'decoder.layers.*.norm_k.weight',
'decoder.layers.*.norm_k.bias',
'decoder.pos_emb.pe_k',
'speech_encoder_prenet.embed_positions._float_tensor',
'text_decoder_prenet.embed_positions._float_tensor',
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
'encoder.proj',
'text_encoder_prenet.*',
'speech_decoder_prenet.*',
'speech_decoder_postnet.*',
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
'encoder.proj',
'speech_encoder_prenet.*',
'text_decoder_prenet.*',
'text_decoder_postnet.*',
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
'encoder.proj',
'text_encoder_prenet.*',
'text_decoder_prenet.*',
'text_decoder_postnet.*',
]
def set_recursively(hf_model, key, value, full_name, weight_type):
    for attribute in key.split('.'):
        hf_model = getattr(hf_model, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_model, weight_type).shape
    else:
        hf_shape = hf_model.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}")

    if weight_type == "weight":
        hf_model.weight.data = value
    elif weight_type == "weight_g":
        hf_model.weight_g.data = value
    elif weight_type == "weight_v":
        hf_model.weight_v.data = value
    elif weight_type == "bias":
        hf_model.bias.data = value
    elif weight_type == "running_mean":
        hf_model.running_mean.data = value
    elif weight_type == "running_var":
        hf_model.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_model.num_batches_tracked.data = value
    else:
        hf_model.data = value
    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith('.*'):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split('.*.')
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
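
# Example (added for clarity): for the "t2s" task, a checkpoint entry such as
# "speech_encoder_prenet.post_extract_proj" matches the "speech_encoder_prenet.*"
# pattern in IGNORE_KEYS_T2S, so should_ignore returns True and it is skipped.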
def recursively_load_weights(fairseq_dict, hf_model, task):
    unused_weights = []
    if task == "s2t":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"Unsupported task: {task}")

    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_encoder, unused_weights, hf_model.config.feat_extract_norm == 'group', )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split('.*.')
                    if prefix in name and suffix in name:
                        key = suffix

                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('.')[-2]
                        mapped_key = mapped_key.replace('*', layer_index)
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        weight_type = 'weight'
                    elif "running_mean" in name:
                        weight_type = 'running_mean'
                    elif "running_var" in name:
                        weight_type = 'running_var'
                    elif "num_batches_tracked" in name:
                        weight_type = 'num_batches_tracked'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_speechta_checkpoint(task, checkpoint_path, pytorch_dump_folder_path, config_path=None, vocab_path=None, repo_id=None):
    if config_path is not None:
        config = SpeechTaConfig.from_pretrained(config_path)
    else:
        config = SpeechTaConfig()

    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechTaForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechTaForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechTaForSpeechToSpeech(config)
    else:
        raise ValueError(f"Unknown task name: {task}")

    if vocab_path:
        tokenizer = SpeechTaTokenizer(vocab_path, model_max_length=config.max_text_positions)

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken('<mask>', lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({'mask_token': mask_token})
        tokenizer.add_tokens(['<ctc_blank>'])

    feature_extractor = SpeechTaFeatureExtractor()
    processor = SpeechTaProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)

    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint['model'], model, task)

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print('Pushing to the hub...')
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--task',
default='s2t',
type=str,
help='Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.',
)
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--vocab_path', default=None, type=str, help='Path to SentencePiece model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
    args = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
| 175
|
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = 'Arg --no_{0} is no longer used, please use --no-{0} instead.'
        begin_error_msg = ' '.join(str(e).split(' ')[:-1])
        full_error_msg = ''
        depreciated_args = eval(str(e).split(' ')[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()
if __name__ == "__main__":
main()
| 175
| 1
|
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}


def _register_formatter(formatter_cls: type, format_type: Optional[str], aliases: Optional[List[str]] = None):
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})")
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})")
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None):
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['python'])
_register_formatter(ArrowFormatter, 'arrow', aliases=['pa', 'pyarrow'])
_register_formatter(NumpyFormatter, 'numpy', aliases=['np'])
_register_formatter(PandasFormatter, 'pandas', aliases=['pd'])
_register_formatter(CustomFormatter, 'custom')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, 'torch', aliases=['pt', 'pytorch'])
else:
    _torch_error = ValueError('PyTorch needs to be installed to be able to return PyTorch tensors.')
_register_unavailable_formatter(_torch_error, 'torch', aliases=['pt', 'pytorch'])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, 'tensorflow', aliases=['tf'])
else:
    _tf_error = ValueError('Tensorflow needs to be installed to be able to return Tensorflow tensors.')
_register_unavailable_formatter(_tf_error, 'tensorflow', aliases=['tf'])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, 'jax', aliases=[])
else:
    _jax_error = ValueError('JAX needs to be installed to be able to return JAX arrays.')
_register_unavailable_formatter(_jax_error, 'jax', aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got '{format_type}'")
| 210
|
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
T = TypeVar("T")


def get_parent_position(position: int) -> int:
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    return (2 * position) + 2


class MinPriorityQueue(Generic[T]):
    '''simple docstring'''

    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        # Add an element with given priority to the queue
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        # Update the weight of the given key
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        # Place a node at the proper position (upward movement) [to be used internally
        # only]
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        # Place a node at the proper position (downward movement) [to be used
        # internally only]
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
            else:
                return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        # Swap the nodes at the given positions
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos


class GraphUndirectedWeighted(Generic[T]):
    '''simple docstring'''

    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # Add an edge between 2 nodes in the graph
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight


def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}

    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
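
# Minimal usage sketch (added; assumes the reconstructed names above):
# graph = GraphUndirectedWeighted()
# graph.add_edge("a", "b", 3)
# graph.add_edge("b", "c", 10)
# graph.add_edge("c", "a", 5)
# dist, parent = prims_algo(graph)
# # The MST keeps edges a-b (3) and a-c (5) and skips b-c (10).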
| 210
| 1
|
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
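
# Usage sketch (added; assumes a Features dict whose "labels" column is a
# ClassLabel):
# features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
# task = ImageClassification().align_with_features(features)
# task.label_schema["labels"].names  # -> ["cat", "dog"]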
| 44
|
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
def __A ( self ):
_lowerCAmelCase : List[str] = """<pad>"""
_lowerCAmelCase : List[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a__ ) , a__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a__ ) , a__ )
def __A ( self ):
_lowerCAmelCase : Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(len(a__ ) , 1008 )
def __A ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1008 )
def __A ( self ):
_lowerCAmelCase : List[Any] = XGLMTokenizer(a__ , keep_accents=a__ )
_lowerCAmelCase : Dict = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(a__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
_lowerCAmelCase : Any = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
a__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
    @cached_property
    def big_tokenizer(self):
        return XGLMTokenizer.from_pretrained("facebook/xglm-564M")

    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XGLMTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [2, 31227, 4447, 35]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
        )
# fmt: off
        original_tokenizer_encodings = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735]
# fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenizer_integration(self):
# fmt: off
        expected_encoding = {
"""input_ids""": [[2, 108825, 1163, 15, 88010, 473, 15898, 157, 13672, 1857, 312, 8, 238021, 1163, 53, 13672, 1857, 312, 8, 53283, 182396, 8, 18566, 16, 36733, 4101, 8, 230, 244017, 122553, 7, 15, 132597, 4, 293, 12511, 7610, 4, 3414, 132597, 9, 4, 32361, 362, 4, 734, 28512, 32569, 18, 4, 32361, 26096, 14982, 73, 18715, 21433, 235261, 15, 492, 12427, 16, 53, 18715, 21433, 65454, 15, 23659, 563, 16, 278, 597, 2843, 595, 7931, 182396, 64186, 22, 886, 595, 132981, 53, 25540, 3449, 43982, 39901, 5951, 878, 330, 4, 27694, 80269, 312, 53, 6517, 11780, 611, 20408, 5], [2, 6, 132597, 67, 42897, 33, 592, 8, 163729, 25540, 361, 136997, 109514, 173230, 7, 501, 60, 102913, 196, 5631, 235, 63243, 473, 6, 231757, 74, 5277, 7905, 53, 3095, 37317, 22, 454, 183874, 5], [2, 268, 31298, 46530, 6, 132935, 43831, 7, 597, 32, 24, 3688, 9865, 5]],
"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="facebook/xglm-564M",
            padding=False,
        )
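

# The fairseq_offset arithmetic that test_full_tokenizer relies on, in isolation:
# XGLM reserves a few leading ids for special tokens, so raw SentencePiece ids are
# shifted by a constant before use (the values below are illustrative only):
#
#     fairseq_offset = 1
#     sp_ids = [285, 46, 10, 170, 382]
#     hf_ids = [i + fairseq_offset for i in sp_ids]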
| 44
| 1
|
'''simple docstring'''
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class FunnelTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "<cls>",
            "<sep>",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_token_type_ids(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            inputs = tokenizer("UNwant\u00E9d,running")
            sentence_len = len(inputs["input_ids"]) - 1
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len)

            inputs = tokenizer("UNwant\u00E9d,running", "UNwant\u00E9d,running")
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len + [1] * sentence_len)
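

# What test_token_type_ids pins down, spelled out: unlike BERT (which uses segment id
# 0 for everything in the first segment, [CLS] included), Funnel gives the leading
# <cls> token its own token type id 2. For a two-segment input with m and n non-special
# tokens respectively, the expected layout is:
#
#     token_type_ids == [2] + [0] * m + [1] * n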
| 17
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
    "processing_layoutlmv2": ["LayoutLMv2Processor"],
    "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv2"] = [
        "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv2ForQuestionAnswering",
        "LayoutLMv2ForSequenceClassification",
        "LayoutLMv2ForTokenClassification",
        "LayoutLMv2Layer",
        "LayoutLMv2Model",
        "LayoutLMv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor
        from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
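

# What the lazy pattern above buys you (a sketch, not part of the module): importing
# the package only parses this file; heavy submodules load on first attribute access.
#
#     import transformers.models.layoutlmv2 as layoutlmv2   # cheap
#     config_cls = layoutlmv2.LayoutLMv2Config              # triggers the real submodule import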
| 17
| 1
|
import random
def rabin_miller(num: int) -> bool:
    # Rabin-Miller probabilistic primality test
    s = num - 1
    t = 0

    while s % 2 == 0:
        s = s // 2
        t += 1

    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True


def is_prime_low_num(num: int) -> bool:
    # Fast pre-check against a table of small primes before falling back to the
    # (more expensive) Rabin-Miller test.
    if num < 2:
        return False

    low_primes = [
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61,
        67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137,
        139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211,
        223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283,
        293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379,
        383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461,
        463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563,
        569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643,
        647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739,
        743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829,
        839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937,
        941, 947, 953, 967, 971, 977, 983, 991, 997,
    ]

    if num in low_primes:
        return True

    for prime in low_primes:
        if (num % prime) == 0:
            return False

    return rabin_miller(num)


def generate_large_prime(keysize: int = 1024) -> int:
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num


if __name__ == "__main__":
    num = generate_large_prime()
    print(("Prime number:", num))
    print(("is_prime_low_num:", is_prime_low_num(num)))
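

# A quick, fully local sanity check of the helpers above; the composite case needs no
# randomness, since 1000 is divisible by small primes in `low_primes`:
if __name__ == "__main__":
    assert is_prime_low_num(97)
    assert not is_prime_low_num(1000)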
| 178
|
'''simple docstring'''
import logging
import os
import threading
import time
try:
    import warnings
except ImportError:
    warnings = None

try:
    import msvcrt
except ImportError:
    msvcrt = None

try:
    import fcntl
except ImportError:
    fcntl = None


# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError


# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]

__version__ = "3.0.12"

_logger = None


def logger():
    """Returns the logger instance used in this module."""
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger
class Timeout(TimeoutError):
    """Raised when the lock could not be acquired in *timeout* seconds."""

    def __init__(self, lock_file):
        #: The path of the file lock.
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp


class _Acquire_ReturnProxy:
    """A context-manager proxy returned by ``BaseFileLock.acquire``."""

    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None
class BaseFileLock:
    """Implements the base class of a file lock."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file

        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None

        # The default timeout value.
        self.timeout = timeout

        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()

        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        """The path to the lock file."""
        return self._lock_file

    @property
    def timeout(self):
        """The default timeout value (in seconds)."""
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        """Platform-dependent lock acquisition; implemented in subclasses."""
        raise NotImplementedError()

    def _release(self):
        """Platform-dependent lock release; implemented in subclasses."""
        raise NotImplementedError()

    @property
    def is_locked(self):
        """True, if the object holds the file lock."""
        return self._lock_file_fd is not None
    def acquire(self, timeout=None, poll_intervall=0.05):
        """Acquires the file lock or fails with a :exc:`Timeout` error."""
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)
    def release(self, force=False):
        """
        Releases the file lock. The lock is only fully released once the lock
        counter reaches 0; pass ``force=True`` to release it regardless.
        """
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")

        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path: str, max_length: int) -> str:
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path
class WindowsFileLock(BaseFileLock):
    """Uses the :func:`msvcrt.locking` function to hard lock the lock file on Windows systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None


class UnixFileLock(BaseFileLock):
    """Uses :func:`fcntl.flock` to hard lock the lock file on unix systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)

        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        # Do not remove the lockfile; doing so opens a race with other processes.
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None


class SoftFileLock(BaseFileLock):
    """Simply watches the existence of the lock file."""

    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None


FileLock = None

if msvcrt:
    FileLock = WindowsFileLock

elif fcntl:
    FileLock = UnixFileLock

else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available")
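

# Typical usage of whichever FileLock alias was selected above (a sketch; the paths
# are placeholders):
#
#     lock = FileLock("resource.txt.lock")
#     with lock:
#         with open("resource.txt", "a") as f:
#             f.write("exclusive access\n")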
| 267
| 0
|
"""simple docstring"""
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        backbone_featmap_shape=[1, 16, 4, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.backbone_featmap_shape = backbone_featmap_shape

        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        num_patches = (self.image_size // 32) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [4, 8, 16, 32],
            "num_groups": 2,
        }

        return ViTHybridConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            backbone_featmap_shape=self.backbone_featmap_shape,
            backbone_config=backbone_config,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTHybridModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTHybridForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTHybridModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTHybridModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTHybridConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTHybridModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTHybridModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head(self):
        model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9090, -0.4993, -0.2389]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    def test_accelerate_inference(self):
        image_processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
        model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384", device_map="auto")

        image = prepare_img()

        inputs = image_processor(images=image, return_tensors="pt")
        outputs = model(**inputs)
        logits = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1).item()

        self.assertEqual(model.config.id2label[predicted_class_idx], "tabby, tabby cat")
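

# A standalone inference sketch mirroring test_accelerate_inference above (assumes
# network access to the Hub for the checkpoint download):
#
#     processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
#     model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384")
#     inputs = processor(images=prepare_img(), return_tensors="pt")
#     with torch.no_grad():
#         logits = model(**inputs).logits
#     print(model.config.id2label[logits.argmax(-1).item()])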
| 233
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """vit.embeddings.cls_token"""),
("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    # define default ViT configuration
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6

    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
    args = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
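
# Example invocation (the script filename and output path are placeholders):
#
#     python convert_dino_to_pytorch.py --model_name dino_vitb16 \
#         --pytorch_dump_folder_path ./dino_vitb16 --base_model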
| 233
| 1
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_yolos_config(yolos_name: str) -> YolosConfig:
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]

    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(name: str) -> str:
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")

    return name
def convert_state_dict(orig_state_dict: dict, model: YolosForObjectDetection) -> dict:
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_yolos_checkpoint(yolos_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]]
        )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]]
        )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]]
        )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]]
        )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]]
        )
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")

    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--yolos_name""",
default="""yolos_s_200_pre""",
type=str,
help=(
"""Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"""
""" 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."""
),
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original state dict (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
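
# Example invocation (the script filename and paths are placeholders):
#
#     python convert_yolos_to_pytorch.py --yolos_name yolos_s_200_pre \
#         --checkpoint_path ./yolos_s_200_pre.pth --pytorch_dump_folder_path ./yolos-small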
| 40
|
'''simple docstring'''
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
logger = logging.get_logger(__name__)
def load_pytorch_checkpoint_in_flax_state_dict(
    flax_model, pytorch_checkpoint_path, is_sharded, allow_missing_keys=False
):
    """Load pytorch checkpoints in a flax model"""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    if not is_sharded:
        pt_path = os.path.abspath(pytorch_checkpoint_path)
        logger.info(f"Loading PyTorch weights from {pt_path}")

        pt_state_dict = torch.load(pt_path, map_location="cpu")
        logger.info(f"PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values()):,} parameters.")

        flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path, flax_model)
    return flax_state_dict
def rename_key_and_reshape_tensor(
    pt_tuple_key,
    pt_tensor,
    random_flax_state_dict,
    model_prefix,
):
    """Rename PT weight names to corresponding Flax weight names and reshape tensor if necessary"""

    def is_key_or_prefix_key_in_dict(key) -> bool:
        """Checks if `key` or `(model_prefix,) + key` is in random_flax_state_dict"""
        return len(set(random_flax_state_dict) & {key, (model_prefix,) + key}) > 0

    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer mean
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("mean",)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer var
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("var",)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    name = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        name = pt_tuple_key[-2] + "_g"
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        name = pt_tuple_key[-2] + "_v"
    if name is not None:
        renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
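

# The two reshape conventions above, summarized (illustrative shapes only):
#     PyTorch Linear.weight  (out, in)           -> Flax Dense "kernel" (in, out)          via .T
#     PyTorch Conv2d.weight  (out, in, kh, kw)   -> Flax Conv "kernel"  (kh, kw, in, out)  via transpose(2, 3, 1, 0)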
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model):
    # convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    model_prefix = flax_model.base_model_prefix

    # use params dict if the model contains batch norm layers
    if "params" in flax_model.params:
        flax_model_params = flax_model.params["params"]
    else:
        flax_model_params = flax_model.params
    random_flax_state_dict = flatten_dict(flax_model_params)

    # add batch_stats keys,values to dict
    if "batch_stats" in flax_model.params:
        flax_batch_stats = flatten_dict(flax_model.params["batch_stats"])
        random_flax_state_dict.update(flax_batch_stats)

    flax_state_dict = {}

    load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
        model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()}
    )
    load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
        model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()}
    )

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        pt_tuple_key = tuple(pt_key.split("."))

        # remove base model prefix if necessary
        has_base_model_prefix = pt_tuple_key[0] == model_prefix
        if load_model_with_head_into_base_model and has_base_model_prefix:
            pt_tuple_key = pt_tuple_key[1:]

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(
            pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
        )

        # add model prefix if necessary
        require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
        if load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key = (model_prefix,) + flax_key

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # add batch stats if the model contains batchnorm layers
        if "batch_stats" in flax_model.params:
            if "mean" in flax_key[-1] or "var" in flax_key[-1]:
                flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                continue
            # remove num_batches_tracked key
            if "num_batches_tracked" in flax_key[-1]:
                flax_state_dict.pop(flax_key, None)
                continue

            # also add unexpected weight so that warning is thrown
            flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)
        else:
            # also add unexpected weight so that warning is thrown
            flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
def convert_pytorch_sharded_state_dict_to_flax(shard_filenames, flax_model):
    import torch

    # Load the index
    flax_state_dict = {}
    for shard_file in shard_filenames:
        # load using msgpack utils
        pt_state_dict = torch.load(shard_file)
        pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

        model_prefix = flax_model.base_model_prefix

        # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
        if "batch_stats" in flax_model.params:
            flax_model_params = flax_model.params["params"]

            random_flax_state_dict = flatten_dict(flax_model_params)
            random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"]))
        else:
            flax_model_params = flax_model.params
            random_flax_state_dict = flatten_dict(flax_model_params)

        load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
            model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()}
        )
        load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
            model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()}
        )
        # Need to change some parameters name to match Flax names
        for pt_key, pt_tensor in pt_state_dict.items():
            pt_tuple_key = tuple(pt_key.split("."))

            # remove base model prefix if necessary
            has_base_model_prefix = pt_tuple_key[0] == model_prefix
            if load_model_with_head_into_base_model and has_base_model_prefix:
                pt_tuple_key = pt_tuple_key[1:]

            # Correctly rename weight parameters
            flax_key, flax_tensor = rename_key_and_reshape_tensor(
                pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
            )
            # add model prefix if necessary
            require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
            if load_base_model_into_model_with_head and require_base_model_prefix:
                flax_key = (model_prefix,) + flax_key

            if flax_key in random_flax_state_dict:
                if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                    raise ValueError(
                        f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                        f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                    )

            # add batch stats if the model contains batchnorm layers
            if "batch_stats" in flax_model.params:
                if "mean" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                if "var" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                # remove num_batches_tracked key
                if "num_batches_tracked" in flax_key[-1]:
                    flax_state_dict.pop(flax_key, None)
                    continue

                # also add unexpected weight so that warning is thrown
                flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)
            else:
                # also add unexpected weight so that warning is thrown
                flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
def load_flax_checkpoint_in_pytorch_model(model, flax_checkpoint_path):
    """Load flax checkpoints in a PyTorch model"""
    flax_checkpoint_path = os.path.abspath(flax_checkpoint_path)
    logger.info(f"Loading Flax weights from {flax_checkpoint_path}")

    # import correct flax class
    flax_cls = getattr(transformers, "Flax" + model.__class__.__name__)

    # load flax weight dict
    with open(flax_checkpoint_path, "rb") as state_f:
        try:
            flax_state_dict = from_bytes(flax_cls, state_f.read())
        except UnpicklingError:
            raise EnvironmentError(f"Unable to convert {flax_checkpoint_path} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(model, flax_state_dict)
def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
'''Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
# check if we have bf16 weights
A : List[str] = flatten_dict(jax.tree_util.tree_map(lambda snake_case__ : x.dtype == jnp.bfloataa , snake_case__ ) ).values()
if any(snake_case__ ):
# convert all weights to fp32 if they are bf16, since torch.from_numpy cannot handle
# bf16 and bf16 is not fully supported in PT yet.
logger.warning(
'''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '''
'''before loading those in PyTorch model.''' )
A : Optional[Any] = jax.tree_util.tree_map(
lambda params : params.astype(np.float32 ) if params.dtype == jnp.bfloat16 else params , snake_case__ )
A : Union[str, Any] = flatten_dict(snake_case__ )
A : List[Any] = pt_model.state_dict()
A : Union[str, Any] = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
)
A : Tuple = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
A : int = []
A : Any = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
A : Union[str, Any] = flax_key_tuple[0] == pt_model.base_model_prefix
A : int = '''.'''.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
A : List[str] = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
A : Optional[Any] = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(snake_case__ ) not in pt_model_dict:
# conv layer
A : Optional[int] = flax_key_tuple[:-1] + ('''weight''',)
A : Optional[int] = jnp.transpose(snake_case__ , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(snake_case__ ) not in pt_model_dict:
# linear layer
A : Tuple = flax_key_tuple[:-1] + ('''weight''',)
A : Tuple = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
A : Optional[int] = flax_key_tuple[:-1] + ('''weight''',)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
A : Tuple = flax_key_tuple[:-1] + ('''running_mean''',)
elif "var" in flax_key_tuple[-1]:
A : Tuple = flax_key_tuple[:-1] + ('''running_var''',)
if "batch_stats" in flax_state:
A : List[Any] = '''.'''.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
A : Union[str, Any] = '''.'''.join(snake_case__ )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
A : int = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
A : Optional[int] = key.split('''.''' )
A : Dict = None
if key_components[-3::2] == ["parametrizations", "original0"]:
A : List[str] = key_components[-2] + '''_g'''
elif key_components[-3::2] == ["parametrizations", "original1"]:
A : List[Any] = key_components[-2] + '''_v'''
if name is not None:
A : str = key_components[:-3] + [name]
A : Optional[Any] = '''.'''.join(snake_case__ )
A : Optional[Any] = key
if flax_key in special_pt_names:
A : Optional[Any] = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
else:
# add weight to pytorch dict
A : Dict = np.asarray(snake_case__ ) if not isinstance(snake_case__ , np.ndarray ) else flax_tensor
A : Dict = torch.from_numpy(snake_case__ )
# remove from missing keys
missing_keys.remove(snake_case__ )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(snake_case__ )
pt_model.load_state_dict(snake_case__ )
# re-transform missing_keys to list
A : List[Any] = list(snake_case__ )
if len(snake_case__ ) > 0:
logger.warning(
'''Some weights of the Flax model were not used when initializing the PyTorch model'''
F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'''
F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'''
''' FlaxBertForSequenceClassification model).''' )
else:
logger.warning(F'All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n' )
if len(snake_case__ ) > 0:
logger.warning(
F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
''' use it for predictions and inference.''' )
else:
logger.warning(
F'All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'
'''If your task is similar to the task the model of the checkpoint was trained on, '''
F'you can already use {pt_model.__class__.__name__} for predictions without further training.' )
return pt_model
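# --- Illustrative aside -------------------------------------------------------------
# A numpy-only sketch (illustrative shapes) of the kernel-layout conversion done in
# the loop above: Flax stores conv kernels as (H, W, in, out) while PyTorch expects
# (out, in, H, W), hence the (3, 2, 0, 1) transpose; Flax dense kernels are
# (in, out) versus PyTorch's (out, in), hence the plain transpose.
import numpy as np

flax_conv = np.zeros((3, 3, 16, 32))             # (H, W, in_channels, out_channels)
pt_conv = np.transpose(flax_conv, (3, 2, 0, 1))  # -> (out, in, H, W)
assert pt_conv.shape == (32, 16, 3, 3)

flax_dense = np.zeros((768, 3072))               # (in_features, out_features)
assert flax_dense.T.shape == (3072, 768)         # nn.Linear weight layout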
| 3
| 0
|
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
__lowercase = """%20""".join(argv[1:]) if len(argv) > 1 else quote(str(input("""Search: """)))
print("""Googling.....""")
__lowercase = f'''https://www.google.com/search?q={query}&num=100'''
__lowercase = requests.get(
url,
headers={"""User-Agent""": str(UserAgent().random)},
)
try:
__lowercase = (
BeautifulSoup(res.text, """html.parser""")
.find("""div""", attrs={"""class""": """yuRUbf"""})
.find("""a""")
.get("""href""")
)
except AttributeError:
__lowercase = parse_qs(
BeautifulSoup(res.text, """html.parser""")
.find("""div""", attrs={"""class""": """kCrYT"""})
.find("""a""")
.get("""href""")
)["""url"""][0]
webbrowser.open(link)
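# --- Illustrative aside -------------------------------------------------------------
# Why the fallback branch above needs parse_qs: in the lightweight result markup the
# anchor href is a redirect whose query string carries the real target URL. A made-up
# example of that extraction (the key name here is illustrative):
from urllib.parse import parse_qs

toy_query = "q=https://example.com/page&sa=U"
assert parse_qs(toy_query)["q"][0] == "https://example.com/page"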
| 226
|
"""simple docstring"""
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def lowercase ( )-> Union[str, Any]:
'''simple docstring'''
a : Union[str, Any] = torch.nn.Linear(2 , 4 )
a : Tuple = torch.optim.AdamW(model.parameters() , lr=1.0 )
a : Union[str, Any] = torch.optim.lr_scheduler.OneCycleLR(A_ , max_lr=0.0_1 , steps_per_epoch=2 , epochs=1 )
a : List[str] = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) )
a : int = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) )
return model, optimizer, scheduler, train_dl, valid_dl
def lowercase ( A_ )-> List[Any]:
'''simple docstring'''
return (model.weight.abs().sum() + model.bias.abs().sum()).item()
def lowercase ( A_ )-> Tuple:
'''simple docstring'''
a : Optional[int] = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict()
model.load_state_dict(A_ )
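# --- Illustrative aside -------------------------------------------------------------
# What the two helpers above are for: `get_signature` reduces a Linear layer to one
# scalar (sum of |weight| + |bias|) that is cheap to compare before and after a
# checkpoint round trip, and `load_random_weights` re-randomizes the layer so that a
# successful `load_state` is distinguishable from a no-op. A standalone sketch:
import torch

layer = torch.nn.Linear(2, 4)
sig = (layer.weight.abs().sum() + layer.bias.abs().sum()).item()
layer.load_state_dict(torch.nn.Linear(2, 4).state_dict())  # fresh random re-init
new_sig = (layer.weight.abs().sum() + layer.bias.abs().sum()).item()
# sig != new_sig with overwhelming probability, which is exactly what the save/load
# tests below rely on to tell a real restore from a no-op.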
class _A ( _a ):
"""simple docstring"""
@require_cuda
def __snake_case ( self : Any):
a : List[str] = Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
with self.assertRaises(__UpperCAmelCase):
a : Any = Accelerator(cpu=__UpperCAmelCase)
def __snake_case ( self : List[Any]):
a : str = Accelerator()
a : Optional[Any] = GradientState()
assert state.num_steps == 1
a : Dict = 4
assert state.num_steps == 4
assert state.sync_gradients is True
a : Optional[int] = False
assert state.sync_gradients is False
GradientState._reset_state()
def __snake_case ( self : str):
a : int = Accelerator()
a , a , a , a , a = create_components()
(
prepared_model,
prepared_optimizer,
prepared_scheduler,
prepared_train_dl,
prepared_valid_dl,
) = accelerator.prepare(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
self.assertTrue(prepared_model in accelerator._models)
self.assertTrue(prepared_optimizer in accelerator._optimizers)
self.assertTrue(prepared_scheduler in accelerator._schedulers)
self.assertTrue(prepared_train_dl in accelerator._dataloaders)
self.assertTrue(prepared_valid_dl in accelerator._dataloaders)
def __snake_case ( self : Dict):
a : Dict = Accelerator()
a , a , a , a , a = create_components()
accelerator.prepare(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
accelerator.free_memory()
self.assertTrue(len(accelerator._models) == 0)
self.assertTrue(len(accelerator._optimizers) == 0)
self.assertTrue(len(accelerator._schedulers) == 0)
self.assertTrue(len(accelerator._dataloaders) == 0)
def __snake_case ( self : int):
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
def noop(*__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : Any):
pass
with patch("torch.cuda.set_device" , __UpperCAmelCase), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64"):
a : int = Accelerator()
self.assertEqual(str(accelerator.state.device) , "cuda:64")
def __snake_case ( self : List[str]):
a : Tuple = Accelerator()
a , a , a , a , a = create_components()
accelerator.prepare(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
a : Dict = get_signature(__UpperCAmelCase)
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(__UpperCAmelCase)
# make sure random weights don't match
load_random_weights(__UpperCAmelCase)
self.assertTrue(abs(model_signature - get_signature(__UpperCAmelCase)) > 1e-3)
# make sure loaded weights match
accelerator.load_state(__UpperCAmelCase)
self.assertTrue(abs(model_signature - get_signature(__UpperCAmelCase)) < 1e-3)
def __snake_case ( self : Optional[int]):
a : str = Accelerator()
a , a , a , a , a = create_components()
accelerator.prepare(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
a : Union[str, Any] = get_signature(__UpperCAmelCase)
# saving hook
def save_config(__UpperCAmelCase : Tuple , __UpperCAmelCase : int , __UpperCAmelCase : Union[str, Any]):
a : Tuple = {"class_name": models[0].__class__.__name__}
with open(os.path.join(__UpperCAmelCase , "data.json") , "w") as f:
json.dump(__UpperCAmelCase , __UpperCAmelCase)
# loading hook
def load_config(__UpperCAmelCase : Dict , __UpperCAmelCase : Union[str, Any]):
with open(os.path.join(__UpperCAmelCase , "data.json") , "r") as f:
a : Optional[Any] = json.load(__UpperCAmelCase)
a : Tuple = config["class_name"]
a : Optional[int] = accelerator.register_save_state_pre_hook(__UpperCAmelCase)
a : Tuple = accelerator.register_load_state_pre_hook(__UpperCAmelCase)
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(__UpperCAmelCase)
# make sure random weights don't match with hooks
load_random_weights(__UpperCAmelCase)
self.assertTrue(abs(model_signature - get_signature(__UpperCAmelCase)) > 1e-3)
# random class name to verify correct one is loaded
a : int = "random"
# make sure loaded weights match with hooks
accelerator.load_state(__UpperCAmelCase)
self.assertTrue(abs(model_signature - get_signature(__UpperCAmelCase)) < 1e-3)
# model.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__)
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(__UpperCAmelCase)
# make sure random weights don't match with hooks removed
load_random_weights(__UpperCAmelCase)
self.assertTrue(abs(model_signature - get_signature(__UpperCAmelCase)) > 1e-3)
# random class name to verify correct one is loaded
a : Dict = "random"
# make sure loaded weights match with hooks removed
accelerator.load_state(__UpperCAmelCase)
self.assertTrue(abs(model_signature - get_signature(__UpperCAmelCase)) < 1e-3)
# model.class_name is NOT loaded from config
self.assertTrue(model.class_name != model.__class__.__name__)
def __snake_case ( self : Optional[Any]):
a : List[str] = Accelerator()
a , a , a , a , a = create_components()
a : Tuple = None
# This should work
a , a , a , a , a , dummy_obj = accelerator.prepare(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
self.assertTrue(dummy_obj is None)
def __snake_case ( self : List[str]):
a : str = Accelerator()
a , a , a , a , a = create_components()
a : Union[str, Any] = [1, 2, 3]
# This should work
a , a , a , a , a , dummy_obj = accelerator.prepare(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
self.assertEqual(
getattr(__UpperCAmelCase , "_is_accelerate_prepared" , __UpperCAmelCase) , __UpperCAmelCase , "Dummy object should have `_is_accelerate_prepared` set to `True`" , )
self.assertEqual(
getattr(__UpperCAmelCase , "_is_accelerate_prepared" , __UpperCAmelCase) , __UpperCAmelCase , "Model is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(__UpperCAmelCase , "_is_accelerate_prepared" , __UpperCAmelCase) , __UpperCAmelCase , "Optimizer is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(__UpperCAmelCase , "_is_accelerate_prepared" , __UpperCAmelCase) , __UpperCAmelCase , "Scheduler is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(__UpperCAmelCase , "_is_accelerate_prepared" , __UpperCAmelCase) , __UpperCAmelCase , "Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(__UpperCAmelCase , "_is_accelerate_prepared" , __UpperCAmelCase) , __UpperCAmelCase , "Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`" , )
@slow
@require_bnb
def __snake_case ( self : Optional[int]):
from transformers import AutoModelForCausalLM
a : Optional[int] = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , load_in_abit=__UpperCAmelCase , device_map={"": 0} , )
a : Tuple = Accelerator()
# This should work
a : List[Any] = accelerator.prepare(__UpperCAmelCase)
@slow
@require_bnb
def __snake_case ( self : Optional[int]):
from transformers import AutoModelForCausalLM
a : Dict = Accelerator()
with init_empty_weights():
a : Any = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , )
model.tie_weights()
a : Union[str, Any] = infer_auto_device_map(__UpperCAmelCase)
a : str = "cpu"
a : int = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , device_map=__UpperCAmelCase , load_in_abit=__UpperCAmelCase , llm_inta_enable_fpaa_cpu_offload=__UpperCAmelCase)
# This should not work and get value error
with self.assertRaises(__UpperCAmelCase):
a : Optional[int] = accelerator.prepare(__UpperCAmelCase)
@slow
@require_bnb
@require_multi_gpu
def __snake_case ( self : Optional[int]):
from transformers import AutoModelForCausalLM
a : Union[str, Any] = {"distributed_type": DistributedType.MULTI_GPU}
with init_empty_weights():
a : List[str] = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , )
model.tie_weights()
a : Any = infer_auto_device_map(__UpperCAmelCase)
a : Dict = 1
a : Any = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , load_in_abit=__UpperCAmelCase , device_map=__UpperCAmelCase , )
a : int = Accelerator()
# This should not work and get value error
with self.assertRaises(__UpperCAmelCase):
a : Optional[int] = accelerator.prepare(__UpperCAmelCase)
PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def __snake_case ( self : Tuple):
from transformers import AutoModelForCausalLM
with init_empty_weights():
a : List[str] = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , )
a : Tuple = infer_auto_device_map(__UpperCAmelCase)
a : str = 1
a : Optional[int] = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , load_in_abit=__UpperCAmelCase , device_map=__UpperCAmelCase , )
a : str = Accelerator()
# This should work
a : Any = accelerator.prepare(__UpperCAmelCase)
@require_cuda
def __snake_case ( self : List[Any]):
a : Tuple = torch.nn.Linear(10 , 10)
a : int = torch.optim.SGD(model.parameters() , lr=0.01)
a : Optional[Any] = Accelerator(cpu=__UpperCAmelCase)
a : List[str] = accelerator.prepare(__UpperCAmelCase)
| 226
| 1
|
from __future__ import annotations
def UpperCAmelCase ( lowercase ):
"""simple docstring"""
__lowercase = str(lowercase )
return len(lowercase ) == 9 and set(lowercase ) == set('''123456789''' )
def UpperCAmelCase ( ):
"""simple docstring"""
for base_num in range(9999 , 4999 , -1 ):
__lowercase = 100002 * base_num
if is_9_pandigital(lowercase ):
return candidate
for base_num in range(333 , 99 , -1 ):
__lowercase = 1002003 * base_num
if is_9_pandigital(lowercase ):
return candidate
return None
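# --- Illustrative aside -------------------------------------------------------------
# Why the multipliers 100002 and 1002003 work: for a 4-digit base n with
# 5000 <= n <= 9999 (so 2n has 5 digits), the concatenated product of n with (1, 2)
# is n followed by 2n, i.e. n * 10**5 + 2 * n = 100002 * n. For a 3-digit base
# n <= 333, concatenating n, 2n, 3n gives 1002003 * n. Two worked checks:
assert 100002 * 9327 == int(str(9327) + str(2 * 9327))                # 932718654
assert 1002003 * 192 == int(str(192) + str(2 * 192) + str(3 * 192))   # 192384576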
if __name__ == "__main__":
print(F'''{solution() = }''')
| 210
|
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPT2Tokenizer, TFOPTForCausalLM, TFOPTModel
def UpperCAmelCase ( lowercase , lowercase , lowercase=None , lowercase=None ):
"""simple docstring"""
if attention_mask is None:
__lowercase = tf.cast(tf.math.not_equal(lowercase , config.pad_token_id ) , tf.int8 )
return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class _UpperCamelCase :
"""simple docstring"""
__a : Tuple = OPTConfig
__a : int = {}
__a : Dict = '''gelu'''
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=13 , lowerCAmelCase__=7 , lowerCAmelCase__=True , lowerCAmelCase__=False , lowerCAmelCase__=99 , lowerCAmelCase__=16 , lowerCAmelCase__=2 , lowerCAmelCase__=4 , lowerCAmelCase__=4 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=20 , lowerCAmelCase__=2 , lowerCAmelCase__=1 , lowerCAmelCase__=0 , lowerCAmelCase__=16 , lowerCAmelCase__=16 , ) -> Tuple:
'''simple docstring'''
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = eos_token_id
__lowercase = pad_token_id
__lowercase = bos_token_id
__lowercase = embed_dim
__lowercase = word_embed_proj_dim
__lowercase = False
def _SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
__lowercase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
__lowercase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
__lowercase = tf.concat([input_ids, eos_tensor] , axis=1 )
__lowercase = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=lowerCAmelCase__ , **self.config_updates , )
__lowercase = prepare_opt_inputs_dict(lowerCAmelCase__ , lowerCAmelCase__ )
return config, inputs_dict
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> int:
'''simple docstring'''
__lowercase = TFOPTModel(config=lowerCAmelCase__ )
__lowercase = inputs_dict['''input_ids''']
__lowercase = input_ids[:1, :]
__lowercase = inputs_dict['''attention_mask'''][:1, :]
__lowercase = 1
# first forward pass
__lowercase = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , use_cache=lowerCAmelCase__ )
__lowercase , __lowercase = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
__lowercase = ids_tensor((self.batch_size, 3) , config.vocab_size )
__lowercase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
# append to next input_ids and attention_mask
__lowercase = tf.concat([input_ids, next_tokens] , axis=-1 )
__lowercase = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
__lowercase = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )[0]
__lowercase = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , past_key_values=lowerCAmelCase__ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
__lowercase = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
__lowercase = output_from_no_past[:, -3:, random_slice_idx]
__lowercase = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowerCAmelCase__ , lowerCAmelCase__ , rtol=1E-3 )
@require_tf
class _UpperCamelCase ( _UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ):
"""simple docstring"""
__a : int = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
__a : Optional[Any] = (TFOPTForCausalLM,) if is_tf_available() else ()
__a : Dict = (
{'''feature-extraction''': TFOPTModel, '''text-generation''': TFOPTForCausalLM} if is_tf_available() else {}
)
__a : List[str] = False
__a : Optional[Any] = False
__a : Union[str, Any] = False
__a : List[Any] = 10
def _SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
__lowercase = TFOPTModelTester(self )
__lowercase = ConfigTester(self , config_class=lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(lowerCAmelCase__ , lowerCAmelCase__ ):
if hasattr(lowerCAmelCase__ , '''weight''' ):
return embedding_layer.weight
else:
# Here we build the word embedding weights if they do not exist yet,
# and then retry fetching the attribute once built.
model.build()
if hasattr(lowerCAmelCase__ , '''weight''' ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10]:
# build the embeddings
__lowercase = model_class(config=lowerCAmelCase__ )
__lowercase = _get_word_embedding_weight(lowerCAmelCase__ , model.get_input_embeddings() )
__lowercase = _get_word_embedding_weight(lowerCAmelCase__ , model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(lowerCAmelCase__ )
__lowercase = _get_word_embedding_weight(lowerCAmelCase__ , model.get_input_embeddings() )
__lowercase = _get_word_embedding_weight(lowerCAmelCase__ , model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
__lowercase = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , lowerCAmelCase__ )
# check that weights remain the same after resizing
__lowercase = True
for p1, p2 in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(p1 - p2 ) ) > 0:
__lowercase = False
self.assertTrue(lowerCAmelCase__ )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , lowerCAmelCase__ )
__lowercase = True
for p1, p2 in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(p1 - p2 ) ) > 0:
__lowercase = False
self.assertTrue(lowerCAmelCase__ )
def UpperCAmelCase ( lowercase ):
"""simple docstring"""
return tf.constant(lowercase , dtype=tf.int32 )
@require_tf
class _UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
__a : List[str] = 99
def _SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
__lowercase = tf.ones((4, 1) , dtype=tf.int32 ) * 2
__lowercase = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
__lowercase = input_ids.shape[0]
__lowercase = OPTConfig(
vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class _UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def _SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
__lowercase = TFOPTModel.from_pretrained('''facebook/opt-350m''' )
__lowercase = _long_tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] )
__lowercase = tf.not_equal(lowerCAmelCase__ , model.config.pad_token_id )
with tf.GradientTape():
__lowercase = model(input_ids=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ ).last_hidden_state
__lowercase = (1, 11, 5_12)
self.assertEqual(output.shape , lowerCAmelCase__ )
__lowercase = tf.constant(
[[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]] )
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCAmelCase__ , atol=4E-3 ) )
__lowercase = tf.function(lowerCAmelCase__ , jit_compile=lowerCAmelCase__ )
__lowercase = xla_generate(lowerCAmelCase__ , lowerCAmelCase__ )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCAmelCase__ , atol=4E-2 ) )
@require_tf
@slow
class _UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
super().setUp()
__lowercase = '''facebook/opt-350m'''
def _SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
__lowercase = TFOPTForCausalLM.from_pretrained(self.path_model )
__lowercase = GPT2Tokenizer.from_pretrained(self.path_model )
__lowercase = [
'''Today is a beautiful day and I want to''',
'''In the city of''',
'''Paris is the capital of France and''',
'''Computers and mobile phones have taken''',
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
__lowercase = tokenizer(lowerCAmelCase__ , return_tensors='''tf''' , padding=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
__lowercase = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
__lowercase = tf.constant(
[
[1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
[-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
[0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
[6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
] )
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-4 ) )
__lowercase = tf.function(lowerCAmelCase__ , jit_compile=lowerCAmelCase__ )
__lowercase = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-4 ) )
@require_tf
@slow
class _UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@property
def _SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def _SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = '''facebook/opt-125m'''
__lowercase = [
'''Today is a beautiful day and I want to''',
'''In the city of New York, the city''',
'''Paris is the capital of France and the capital''',
'''Computers and mobile phones have taken over the''',
]
__lowercase = []
__lowercase = GPT2Tokenizer.from_pretrained(lowerCAmelCase__ )
__lowercase = TFOPTForCausalLM.from_pretrained(lowerCAmelCase__ )
for prompt in self.prompts:
__lowercase = tokenizer(lowerCAmelCase__ , return_tensors='''tf''' ).input_ids
__lowercase = model.generate(lowerCAmelCase__ , max_length=10 )
__lowercase = tokenizer.batch_decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
predicted_outputs += generated_string
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
__lowercase = '''facebook/opt-350m'''
__lowercase = GPT2Tokenizer.from_pretrained(lowerCAmelCase__ )
__lowercase = TFOPTForCausalLM.from_pretrained(lowerCAmelCase__ )
__lowercase = '''left'''
# use different length sentences to test batching
__lowercase = [
'''Hello, my dog is a little''',
'''Today, I''',
]
__lowercase = tokenizer(lowerCAmelCase__ , return_tensors='''tf''' , padding=lowerCAmelCase__ )
__lowercase = inputs['''input_ids''']
__lowercase = model.generate(input_ids=lowerCAmelCase__ , attention_mask=inputs['''attention_mask'''] )
__lowercase = tokenizer(sentences[0] , return_tensors='''tf''' ).input_ids
__lowercase = model.generate(input_ids=lowerCAmelCase__ )
__lowercase = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs['''attention_mask'''][-1] , tf.intaa ) )
__lowercase = tokenizer(sentences[1] , return_tensors='''tf''' ).input_ids
__lowercase = model.generate(input_ids=lowerCAmelCase__ , max_length=model.config.max_length - num_paddings )
__lowercase = tokenizer.batch_decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
__lowercase = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowerCAmelCase__ )
__lowercase = tokenizer.decode(output_padded[0] , skip_special_tokens=lowerCAmelCase__ )
__lowercase = [
'''Hello, my dog is a little bit of a dork.\nI\'m a little bit''',
'''Today, I was in the middle of a conversation with a friend about the''',
]
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , [non_padded_sentence, padded_sentence] )
def _SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
__lowercase = '''facebook/opt-350m'''
__lowercase = [
'''Today is a beautiful day and I want to''',
'''In the city of San Francisco, the city''',
'''Paris is the capital of France and the capital''',
'''Computers and mobile phones have taken over the''',
]
__lowercase = []
__lowercase = GPT2Tokenizer.from_pretrained(lowerCAmelCase__ )
__lowercase = TFOPTForCausalLM.from_pretrained(lowerCAmelCase__ )
for prompt in self.prompts:
__lowercase = tokenizer(lowerCAmelCase__ , return_tensors='''tf''' ).input_ids
__lowercase = model.generate(lowerCAmelCase__ , max_length=10 )
__lowercase = tokenizer.batch_decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
predicted_outputs += generated_string
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
| 210
| 1
|
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
SCREAMING_SNAKE_CASE_: int = logging.get_logger(__name__)
@add_end_docstrings(a__ )
class __A ( a__ ):
def __init__(self : List[Any] , *__a : Dict , **__a : Optional[int] ):
super().__init__(*_lowerCamelCase , **_lowerCamelCase )
requires_backends(self , "vision" )
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )
def _lowercase (self : int , __a : Optional[Any]=None ):
UpperCAmelCase_ = {}
if top_k is not None:
UpperCAmelCase_ = top_k
return {}, {}, postprocess_params
def __call__(self : Any , __a : Union[str, Any] , **__a : Optional[Any] ):
return super().__call__(_lowerCamelCase , **_lowerCamelCase )
def _lowercase (self : Optional[Any] , __a : Optional[int] ):
UpperCAmelCase_ = load_image(_lowerCamelCase )
UpperCAmelCase_ = self.image_processor(images=_lowerCamelCase , return_tensors=self.framework )
return model_inputs
def _lowercase (self : Optional[int] , __a : Union[str, Any] ):
UpperCAmelCase_ = self.model(**_lowerCamelCase )
return model_outputs
def _lowercase (self : List[Any] , __a : Optional[int] , __a : Dict=5 ):
if top_k > self.model.config.num_labels:
UpperCAmelCase_ = self.model.config.num_labels
if self.framework == "pt":
UpperCAmelCase_ = model_outputs.logits.softmax(-1 )[0]
UpperCAmelCase_ = probs.topk(_lowerCamelCase )
elif self.framework == "tf":
UpperCAmelCase_ = stable_softmax(model_outputs.logits , axis=-1 )[0]
UpperCAmelCase_ = tf.math.top_k(_lowerCamelCase , k=_lowerCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ = topk.values.numpy(), topk.indices.numpy()
else:
raise ValueError(f"""Unsupported framework: {self.framework}""" )
UpperCAmelCase_ = scores.tolist()
UpperCAmelCase_ = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(_lowerCamelCase , _lowerCamelCase )]
| 369
|
'''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
SCREAMING_SNAKE_CASE_: Tuple =logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_: List[str] ={'vocab_file': 'vocab.txt', 'emoji_file': 'emoji.json'}
SCREAMING_SNAKE_CASE_: Union[str, Any] ={
'vocab_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt',
},
'emoji_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json',
},
}
SCREAMING_SNAKE_CASE_: Optional[int] ={
'abeja/gpt-neox-japanese-2.7b': 20_48,
}
def lowerCAmelCase_ ( snake_case_ : List[str] , snake_case_ : Tuple ) -> Union[str, Any]:
'''simple docstring'''
with open(snake_case_ , "r" , encoding="utf-8" ) as f:
UpperCAmelCase_ = json.loads(f.read() )
UpperCAmelCase_ = collections.OrderedDict()
UpperCAmelCase_ = collections.OrderedDict()
UpperCAmelCase_ = collections.OrderedDict()
with open(snake_case_ , "r" , encoding="utf-8" ) as f:
UpperCAmelCase_ = f.readlines()
UpperCAmelCase_ = [[t.rstrip("\n" )] if (t == "," or "," not in t) else t.rstrip("\n" ).split("," ) for t in token]
for idx, b in enumerate(snake_case_ ):
UpperCAmelCase_ = b
UpperCAmelCase_ = idx
for wd in b:
UpperCAmelCase_ = idx
return vocab, raw_vocab, ids_to_tokens, emoji
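# --- Illustrative aside -------------------------------------------------------------
# A tiny standalone sketch (made-up tokens) of the vocab format parsed above: each
# line is either a single token or a comma-separated group of surface forms sharing
# one id; `vocab` maps every surface form to that id, `ids_to_tokens` keeps the group.
toy_lines = ["hello", "hi,hey"]
toy_vocab, toy_ids_to_tokens = {}, {}
for idx, line in enumerate(toy_lines):
    group = [line] if "," not in line else line.split(",")
    toy_ids_to_tokens[idx] = group
    for wd in group:
        toy_vocab[wd] = idx
assert toy_vocab == {"hello": 0, "hi": 1, "hey": 1} and toy_ids_to_tokens[1] == ["hi", "hey"]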
class __A ( UpperCamelCase__ ):
a__ : List[str] = VOCAB_FILES_NAMES
a__ : Tuple = PRETRAINED_VOCAB_FILES_MAP
a__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ : Optional[Any] = ["""input_ids""", """attention_mask"""]
def __init__(self : Any , __a : List[Any] , __a : Dict , __a : int="<|endoftext|>" , __a : Union[str, Any]="<|endoftext|>" , __a : int="<|startoftext|>" , __a : Tuple="<|endoftext|>" , __a : Optional[int]=False , **__a : int , ):
super().__init__(
unk_token=__a , pad_token=__a , bos_token=__a , eos_token=__a , do_clean_text=__a , **__a , )
if not os.path.isfile(__a ):
raise ValueError(
f"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
" model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
if not os.path.isfile(__a ):
raise ValueError(
f"""Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
" pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
UpperCAmelCase_ = do_clean_text
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = load_vocab_and_emoji(__a , __a )
UpperCAmelCase_ = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
@property
def _lowercase (self : Optional[Any] ):
# self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
return len(self.raw_vocab )
def _lowercase (self : List[Any] ):
return dict(self.raw_vocab , **self.added_tokens_encoder )
def _lowercase (self : List[Any] , __a : int ):
return self.subword_tokenizer.tokenize(__a , clean=self.do_clean_text )
def _lowercase (self : List[Any] , __a : List[str] ):
return self.vocab.get(__a , self.vocab.get(self.unk_token ) )
def _lowercase (self : int , __a : List[Any] ):
return self.subword_tokenizer.convert_id_to_token(__a )
def _lowercase (self : Dict , __a : str ):
UpperCAmelCase_ = "".join(__a ).strip()
return out_string
def _lowercase (self : int , __a : "Conversation" ):
UpperCAmelCase_ = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__a , add_special_tokens=__a ) + [self.eos_token_id] )
if len(__a ) > self.model_max_length:
UpperCAmelCase_ = input_ids[-self.model_max_length :]
return input_ids
def _lowercase (self : int , __a : str , __a : Optional[str] = None ):
UpperCAmelCase_ = 0
if os.path.isdir(__a ):
UpperCAmelCase_ = os.path.join(
__a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
UpperCAmelCase_ = os.path.join(
__a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"] )
else:
UpperCAmelCase_ = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
)
UpperCAmelCase_ = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
)
with open(__a , "w" , encoding="utf-8" ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
" Please check that the vocabulary is not corrupted!" )
UpperCAmelCase_ = token_index
writer.write(",".join(__a ) + "\n" )
index += 1
with open(__a , "w" , encoding="utf-8" ) as writer:
json.dump(self.emoji , __a )
return vocab_file, emoji_file
class __A ( UpperCamelCase__ ):
def __init__(self : List[Any] , __a : Dict , __a : Any , __a : int ):
UpperCAmelCase_ = vocab # same as swe
UpperCAmelCase_ = ids_to_tokens # same as bpe
UpperCAmelCase_ = emoji
UpperCAmelCase_ = np.max([len(__a ) for w in self.vocab.keys()] )
UpperCAmelCase_ = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)" )
UpperCAmelCase_ = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*" )
UpperCAmelCase_ = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}" )
UpperCAmelCase_ = re.compile(
r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
UpperCAmelCase_ = re.compile(
r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
UpperCAmelCase_ = re.compile(
r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*" )
UpperCAmelCase_ = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
UpperCAmelCase_ = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
UpperCAmelCase_ = str.maketrans({k: "<BLOCK>" for k in keisen + blocks} )
def __len__(self : Dict ):
return len(self.ids_to_tokens )
def _lowercase (self : str , __a : Union[str, Any] ):
UpperCAmelCase_ = self.content_repattera.sub("<URL>" , __a )
UpperCAmelCase_ = self.content_repattera.sub("<EMAIL>" , __a )
UpperCAmelCase_ = self.content_repattera.sub("<TEL>" , __a )
UpperCAmelCase_ = self.content_repattera.sub("<DATE>" , __a )
UpperCAmelCase_ = self.content_repattera.sub("<DATE>" , __a )
UpperCAmelCase_ = self.content_repattera.sub("<PRICE>" , __a )
UpperCAmelCase_ = content.translate(self.content_transa )
while "<BLOCK><BLOCK>" in content:
UpperCAmelCase_ = content.replace("<BLOCK><BLOCK>" , "<BLOCK>" )
return content
def _lowercase (self : Optional[Any] , __a : Union[str, Any] , __a : str=False ):
UpperCAmelCase_ = text.replace(" " , "<SP>" )
UpperCAmelCase_ = text.replace(" " , "<SP>" )
UpperCAmelCase_ = text.replace("\r\n" , "<BR>" )
UpperCAmelCase_ = text.replace("\n" , "<BR>" )
UpperCAmelCase_ = text.replace("\r" , "<BR>" )
UpperCAmelCase_ = text.replace("\t" , "<TAB>" )
UpperCAmelCase_ = text.replace("—" , "ー" )
UpperCAmelCase_ = text.replace("−" , "ー" )
for k, v in self.emoji["emoji"].items():
if k in text:
UpperCAmelCase_ = text.replace(__a , __a )
if clean:
UpperCAmelCase_ = self.clean_text(__a )
def check_simbol(__a : List[Any] ):
UpperCAmelCase_ = x.encode()
if len(x ) == 1 and len(e ) == 2:
UpperCAmelCase_ = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0Xc2_a1 and c <= 0Xc2_bf)
or (c >= 0Xc7_80 and c <= 0Xc7_83)
or (c >= 0Xca_b9 and c <= 0Xcb_bf)
or (c >= 0Xcc_80 and c <= 0Xcd_a2)
):
return True
return False
def checkuae(__a : Tuple ):
UpperCAmelCase_ = x.encode()
if len(x ) == 1 and len(e ) == 3:
UpperCAmelCase_ = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0Xe2_80_80 and c <= 0Xe2_b0_7f:
return True
return False
UpperCAmelCase_ = 0
UpperCAmelCase_ = []
while pos < len(__a ):
UpperCAmelCase_ = min(len(__a ) , pos + self.maxlen + 1 ) if text[pos] == "<" else pos + 3
UpperCAmelCase_ = [] # (token_id, token, pos)
for e in range(__a , __a , -1 ):
UpperCAmelCase_ = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(__a ) > 2:
UpperCAmelCase_ = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(__a ) > 0:
# the smallest token_id is adopted
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = sorted(__a , key=lambda __a : x[0] )[0]
result.append(__a )
UpperCAmelCase_ = e
else:
UpperCAmelCase_ = pos + 1
UpperCAmelCase_ = text[pos:end]
if check_simbol(__a ):
result.append("<KIGOU>" )
elif checkuae(__a ):
result.append("<U2000U2BFF>" )
else:
for i in wd.encode("utf-8" ):
result.append("<|byte%d|>" % i )
UpperCAmelCase_ = end
return result
def _lowercase (self : int , __a : Optional[Any] , __a : Optional[int]="\n" ):
UpperCAmelCase_ = []
UpperCAmelCase_ = []
UpperCAmelCase_ = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(__a ) > 0:
words.append(bytearray(__a ).decode("utf-8" , errors="replace" ) )
UpperCAmelCase_ = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji["emoji_inv"][word] )
elif word == "<SP>":
words.append(" " )
elif word == "<BR>":
words.append(__a )
elif word == "<TAB>":
words.append("\t" )
elif word == "<BLOCK>":
words.append("▀" )
elif word == "<KIGOU>":
words.append("ǀ" )
elif word == "<U2000U2BFF>":
words.append("‖" )
else:
words.append(__a )
if len(__a ) > 0:
words.append(bytearray(__a ).decode("utf-8" , errors="replace" ) )
UpperCAmelCase_ = "".join(__a )
return text
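# --- Illustrative aside -------------------------------------------------------------
# The byte fallback used by the tokenizer above, as a standalone check: a substring
# that is neither in the vocab nor in a recognized symbol range is emitted as one
# "<|byte{n}|>" token per byte of its UTF-8 encoding, and decoded back the same way.
word = "é"                                    # U+00E9 -> UTF-8 bytes 0xC3 0xA9
byte_tokens = ["<|byte%d|>" % b for b in word.encode("utf-8")]
assert byte_tokens == ["<|byte195|>", "<|byte169|>"]
assert bytearray([195, 169]).decode("utf-8") == "é"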
| 106
| 0
|
"""simple docstring"""
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase ( lowercase ,unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : int = FunnelTokenizer
__UpperCAmelCase : Optional[int] = FunnelTokenizerFast
__UpperCAmelCase : str = True
__UpperCAmelCase : str = True
def _lowercase ( self : str ):
super().setUp()
__lowercase = [
"<unk>",
"<cls>",
"<sep>",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
__lowercase = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file, "w", encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def _lowercase ( self : List[Any], **UpperCAmelCase__ : int ):
return FunnelTokenizer.from_pretrained(self.tmpdirname, **UpperCAmelCase__ )
def _lowercase ( self : Optional[Any], **UpperCAmelCase__ : Union[str, Any] ):
return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **UpperCAmelCase__ )
def _lowercase ( self : Any, UpperCAmelCase__ : Dict ):
__lowercase = "UNwant\u00E9d,running"
__lowercase = "unwanted, running"
return input_text, output_text
def _lowercase ( self : str ):
__lowercase = self.tokenizer_class(self.vocab_file )
__lowercase = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(UpperCAmelCase__, ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ), [7, 4, 5, 1_0, 8, 9] )
def _lowercase ( self : Optional[int] ):
__lowercase = self.get_tokenizers(do_lower_case=UpperCAmelCase__ )
for tokenizer in tokenizers:
__lowercase = tokenizer("UNwant\u00E9d,running" )
__lowercase = len(inputs["input_ids"] ) - 1
self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len )
__lowercase = tokenizer("UNwant\u00E9d,running", "UNwant\u00E9d,running" )
self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len + [1] * sentence_len )
| 17
|
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : str, UpperCAmelCase__ : List[Any], UpperCAmelCase__ : int=1_0_0, UpperCAmelCase__ : Any=1_3, UpperCAmelCase__ : List[Any]=3_0, UpperCAmelCase__ : Dict=2, UpperCAmelCase__ : Any=3, UpperCAmelCase__ : Optional[Any]=True, UpperCAmelCase__ : List[str]=True, UpperCAmelCase__ : Optional[Any]=3_2, UpperCAmelCase__ : Any=5, UpperCAmelCase__ : Any=4, UpperCAmelCase__ : Any=3_7, UpperCAmelCase__ : Optional[int]="gelu", UpperCAmelCase__ : Dict=0.1, UpperCAmelCase__ : Optional[int]=0.1, UpperCAmelCase__ : Dict=1_0, UpperCAmelCase__ : Tuple=0.02, UpperCAmelCase__ : List[Any]=3, ):
__lowercase = parent
__lowercase = vocab_size
__lowercase = batch_size
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = is_training
__lowercase = use_labels
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = type_sequence_label_size
__lowercase = initializer_range
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__lowercase = (image_size // patch_size) ** 2
__lowercase = num_patches + 1
def _lowercase ( self : int ):
__lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size], self.type_sequence_label_size )
__lowercase = BeitConfig(
vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=UpperCAmelCase__, initializer_range=self.initializer_range, )
return config, pixel_values, labels
def _lowercase ( self : Any, UpperCAmelCase__ : List[Any], UpperCAmelCase__ : List[str], UpperCAmelCase__ : List[str] ):
__lowercase = FlaxBeitModel(config=UpperCAmelCase__ )
__lowercase = model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self : Optional[Any], UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : int, UpperCAmelCase__ : List[Any] ):
__lowercase = FlaxBeitForMaskedImageModeling(config=UpperCAmelCase__ )
__lowercase = model(UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size) )
def _lowercase ( self : Dict, UpperCAmelCase__ : List[Any], UpperCAmelCase__ : Any, UpperCAmelCase__ : Union[str, Any] ):
__lowercase = self.type_sequence_label_size
__lowercase = FlaxBeitForImageClassification(config=UpperCAmelCase__ )
__lowercase = model(UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__lowercase = 1
__lowercase = FlaxBeitForImageClassification(UpperCAmelCase__ )
__lowercase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowercase = model(UpperCAmelCase__ )
def _lowercase ( self : List[str] ):
__lowercase = self.prepare_config_and_inputs()
(
config,
pixel_values,
labels,
) = config_and_inputs
__lowercase = {"pixel_values": pixel_values}
return config, inputs_dict
@require_flax
class _lowerCAmelCase ( lowercase ,unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : str = (
(FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
)
def _lowercase ( self : List[Any] ):
__lowercase = FlaxBeitModelTester(self )
__lowercase = ConfigTester(self, config_class=UpperCAmelCase__, has_text_modality=UpperCAmelCase__, hidden_size=3_7 )
def _lowercase ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
def _lowercase ( self : Optional[int] ):
__lowercase ,__lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(UpperCAmelCase__ )
__lowercase = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ["pixel_values"]
self.assertListEqual(arg_names[:1], UpperCAmelCase__ )
def _lowercase ( self : Tuple ):
__lowercase ,__lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__lowercase = self._prepare_for_class(UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = model_class(UpperCAmelCase__ )
@jax.jit
def model_jitted(UpperCAmelCase__ : str, **UpperCAmelCase__ : Dict ):
return model(pixel_values=UpperCAmelCase__, **UpperCAmelCase__ )
with self.subTest("JIT Enabled" ):
__lowercase = model_jitted(**UpperCAmelCase__ ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
__lowercase = model_jitted(**UpperCAmelCase__ ).to_tuple()
self.assertEqual(len(UpperCAmelCase__ ), len(UpperCAmelCase__ ) )
for jitted_output, output in zip(UpperCAmelCase__, UpperCAmelCase__ ):
self.assertEqual(jitted_output.shape, output.shape )
def _lowercase ( self : List[str] ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def _lowercase ( self : int ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase__ )
def _lowercase ( self : Tuple ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__ )
@slow
def _lowercase ( self : Union[str, Any] ):
for model_class_name in self.all_model_classes:
__lowercase = model_class_name.from_pretrained("microsoft/beit-base-patch16-224" )
__lowercase = model(np.ones((1, 3, 2_2_4, 2_2_4) ) )
self.assertIsNotNone(UpperCAmelCase__ )
def _A ( ) -> str:
'''simple docstring'''
__lowercase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_vision
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _lowercase ( self : Optional[int] ):
return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None
@slow
def _lowercase ( self : Any ):
__lowercase = FlaxBeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=UpperCAmelCase__, return_tensors="np" ).pixel_values
# prepare bool_masked_pos
__lowercase = np.ones((1, 1_9_6), dtype=UpperCAmelCase__ )
# forward pass
__lowercase = model(pixel_values=UpperCAmelCase__, bool_masked_pos=UpperCAmelCase__ )
__lowercase = outputs.logits
# verify the logits
__lowercase = (1, 1_9_6, 8_1_9_2)
self.assertEqual(logits.shape, UpperCAmelCase__ )
__lowercase = np.array(
[[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] )
self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3], UpperCAmelCase__, atol=1E-2 ) )
@slow
def _lowercase ( self : Any ):
__lowercase = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=UpperCAmelCase__, return_tensors="np" )
# forward pass
__lowercase = model(**UpperCAmelCase__ )
__lowercase = outputs.logits
# verify the logits
__lowercase = (1, 1_0_0_0)
self.assertEqual(logits.shape, UpperCAmelCase__ )
__lowercase = np.array([-1.2_385, -1.0_987, -1.0_108] )
self.assertTrue(np.allclose(logits[0, :3], UpperCAmelCase__, atol=1E-4 ) )
__lowercase = 2_8_1
self.assertEqual(logits.argmax(-1 ).item(), UpperCAmelCase__ )
@slow
def _lowercase ( self : List[str] ):
__lowercase = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=UpperCAmelCase__, return_tensors="np" )
# forward pass
__lowercase = model(**UpperCAmelCase__ )
__lowercase = outputs.logits
# verify the logits
__lowercase = (1, 2_1_8_4_1)
self.assertEqual(logits.shape, UpperCAmelCase__ )
__lowercase = np.array([1.6_881, -0.2_787, 0.5_901] )
self.assertTrue(np.allclose(logits[0, :3], UpperCAmelCase__, atol=1E-4 ) )
__lowercase = 2_3_9_6
self.assertEqual(logits.argmax(-1 ).item(), UpperCAmelCase__ )
| 17
| 1
|
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    'configuration_time_series_transformer': [
        'TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'TimeSeriesTransformerConfig',
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_time_series_transformer'] = [
        'TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TimeSeriesTransformerForPrediction',
        'TimeSeriesTransformerModel',
        'TimeSeriesTransformerPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)


MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / 'model_card_template.md'
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv('HF_HUB_OFFLINE', '').upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv('DISABLE_TELEMETRY', '').upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '/api/telemetry/'
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    '''simple docstring'''
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"; torch/{_torch_version}"
    if is_flax_available():
        ua += f"; jax/{_jax_version}"
        ua += f"; flax/{_flax_version}"
    if is_onnx_available():
        ua += f"; onnxruntime/{_onnxruntime_version}"
    # CI will set this value to True
    if os.environ.get('DIFFUSERS_IS_CI', '').upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
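# Example result (illustrative values only, not a pinned format):
#     "diffusers/0.19.0; python/3.10.12; session_id/7c3a...; torch/2.0.1; is_ci/true"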
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None) -> str:
    '''simple docstring'''
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)['name']
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"
def create_model_card(args, model_name: str) -> None:
    '''simple docstring'''
    if not is_jinja_available():
        raise ValueError(
            'Modelcard rendering is based on Jinja templates.'
            ' Please make sure to have `jinja` installed before using `create_model_card`.'
            ' To install it, please run `pip install Jinja2`.' )
    if hasattr(args, 'local_rank') and args.local_rank not in [-1, 0]:
        return

    hub_token = args.hub_token if hasattr(args, 'hub_token') else None
    repo_name = get_full_repo_name(model_name, token=hub_token)

    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language='en', license='apache-2.0', library_name='diffusers', tags=[],
            datasets=args.dataset_name, metrics=[], ),
        template_path=MODEL_CARD_TEMPLATE_PATH, model_name=model_name, repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, 'dataset_name') else None,
        learning_rate=args.learning_rate, train_batch_size=args.train_batch_size,
        eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, 'gradient_accumulation_steps') else None ),
        adam_beta1=args.adam_beta1 if hasattr(args, 'adam_beta1') else None,
        adam_beta2=args.adam_beta2 if hasattr(args, 'adam_beta2') else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, 'adam_weight_decay') else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, 'adam_epsilon') else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, 'lr_scheduler') else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, 'lr_warmup_steps') else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, 'ema_inv_gamma') else None,
        ema_power=args.ema_power if hasattr(args, 'ema_power') else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, 'ema_max_decay') else None,
        mixed_precision=args.mixed_precision,
    )

    model_card.save(os.path.join(args.output_dir, 'README.md'))
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None) -> Optional[str]:
    '''simple docstring'''
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r'snapshots/([^/]+)/', resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
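# e.g. a resolved path like ".../snapshots/<40-char commit sha>/config.json"
# yields that sha, provided it matches REGEX_COMMIT_HASH; otherwise None.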
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv('HF_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'huggingface'))
)
old_diffusers_cache = os.path.join(hf_cache_home, 'diffusers')
def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None:
    '''simple docstring'''
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache

    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob('**/blobs/*'):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    'Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.' )
    # At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, 'version_diffusers_cache.txt')
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0
if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
    if old_cache_is_not_empty:
        logger.warning(
            'The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '
            'existing cached models. This is a one-time operation, you can interrupt it or run it '
            'later by calling `diffusers.utils.hub_utils.move_cache()`.'
        )
        try:
            move_cache()
        except Exception as e:
            trace = '\n'.join(traceback.format_tb(e.__traceback__))
            logger.error(
                f'''There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease '''
                'file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '
                'message and we will do our best to help.'
            )
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, 'w') as f:
f.write('1')
except Exception:
logger.warning(
f'''There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure '''
'the directory exists and can be written to.'
)
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    '''simple docstring'''
    if variant is not None:
        splits = weights_name.split('.')
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = '.'.join(splits)

    return weights_name
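# e.g. _add_variant("diffusion_pytorch_model.bin", "fp16")
#      == "diffusion_pytorch_model.fp16.bin"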
def _get_model_file(
    pretrained_model_name_or_path,
    *,
    weights_name,
    subfolder,
    cache_dir,
    force_download,
    proxies,
    resume_download,
    local_files_only,
    use_auth_token,
    user_agent,
    revision,
    commit_hash=None,
):
    '''simple docstring'''
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
        ):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}." )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse('0.20.0')
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path, filename=_add_variant(weights_name, revision),
                    cache_dir=cache_dir, force_download=force_download, proxies=proxies,
                    resume_download=resume_download, local_files_only=local_files_only,
                    use_auth_token=use_auth_token, user_agent=user_agent, subfolder=subfolder,
                    revision=revision or commit_hash, )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.", FutureWarning, )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.", FutureWarning, )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path, filename=weights_name,
                cache_dir=cache_dir, force_download=force_download, proxies=proxies,
                resume_download=resume_download, local_files_only=local_files_only,
                use_auth_token=use_auth_token, user_agent=user_agent, subfolder=subfolder,
                revision=revision or commit_hash, )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
F"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
'''listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '''
'''token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '''
'''login`.''' )
except RevisionNotFoundError:
raise EnvironmentError(
F"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
'''this model name. Check the model page at '''
F"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions." )
except EntryNotFoundError:
raise EnvironmentError(
F"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}." )
except HTTPError as err:
raise EnvironmentError(
F"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}" )
except ValueError:
raise EnvironmentError(
F"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
F" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
F" directory containing a file named {weights_name} or"
''' \nCheckout your internet connection or see how to run the library in'''
''' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.''' )
except EnvironmentError:
raise EnvironmentError(
F"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
'''\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '''
F"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
F"containing a file named {weights_name}" )
from __future__ import annotations

from typing import Any


class ContainsLoopError(Exception):
    '''simple docstring'''
    pass


class Node:
    '''simple docstring'''

    def __init__(self, data: Any) -> None:
        """simple docstring"""
        self.data: Any = data
        self.next_node: Node | None = None

    def __iter__(self):
        """simple docstring"""
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        """simple docstring"""
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True
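
# An illustrative alternative to `has_loop` (not part of the original module):
# Floyd's tortoise-and-hare detects a cycle in O(1) extra space instead of the
# O(n) `visited` list used above.
def has_loop_floyd(head: Node) -> bool:
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node            # advance one step
        fast = fast.next_node.next_node  # advance two steps
        if slow is fast:                 # the pointers can only meet inside a cycle
            return True
    return False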
if __name__ == "__main__":
lowerCamelCase : Optional[int] = Node(1)
lowerCamelCase : List[str] = Node(2)
lowerCamelCase : Union[str, Any] = Node(3)
lowerCamelCase : Union[str, Any] = Node(4)
print(root_node.has_loop) # False
lowerCamelCase : List[str] = root_node.next_node
print(root_node.has_loop) # True
lowerCamelCase : Dict = Node(5)
lowerCamelCase : Any = Node(6)
lowerCamelCase : Optional[int] = Node(5)
lowerCamelCase : Optional[Any] = Node(6)
print(root_node.has_loop) # False
lowerCamelCase : List[str] = Node(1)
print(root_node.has_loop) # False
from math import pi


def arc_length(angle: int, radius: int) -> float:
    return 2 * pi * radius * (angle / 360)
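# Example: a 90 degree arc of a circle with radius 10 has length
# 2 * pi * 10 * (90 / 360) = 5 * pi, roughly 15.71.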
if __name__ == "__main__":
print(arc_length(90, 10))
import math


def proth(number: int) -> int:
    '''simple docstring'''
    if not isinstance(number, int):
        message = f'Input value of [number={number}] must be an integer'
        raise TypeError(message)

    if number < 1:
        message = f'Input value of [number={number}] must be > 0'
        raise ValueError(message)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3

        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

    return proth_list[number - 1]
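# Proth numbers have the form k * 2**n + 1 with k odd and 0 < k < 2**n;
# the sequence computed above starts 3, 5, 9, 13, 17, 25, 33, ...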
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(11):
_snake_case : Optional[int] = 0
try:
_snake_case : int = proth(number)
except ValueError:
print(F'''ValueError: there is no {number}th Proth number''')
continue
print(F'''The {number}th Proth number: {value}''')
'''simple docstring'''
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job):
    '''simple docstring'''
    job_info = {}

    start = job['started_at']
    end = job['completed_at']

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)

    job_info['started_at'] = start
    job_info['completed_at'] = end
    job_info['duration'] = duration_in_min

    return job_info
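# e.g. started_at="2023-01-01T10:00:00Z" with completed_at="2023-01-01T10:12:00Z"
# yields {"started_at": ..., "completed_at": ..., "duration": 12} (whole minutes).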
def get_job_time(workflow_run_id, token=None):
    '''simple docstring'''
    headers = None
    if token is not None:
        headers = {'Accept': 'application/vnd.github+json', 'Authorization': f'Bearer {token}'}

    url = f'https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job['name']: extract_time_from_single_job(job) for job in result['jobs']})
        pages_to_iterate_over = math.ceil((result['total_count'] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f'&page={i + 2}', headers=headers).json()
            job_time.update({job['name']: extract_time_from_single_job(job) for job in result['jobs']})

        return job_time
    except Exception:
        print(f'Unknown error, could not fetch links:\n{traceback.format_exc()}')

    return {}


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]['duration'], reverse=True))

    for k, v in job_time.items():
        print(f"{k}: {v['duration']}")
from scipy.stats import spearmanr
import datasets
__A ="\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n"
__A ="\nArgs:\n predictions (`List[float]`): Predicted labels, as returned by a model.\n references (`List[float]`): Ground truth labels.\n return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n only the spearmanr score. Defaults to `False`.\nReturns:\n spearmanr (`float`): Spearman correlation coefficient.\n p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n Example 1:\n >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n >>> print(results)\n {'spearmanr': -0.7}\n\n Example 2:\n >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n ... predictions=[10, 9, 2.5, 6, 4],\n ... return_pvalue=True)\n >>> print(results['spearmanr'])\n -0.7\n >>> print(round(results['spearmanr_pvalue'], 2))\n 0.19\n"
__A =R"\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Spearmanr(datasets.Metric):
    '''simple docstring'''

    def _info(self):
        '''simple docstring'''
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('float'),
                    'references': datasets.Value('float'),
                }
            ),
            reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        '''simple docstring'''
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_deta_config(model_name):
    '''simple docstring'''
    backbone_config = SwinConfig(
        embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), window_size=12,
        out_features=['stage2', 'stage3', 'stage4'], )

    config = DetaConfig(
        backbone_config=backbone_config, num_queries=900, encoder_ffn_dim=2048,
        decoder_ffn_dim=2048, num_feature_levels=5, assign_first_stage=True,
        with_box_refine=True, two_stage=True, )

    # set labels
    repo_id = 'huggingface/label-files'
    if "o365" in model_name:
        num_labels = 366
        filename = 'object365-id2label.json'
    else:
        num_labels = 91
        filename = 'coco-detection-id2label.json'

    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type='dataset')), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    '''simple docstring'''
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.patch_embed.proj.weight''', '''model.backbone.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.proj.bias''', '''model.backbone.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.weight''', '''model.backbone.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.bias''', '''model.backbone.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm1.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm1.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm2.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm2.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.reduction.weight', f'model.backbone.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.norm.weight', f'model.backbone.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.norm.bias', f'model.backbone.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append(('''backbone.0.body.norm1.weight''', '''model.backbone.model.hidden_states_norms.stage2.weight''') )
rename_keys.append(('''backbone.0.body.norm1.bias''', '''model.backbone.model.hidden_states_norms.stage2.bias''') )
rename_keys.append(('''backbone.0.body.norm2.weight''', '''model.backbone.model.hidden_states_norms.stage3.weight''') )
rename_keys.append(('''backbone.0.body.norm2.bias''', '''model.backbone.model.hidden_states_norms.stage3.bias''') )
rename_keys.append(('''backbone.0.body.norm3.weight''', '''model.backbone.model.hidden_states_norms.stage4.weight''') )
rename_keys.append(('''backbone.0.body.norm3.bias''', '''model.backbone.model.hidden_states_norms.stage4.bias''') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight', f'model.encoder.layers.{i}.self_attn.sampling_offsets.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias', f'model.encoder.layers.{i}.self_attn.sampling_offsets.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.attention_weights.weight', f'model.encoder.layers.{i}.self_attn.attention_weights.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.attention_weights.bias', f'model.encoder.layers.{i}.self_attn.attention_weights.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.value_proj.weight', f'model.encoder.layers.{i}.self_attn.value_proj.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.value_proj.bias', f'model.encoder.layers.{i}.self_attn.value_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.output_proj.weight', f'model.encoder.layers.{i}.self_attn.output_proj.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.output_proj.bias', f'model.encoder.layers.{i}.self_attn.output_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.weight', f'model.encoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'model.encoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'model.encoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'model.encoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'model.encoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'model.encoder.layers.{i}.fc2.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'model.encoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'model.encoder.layers.{i}.final_layer_norm.bias') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight', f'model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias', f'model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.attention_weights.weight', f'model.decoder.layers.{i}.encoder_attn.attention_weights.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.attention_weights.bias', f'model.decoder.layers.{i}.encoder_attn.attention_weights.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.value_proj.weight', f'model.decoder.layers.{i}.encoder_attn.value_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.value_proj.bias', f'model.decoder.layers.{i}.encoder_attn.value_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.output_proj.weight', f'model.decoder.layers.{i}.encoder_attn.output_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.output_proj.bias', f'model.decoder.layers.{i}.encoder_attn.output_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.weight', f'model.decoder.layers.{i}.encoder_attn_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'model.decoder.layers.{i}.encoder_attn_layer_norm.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'model.decoder.layers.{i}.self_attn.out_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'model.decoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm2.weight', f'model.decoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm2.bias', f'model.decoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'model.decoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'model.decoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'model.decoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'model.decoder.layers.{i}.fc2.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'model.decoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'model.decoder.layers.{i}.final_layer_norm.bias') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    '''simple docstring'''
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    '''simple docstring'''
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight')
            in_proj_bias = state_dict.pop(f'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias')
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'] = in_proj_weight[:dim, :]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'] = in_proj_bias[:dim]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'] = in_proj_weight[dim : dim * 2, :]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'] = in_proj_bias[dim : dim * 2]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'] = in_proj_weight[-dim:, :]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    '''simple docstring'''
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_weight')
        in_proj_bias = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'model.decoder.layers.{i}.self_attn.q_proj.weight'] = in_proj_weight[:hidden_size, :]
        state_dict[f'model.decoder.layers.{i}.self_attn.q_proj.bias'] = in_proj_bias[:hidden_size]
        state_dict[f'model.decoder.layers.{i}.self_attn.k_proj.weight'] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f'model.decoder.layers.{i}.self_attn.k_proj.bias'] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f'model.decoder.layers.{i}.self_attn.v_proj.weight'] = in_proj_weight[-hidden_size:, :]
        state_dict[f'model.decoder.layers.{i}.self_attn.v_proj.bias'] = in_proj_bias[-hidden_size:]
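# Both read_in_* helpers above undo the fused attention projections: the original
# checkpoints store query, key and value as a single stacked (3 * dim, dim) weight
# (plus a 3 * dim bias), which is sliced back into the separate q/k/v projections
# the HF model expects, in that order.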
def prepare_img():
    '''simple docstring'''
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    '''simple docstring'''
    config = get_deta_config(model_name)

    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id='nielsr/deta-checkpoints', filename='adet_swin_ft.pth')
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id='jozhang97/deta-swin-l-o365', filename='deta_swin_pt_o365.pth')
    else:
        raise ValueError(f'Model name {model_name} not supported')

    state_dict = torch.load(checkpoint_path, map_location='cpu')['model']

    # original state dict
    for name, param in state_dict.items():
        print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val

    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model.to(device)

    # load image processor
    processor = DetaImageProcessor(format='coco_detection')

    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors='pt')
    pixel_values = encoding['pixel_values']
    outputs = model(pixel_values.to(device))

    # verify logits
    print('Logits:', outputs.logits[0, :3, :3])
    print('Boxes:', outputs.pred_boxes[0, :3, :3])
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]]
        )
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]]
        )
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
    print('Everything ok!')

    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f'Saving PyTorch model and processor to {pytorch_dump_folder_path}...')
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    # Push to hub
    if push_to_hub:
        print('Pushing model and processor to hub...')
        model.push_to_hub(f'jozhang97/{model_name}')
        processor.push_to_hub(f'jozhang97/{model_name}')
if __name__ == "__main__":
__A =argparse.ArgumentParser()
parser.add_argument(
"--model_name",
type=str,
default="deta-swin-large",
choices=["deta-swin-large", "deta-swin-large-o365"],
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
help="Path to the folder to output PyTorch model.",
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
__A =parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
"""simple docstring"""
A: int = {str(digit): digit**5 for digit in range(1_0)}
def _snake_case ( UpperCamelCase : int ):
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(UpperCamelCase ) )
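# e.g. digits_fifth_powers_sum(4150) == 4**5 + 1**5 + 5**5 + 0**5 == 4150, so
# 4150 is one of the numbers included in the sum returned by solution() below.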
def solution() -> int:
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number)
    )
if __name__ == "__main__":
print(solution())
"""simple docstring"""
from typing import List
from .keymap import KEYMAP, get_character
def _snake_case ( UpperCamelCase : str ):
def decorator(UpperCamelCase : Optional[int] ):
UpperCAmelCase : List[Any] = getattr(UpperCamelCase , """handle_key""" , [] )
handle += [key]
setattr(UpperCamelCase , """handle_key""" , UpperCamelCase )
return func
return decorator
def _snake_case ( *UpperCamelCase : List[str] ):
def decorator(UpperCamelCase : Union[str, Any] ):
UpperCAmelCase : Optional[Any] = getattr(UpperCamelCase , """handle_key""" , [] )
handle += keys
setattr(UpperCamelCase , """handle_key""" , UpperCamelCase )
return func
return decorator
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
def __new__( cls , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
UpperCAmelCase : List[Any] = super().__new__(cls , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if not hasattr(_SCREAMING_SNAKE_CASE , """key_handler""" ):
setattr(_SCREAMING_SNAKE_CASE , """key_handler""" , {} )
setattr(_SCREAMING_SNAKE_CASE , """handle_input""" , KeyHandler.handle_input )
for value in attrs.values():
UpperCAmelCase : List[str] = getattr(_SCREAMING_SNAKE_CASE , """handle_key""" , [] )
for key in handled_keys:
UpperCAmelCase : Optional[int] = value
return new_cls
@staticmethod
def SCREAMING_SNAKE_CASE ( cls ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase : str = get_character()
if char != KEYMAP["undefined"]:
UpperCAmelCase : List[Any] = ord(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : int = cls.key_handler.get(_SCREAMING_SNAKE_CASE )
if handler:
UpperCAmelCase : int = char
return handler(cls )
else:
return None
def _snake_case ( cls : Union[str, Any] ):
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
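# A minimal usage sketch (hypothetical class and key name, assuming "up" is a
# valid KEYMAP entry):
#
#     class Menu(metaclass=KeyHandler):
#         @mark("up")
#         def move_up(cls):
#             ...
#
# Reading an "up" keypress through the class's handle_input hook then
# dispatches to move_up.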
"""simple docstring"""
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class PriorTransformerTests(ModelTesterMixin, unittest.TestCase):
    '''simple docstring'''
    model_class = PriorTransformer
    main_input_name = "hidden_states"

    @property
    def dummy_input(self):
        """simple docstring"""
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = floats_tensor((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def get_dummy_seed_input(self, seed=0):
        """simple docstring"""
        torch.manual_seed(seed)
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    @property
    def input_shape(self):
        """simple docstring"""
        return (4, 8)

    @property
    def output_shape(self):
        """simple docstring"""
        return (4, 8)

    def prepare_init_args_and_inputs_for_common(self):
        """simple docstring"""
        init_dict = {
            "num_attention_heads": 2,
            "attention_head_dim": 4,
            "num_layers": 2,
            "embedding_dim": 8,
            "num_embeddings": 7,
            "additional_embeddings": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_from_pretrained_hub(self):
        """simple docstring"""
        model, loading_info = PriorTransformer.from_pretrained(
            "hf-internal-testing/prior-dummy", output_loading_info=True
        )
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        hidden_states = model(**self.dummy_input)[0]

        assert hidden_states is not None, "Make sure output is not None"

    def test_forward_signature(self):
        """simple docstring"""
        init_dict, _ = self.prepare_init_args_and_inputs_for_common()

        model = self.model_class(**init_dict)
        signature = inspect.signature(model.forward)
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]

        expected_arg_names = ["hidden_states", "timestep"]
        self.assertListEqual(arg_names[:2], expected_arg_names)

    def test_output_pretrained(self):
        """simple docstring"""
        model = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy")
        model = model.to(torch_device)

        if hasattr(model, "set_default_attn_processor"):
            model.set_default_attn_processor()

        input = self.get_dummy_seed_input()

        with torch.no_grad():
            output = model(**input)[0]

        output_slice = output[0, :5].flatten().cpu()
        print(output_slice)

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        expected_output_slice = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239])
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))


@slow
class PriorTransformerIntegrationTests(unittest.TestCase):
    '''simple docstring'''

    def get_dummy_seed_input(self, batch_size=1, embedding_dim=768, num_embeddings=77, seed=0):
        """simple docstring"""
        torch.manual_seed(seed)

        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def tearDown(self):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
            [37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
            # fmt: on
        ]
    )
    def test_kandinsky_prior(self, seed, expected_slice):
        """simple docstring"""
        model = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior", subfolder="prior")
        model.to(torch_device)
        input = self.get_dummy_seed_input(seed=seed)

        with torch.no_grad():
            sample = model(**input)[0]

        assert list(sample.shape) == [1, 768]

        output_slice = sample[0, :8].flatten().cpu()
        print(output_slice)
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : List[str] ,lowercase_ : Dict ,lowercase_ : Dict=7 ,lowercase_ : Optional[int]=3 ,lowercase_ : int=3_0 ,lowercase_ : Optional[Any]=4_0_0 ,lowercase_ : Any=True ,lowercase_ : List[str]=None ,lowercase_ : str=True ,lowercase_ : List[Any]=[0.5, 0.5, 0.5] ,lowercase_ : List[str]=[0.5, 0.5, 0.5] ,lowercase_ : Any=True ,lowercase_ : Union[str, Any]=1 / 2_5_5 ,lowercase_ : str=True ,):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
lowerCAmelCase__ : str = size if size is not None else {'''shortest_edge''': 1_8, '''longest_edge''': 1_3_3_3}
lowerCAmelCase__ : Any = parent
lowerCAmelCase__ : Tuple = batch_size
lowerCAmelCase__ : List[str] = num_channels
lowerCAmelCase__ : Optional[Any] = min_resolution
lowerCAmelCase__ : Union[str, Any] = max_resolution
lowerCAmelCase__ : Optional[int] = do_resize
lowerCAmelCase__ : str = size
lowerCAmelCase__ : Union[str, Any] = do_normalize
lowerCAmelCase__ : List[str] = image_mean
lowerCAmelCase__ : str = image_std
lowerCAmelCase__ : Optional[Any] = do_rescale
lowerCAmelCase__ : Union[str, Any] = rescale_factor
lowerCAmelCase__ : Optional[Any] = do_pad
def __lowerCAmelCase ( self : Optional[Any] ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def __lowerCAmelCase ( self : List[str] ,lowercase_ : List[Any] ,lowercase_ : int=False ):
if not batched:
lowerCAmelCase__ : Tuple = image_inputs[0]
if isinstance(lowercase_ ,Image.Image ):
lowerCAmelCase__ ,lowerCAmelCase__ : List[str] = image.size
else:
lowerCAmelCase__ ,lowerCAmelCase__ : Optional[int] = image.shape[1], image.shape[2]
if w < h:
lowerCAmelCase__ : Any = int(self.size['''shortest_edge'''] * h / w )
lowerCAmelCase__ : str = self.size['''shortest_edge''']
elif w > h:
lowerCAmelCase__ : Union[str, Any] = self.size['''shortest_edge''']
lowerCAmelCase__ : Dict = int(self.size['''shortest_edge'''] * w / h )
else:
lowerCAmelCase__ : List[str] = self.size['''shortest_edge''']
lowerCAmelCase__ : str = self.size['''shortest_edge''']
else:
lowerCAmelCase__ : Optional[Any] = []
for image in image_inputs:
lowerCAmelCase__ ,lowerCAmelCase__ : Any = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
lowerCAmelCase__ : List[str] = max(lowercase_ ,key=lambda lowercase_ : item[0] )[0]
lowerCAmelCase__ : Any = max(lowercase_ ,key=lambda lowercase_ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE ( a_ , unittest.TestCase ):
"""simple docstring"""
lowercase__ = DetaImageProcessor if is_vision_available() else None
def __lowerCAmelCase ( self : Any ):
lowerCAmelCase__ : Optional[Any] = DetaImageProcessingTester(self )
@property
def __lowerCAmelCase ( self : Any ):
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCAmelCase ( self : List[str] ):
lowerCAmelCase__ : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase_ ,'''image_mean''' ) )
self.assertTrue(hasattr(lowercase_ ,'''image_std''' ) )
self.assertTrue(hasattr(lowercase_ ,'''do_normalize''' ) )
self.assertTrue(hasattr(lowercase_ ,'''do_resize''' ) )
self.assertTrue(hasattr(lowercase_ ,'''do_rescale''' ) )
self.assertTrue(hasattr(lowercase_ ,'''do_pad''' ) )
self.assertTrue(hasattr(lowercase_ ,'''size''' ) )
def __lowerCAmelCase ( self : Dict ):
lowerCAmelCase__ : Dict = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'''shortest_edge''': 1_8, '''longest_edge''': 1_3_3_3} )
self.assertEqual(image_processor.do_pad ,lowercase_ )
def __lowerCAmelCase ( self : List[str] ):
pass
def __lowerCAmelCase ( self : Union[str, Any] ):
# Initialize image_processing
lowerCAmelCase__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ ,Image.Image )
# Test not batched input
lowerCAmelCase__ : List[Any] = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
lowerCAmelCase__ ,lowerCAmelCase__ : Tuple = self.image_processor_tester.get_expected_values(lowercase_ )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
lowerCAmelCase__ ,lowerCAmelCase__ : Optional[int] = self.image_processor_tester.get_expected_values(lowercase_ ,batched=lowercase_ )
lowerCAmelCase__ : Optional[int] = image_processing(lowercase_ ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
def __lowerCAmelCase ( self : Dict ):
# Initialize image_processing
lowerCAmelCase__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase__ : Tuple = prepare_image_inputs(self.image_processor_tester ,equal_resolution=lowercase_ ,numpify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ ,np.ndarray )
# Test not batched input
lowerCAmelCase__ : List[str] = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
lowerCAmelCase__ ,lowerCAmelCase__ : Any = self.image_processor_tester.get_expected_values(lowercase_ )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
lowerCAmelCase__ : str = image_processing(lowercase_ ,return_tensors='''pt''' ).pixel_values
lowerCAmelCase__ ,lowerCAmelCase__ : Optional[Any] = self.image_processor_tester.get_expected_values(lowercase_ ,batched=lowercase_ )
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
def __lowerCAmelCase ( self : Union[str, Any] ):
# Initialize image_processing
lowerCAmelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase__ : Any = prepare_image_inputs(self.image_processor_tester ,equal_resolution=lowercase_ ,torchify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ ,torch.Tensor )
# Test not batched input
lowerCAmelCase__ : List[Any] = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
lowerCAmelCase__ ,lowerCAmelCase__ : Any = self.image_processor_tester.get_expected_values(lowercase_ )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
lowerCAmelCase__ : str = image_processing(lowercase_ ,return_tensors='''pt''' ).pixel_values
lowerCAmelCase__ ,lowerCAmelCase__ : List[Any] = self.image_processor_tester.get_expected_values(lowercase_ ,batched=lowercase_ )
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
@slow
def __lowerCAmelCase ( self : Tuple ):
# prepare image and target
lowerCAmelCase__ : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' ,'''r''' ) as f:
lowerCAmelCase__ : Union[str, Any] = json.loads(f.read() )
lowerCAmelCase__ : str = {'''image_id''': 3_9_7_6_9, '''annotations''': target}
# encode them
lowerCAmelCase__ : Optional[Any] = DetaImageProcessor()
lowerCAmelCase__ : Optional[int] = image_processing(images=lowercase_ ,annotations=lowercase_ ,return_tensors='''pt''' )
# verify pixel values
lowerCAmelCase__ : Dict = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding['''pixel_values'''].shape ,lowercase_ )
lowerCAmelCase__ : Any = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] ,lowercase_ ,atol=1E-4 ) )
# verify area
lowerCAmelCase__ : Tuple = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] ,lowercase_ ) )
# verify boxes
lowerCAmelCase__ : List[str] = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape ,lowercase_ )
lowerCAmelCase__ : Tuple = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] ,lowercase_ ,atol=1E-3 ) )
# verify image_id
lowerCAmelCase__ : Optional[int] = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] ,lowercase_ ) )
# verify is_crowd
lowerCAmelCase__ : List[str] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] ,lowercase_ ) )
# verify class_labels
lowerCAmelCase__ : Any = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] ,lowercase_ ) )
# verify orig_size
lowerCAmelCase__ : int = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] ,lowercase_ ) )
# verify size
lowerCAmelCase__ : str = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] ,lowercase_ ) )
@slow
def __lowerCAmelCase ( self : Any ):
# prepare image, target and masks_path
lowerCAmelCase__ : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' ,'''r''' ) as f:
lowerCAmelCase__ : str = json.loads(f.read() )
lowerCAmelCase__ : Tuple = {'''file_name''': '''000000039769.png''', '''image_id''': 3_9_7_6_9, '''segments_info''': target}
lowerCAmelCase__ : Optional[Any] = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
lowerCAmelCase__ : str = DetaImageProcessor(format='''coco_panoptic''' )
lowerCAmelCase__ : Optional[int] = image_processing(images=lowercase_ ,annotations=lowercase_ ,masks_path=lowercase_ ,return_tensors='''pt''' )
# verify pixel values
lowerCAmelCase__ : Any = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding['''pixel_values'''].shape ,lowercase_ )
lowerCAmelCase__ : int = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] ,lowercase_ ,atol=1E-4 ) )
# verify area
lowerCAmelCase__ : Tuple = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] ,lowercase_ ) )
# verify boxes
lowerCAmelCase__ : Optional[Any] = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape ,lowercase_ )
lowerCAmelCase__ : str = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] ,lowercase_ ,atol=1E-3 ) )
# verify image_id
lowerCAmelCase__ : Union[str, Any] = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] ,lowercase_ ) )
# verify is_crowd
lowerCAmelCase__ : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] ,lowercase_ ) )
# verify class_labels
lowerCAmelCase__ : List[str] = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] ,lowercase_ ) )
# verify masks
lowerCAmelCase__ : Optional[int] = 8_2_2_8_7_3
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() ,lowercase_ )
# verify orig_size
lowerCAmelCase__ : List[Any] = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] ,lowercase_ ) )
# verify size
lowerCAmelCase__ : Optional[Any] = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] ,lowercase_ ) )
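# A hedged minimal sketch of the annotation format exercised by the tests
# above (values illustrative, added for context; standard COCO detection
# convention): each target pairs an image_id with a list of
# {bbox [x, y, w, h], category_id, area, iscrowd} entries, and the processor
# returns boxes normalized to [0, 1] in (center_x, center_y, width, height)
# order, which is why the expected box tensors above hold floats below 1.
#
# target = {"image_id": 39769, "annotations": [
#     {"bbox": [10.0, 20.0, 30.0, 40.0], "category_id": 75, "area": 1200.0, "iscrowd": 0},
# ]}
# encoding = DetaImageProcessor()(images=image, annotations=target, return_tensors="pt")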
"""simple docstring"""
__author__ = "Tobias Carryer"

from time import time


class LinearCongruentialGenerator:
    """A pseudorandom number generator based on the linear congruential method."""

    def __init__(self, multiplier: int, increment: int, modulo: int, seed: int = int(time())):  # noqa: B008
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed

    def next_number(self) -> int:
        """Advance the generator one step and return the new pseudorandom value."""
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed


if __name__ == "__main__":
    # Show the LCG in action.
    lcg = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31)
    while True:
        print(lcg.next_number())
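# A hedged sanity check added for illustration (not in the original file):
# with seed 0 the first draw is just the increment, since
# (multiplier * 0 + increment) % modulo == increment whenever increment < modulo.
_demo = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31, seed=0)
assert _demo.next_number() == 1013904223
assert _demo.next_number() == (1664525 * 1013904223 + 1013904223) % (2 << 31)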
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    """
    Output of `PriorTransformer`: the predicted CLIP image embedding.
    """

    predicted_image_embedding: torch.FloatTensor


class PriorTransformer(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 32,
        attention_head_dim: int = 64,
        num_layers: int = 20,
        embedding_dim: int = 768,
        num_embeddings=77,
        additional_embeddings=4,
        dropout: float = 0.0,
        time_embed_act_fn: str = "silu",
        norm_in_type: Optional[str] = None,
        embedding_proj_norm_type: Optional[str] = None,
        encoder_hid_proj_type: Optional[str] = "linear",
        added_emb_type: Optional[str] = "prd",
        time_embed_dim: Optional[int] = None,
        embedding_proj_dim: Optional[int] = None,
        clip_embed_dim: Optional[int] = None,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings

        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim

        self.time_proj = Timesteps(inner_dim, True, 0)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn)
        self.proj_in = nn.Linear(embedding_dim, inner_dim)

        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim)
        else:
            raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}")

        self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)

        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
        else:
            raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}")

        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))

        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`."
            )

        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    activation_fn="gelu",
                    attention_bias=True,
                )
                for d in range(num_layers)
            ]
        )

        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f"Unsupported norm_in_type: {norm_in_type}.")

        self.norm_out = nn.LayerNorm(inner_dim)
        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)

        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0
        )
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer("causal_attention_mask", causal_attention_mask, persistent=False)

        self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
        self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))

    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)
        return processors

    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        count = len(self.attn_processors.keys())
        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())

    def forward(
        self,
        hidden_states,
        timestep: Union[torch.Tensor, float, int],
        proj_embedding: torch.FloatTensor,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.BoolTensor] = None,
        return_dict: bool = True,
    ):
        batch_size = hidden_states.shape[0]
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(hidden_states.device)
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)

        timesteps_projected = self.time_proj(timesteps)
        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype)
        time_embeddings = self.time_embedding(timesteps_projected)

        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding)
        proj_embeddings = self.embedding_proj(proj_embedding)
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set")

        hidden_states = self.proj_in(hidden_states)
        positional_embeddings = self.positional_embedding.to(hidden_states.dtype)

        additional_embeds = []
        additional_embeddings_len = 0
        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states)
            additional_embeddings_len += encoder_hidden_states.shape[1]
        if len(proj_embeddings.shape) == 2:
            proj_embeddings = proj_embeddings[:, None, :]
        if len(hidden_states.shape) == 2:
            hidden_states = hidden_states[:, None, :]
        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]
        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
            additional_embeds.append(prd_embedding)

        hidden_states = torch.cat(additional_embeds, dim=1)

        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings,
                (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ),
                value=0.0,
            )

        hidden_states = hidden_states + positional_embeddings

        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
            attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)

        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states)
        for block in self.transformer_blocks:
            hidden_states = block(hidden_states, attention_mask=attention_mask)
        hidden_states = self.norm_out(hidden_states)

        if self.prd_embedding is not None:
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]

        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)

        if not return_dict:
            return (predicted_image_embedding,)
        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)

    def post_process_latents(self, prior_latents):
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
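# A hedged usage sketch (added for illustration; the module is too large to
# instantiate in a comment-free test). Shapes follow the defaults above
# (embedding_dim=768, num_embeddings=77): the prior maps a noised CLIP image
# embedding plus text conditioning to a predicted CLIP image embedding.
#
# prior = PriorTransformer()
# out = prior(
#     hidden_states=torch.randn(1, 768),             # noised image embedding
#     timestep=10,
#     proj_embedding=torch.randn(1, 768),            # pooled text embedding
#     encoder_hidden_states=torch.randn(1, 77, 768), # per-token text embeddings
# )
# out.predicted_image_embedding.shape  # torch.Size([1, 768])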
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
        if equal_resolution:
            # all images share the maximum resolution
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            # sample a random (width, height) per image
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))
        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]
        return image_inputs
@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCAmelCase , '''do_resize''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''size''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''do_center_crop''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''center_crop''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''image_mean''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''image_std''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''do_convert_rgb''' ) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 2_2_4, '''width''': 2_2_4} )
self.assertEqual(image_processor.crop_size , {'''height''': 1_8, '''width''': 1_8} )
_lowerCamelCase : Any = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 4_2} )
self.assertEqual(image_processor.crop_size , {'''height''': 8_4, '''width''': 8_4} )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCamelCase : Tuple = self.image_processor_tester.prepare_inputs(equal_resolution=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , Image.Image )
# Test not batched input
_lowerCamelCase : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_lowerCamelCase : str = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCamelCase : Tuple = self.image_processor_tester.prepare_inputs(equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , np.ndarray )
# Test not batched input
_lowerCamelCase : Dict = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_lowerCamelCase : Optional[int] = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCamelCase : Dict = self.image_processor_tester.prepare_inputs(equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , torch.Tensor )
# Test not batched input
_lowerCamelCase : Dict = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_lowerCamelCase : Union[str, Any] = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCAmelCase , '''do_resize''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''size''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''do_center_crop''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''center_crop''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''image_mean''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''image_std''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''do_convert_rgb''' ) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
_lowerCamelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCamelCase : List[Any] = self.image_processor_tester.prepare_inputs(equal_resolution=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , Image.Image )
# Test not batched input
_lowerCamelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_lowerCamelCase : str = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
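# A hedged note and sketch on the four-channel case above (added for
# illustration): with do_convert_rgb=True the processor converts RGBA inputs
# to 3-channel RGB before normalization, which is why
# expected_encoded_image_num_channels is 3 even though the tester produces
# 4-channel images.
#
# proc = ChineseCLIPImageProcessor(do_convert_rgb=True)
# out = proc(images=Image.new("RGBA", (32, 32)), return_tensors="pt")
# out.pixel_values.shape[1]  # 3 channels after RGB conversion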
"""simple docstring"""
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    """Greedily make change for ``value`` using the given denominations.

    The greedy choice is only guaranteed optimal for canonical coin systems
    (such as the Indian currency defaults below).
    """
    total_value = int(value)
    # Initialize result
    answer = []
    # Traverse denominations, largest first
    for denomination in reversed(denominations):
        # Use the current denomination while it still fits
        while total_value >= denomination:
            total_value -= denomination
            answer.append(denomination)  # Append to the "answer" array
    return answer


# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"
    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())
        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()
    if int(value) <= 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
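# A hand-computed sanity check added for illustration (not in the original
# file): making change for 987 greedily picks the largest coin that still
# fits at each step.
assert find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987") == [
    500, 100, 100, 100, 100, 50, 20, 10, 5, 2
]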
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
    # See all CANINE models at https://huggingface.co/models?filter=canine
}


class CanineConfig(PretrainedConfig):
    model_type = "canine"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0xE000,
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16384,
        local_transformer_stride=128,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
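# A hedged sanity check added for illustration: CANINE is tokenizer-free, so
# instead of a subword vocab size the config carries character-hashing
# hyperparameters. Using the class defined above with its defaults:
_demo_config = CanineConfig()
assert (_demo_config.num_hash_functions, _demo_config.num_hash_buckets, _demo_config.downsampling_rate) == (8, 16384, 4)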
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'''
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''junnyu/roformer_chinese_small''': 1_536,
'''junnyu/roformer_chinese_base''': 1_536,
'''junnyu/roformer_chinese_char_small''': 512,
'''junnyu/roformer_chinese_char_base''': 512,
'''junnyu/roformer_small_discriminator''': 128,
'''junnyu/roformer_small_generator''': 128,
}
PRETRAINED_INIT_CONFIGURATION = {
'''junnyu/roformer_chinese_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_base''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_base''': {'''do_lower_case''': True},
'''junnyu/roformer_small_discriminator''': {'''do_lower_case''': True},
'''junnyu/roformer_small_generator''': {'''do_lower_case''': True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            pre_tok_state.get("lowercase", do_lower_case) != do_lower_case
            or pre_tok_state.get("strip_accents", strip_accents) != strip_accents
        ):
            pre_tok_class = getattr(normalizers, pre_tok_state.pop("type"))
            pre_tok_state["lowercase"] = do_lower_case
            pre_tok_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)
        self.do_lower_case = do_lower_case

    def __getstate__(self):
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(self, save_directory, legacy_format=None, filename_prefix=None, push_to_hub=False, **kwargs):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
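# A hedged usage sketch (requires a model download; added for illustration):
# the jieba-based custom pre-tokenizer wired in above segments Chinese text
# into words before WordPiece, unlike a plain BERT-style tokenizer.
#
# from transformers import RoFormerTokenizerFast
# tokenizer = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
# tokenizer.tokenize("今天天气非常好。")  # roughly ['今', '天', '天', '气', '非常', '好', '。']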
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    """
    Output class for text-to-video pipelines: `frames` holds the decoded video
    frames, either as a list of numpy arrays or as a torch tensor.
    """

    frames: Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
"""simple docstring"""
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        # extra 5-way classification head on top of the pooled output
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)


class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule


def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooler_labels):
    def cross_entropy(logits, labels, reduction=None):
        """One-hot cross entropy with an optional reduction (e.g. jnp.mean)."""
        vocab_size = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(vocab_size)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooler_labels)
    return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class __snake_case :
"""simple docstring"""
_lowerCamelCase = "google/bigbird-roberta-base"
_lowerCamelCase = 30_00
_lowerCamelCase = 1_05_00
_lowerCamelCase = 1_28
_lowerCamelCase = 3
_lowerCamelCase = 1
_lowerCamelCase = 5
# tx_args
_lowerCamelCase = 3e-5
_lowerCamelCase = 0.0
_lowerCamelCase = 2_00_00
_lowerCamelCase = 0.0_0_9_5
_lowerCamelCase = "bigbird-roberta-natural-questions"
_lowerCamelCase = "training-expt"
_lowerCamelCase = "data/nq-training.jsonl"
_lowerCamelCase = "data/nq-validation.jsonl"
def UpperCamelCase__( self ):
'''simple docstring'''
os.makedirs(self.base_dir , exist_ok=__lowerCamelCase )
__A : Dict = os.path.join(self.base_dir , self.save_dir )
__A : Dict = self.batch_size_per_device * jax.device_count()
@dataclass
class __snake_case :
"""simple docstring"""
_lowerCamelCase = 42
_lowerCamelCase = 40_96 # no dynamic padding on TPUs
def __call__( self , __lowerCamelCase ):
'''simple docstring'''
__A : Optional[int] = self.collate_fn(__lowerCamelCase )
__A : Tuple = jax.tree_util.tree_map(__lowerCamelCase , __lowerCamelCase )
return batch
def UpperCamelCase__( self , __lowerCamelCase ):
'''simple docstring'''
__A , __A : List[Any] = self.fetch_inputs(features['''input_ids'''] )
__A : Union[str, Any] = {
'''input_ids''': jnp.array(__lowerCamelCase , dtype=jnp.intaa ),
'''attention_mask''': jnp.array(__lowerCamelCase , dtype=jnp.intaa ),
'''start_labels''': jnp.array(features['''start_token'''] , dtype=jnp.intaa ),
'''end_labels''': jnp.array(features['''end_token'''] , dtype=jnp.intaa ),
'''pooled_labels''': jnp.array(features['''category'''] , dtype=jnp.intaa ),
}
return batch
def UpperCamelCase__( self , __lowerCamelCase ):
'''simple docstring'''
__A : Any = [self._fetch_inputs(__lowerCamelCase ) for ids in input_ids]
return zip(*__lowerCamelCase )
def UpperCamelCase__( self , __lowerCamelCase ):
'''simple docstring'''
__A : Any = [1 for _ in range(len(__lowerCamelCase ) )]
while len(__lowerCamelCase ) < self.max_length:
input_ids.append(self.pad_id )
attention_mask.append(0 )
return input_ids, attention_mask
def __lowercase ( snake_case_ : List[Any] ,snake_case_ : Optional[Any] ,snake_case_ : List[str]=None ) ->Optional[int]:
'''simple docstring'''
if seed is not None:
__A : List[Any] = dataset.shuffle(seed=snake_case_ )
for i in range(len(snake_case_ ) // batch_size ):
__A : Tuple = dataset[i * batch_size : (i + 1) * batch_size]
yield dict(snake_case_ )
@partial(jax.pmap ,axis_name='''batch''' )
def __lowercase ( snake_case_ : str ,snake_case_ : Union[str, Any] ,**snake_case_ : List[str] ) ->Tuple:
'''simple docstring'''
def loss_fn(snake_case_ : List[str] ):
__A : str = model_inputs.pop('''start_labels''' )
__A : str = model_inputs.pop('''end_labels''' )
__A : int = model_inputs.pop('''pooled_labels''' )
__A : Dict = state.apply_fn(**snake_case_ ,params=snake_case_ ,dropout_rng=snake_case_ ,train=snake_case_ )
__A , __A , __A : Union[str, Any] = outputs
return state.loss_fn(
snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ ,)
__A , __A : int = jax.random.split(snake_case_ )
__A : str = jax.value_and_grad(snake_case_ )
__A , __A : Optional[int] = grad_fn(state.params )
__A : List[str] = jax.lax.pmean({'''loss''': loss} ,axis_name='''batch''' )
__A : List[str] = jax.lax.pmean(snake_case_ ,'''batch''' )
__A : str = state.apply_gradients(grads=snake_case_ )
return state, metrics, new_drp_rng
@partial(jax.pmap ,axis_name='''batch''' )
def __lowercase ( snake_case_ : int ,**snake_case_ : Union[str, Any] ) ->List[str]:
'''simple docstring'''
__A : Tuple = model_inputs.pop('''start_labels''' )
__A : Dict = model_inputs.pop('''end_labels''' )
__A : int = model_inputs.pop('''pooled_labels''' )
__A : List[str] = state.apply_fn(**snake_case_ ,params=state.params ,train=snake_case_ )
__A , __A , __A : Dict = outputs
__A : Optional[int] = state.loss_fn(snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ )
__A : List[str] = jax.lax.pmean({'''loss''': loss} ,axis_name='''batch''' )
return metrics
class __snake_case ( train_state.TrainState ):
"""simple docstring"""
_lowerCamelCase = struct.field(pytree_node=SCREAMING_SNAKE_CASE__ )
@dataclass
class __snake_case :
"""simple docstring"""
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = None
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=None ):
'''simple docstring'''
__A : Tuple = model.params
__A : Union[str, Any] = TrainState.create(
apply_fn=model.__call__ , params=__lowerCamelCase , tx=__lowerCamelCase , loss_fn=__lowerCamelCase , )
if ckpt_dir is not None:
__A , __A , __A , __A , __A : Optional[Any] = restore_checkpoint(__lowerCamelCase , __lowerCamelCase )
__A : List[Any] = {
'''lr''': args.lr,
'''init_lr''': args.init_lr,
'''warmup_steps''': args.warmup_steps,
'''num_train_steps''': num_train_steps,
'''weight_decay''': args.weight_decay,
}
__A , __A : List[str] = build_tx(**__lowerCamelCase )
__A : int = train_state.TrainState(
step=__lowerCamelCase , apply_fn=model.__call__ , params=__lowerCamelCase , tx=__lowerCamelCase , opt_state=__lowerCamelCase , )
__A : int = args
__A : Optional[Any] = data_collator
__A : Tuple = lr
__A : List[Any] = params
__A : Dict = jax_utils.replicate(__lowerCamelCase )
return state
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
'''simple docstring'''
__A : List[Any] = self.args
__A : Dict = len(__lowerCamelCase ) // args.batch_size
__A : List[Any] = jax.random.PRNGKey(0 )
__A : Optional[Any] = jax.random.split(__lowerCamelCase , jax.device_count() )
for epoch in range(args.max_epochs ):
__A : Tuple = jnp.array(0 , dtype=jnp.floataa )
__A : Optional[Any] = get_batched_dataset(__lowerCamelCase , args.batch_size , seed=__lowerCamelCase )
__A : Union[str, Any] = 0
for batch in tqdm(__lowerCamelCase , total=__lowerCamelCase , desc=F"""Running EPOCH-{epoch}""" ):
__A : Optional[Any] = self.data_collator(__lowerCamelCase )
__A , __A , __A : Union[str, Any] = self.train_step_fn(__lowerCamelCase , __lowerCamelCase , **__lowerCamelCase )
running_loss += jax_utils.unreplicate(metrics['''loss'''] )
i += 1
if i % args.logging_steps == 0:
__A : Union[str, Any] = jax_utils.unreplicate(state.step )
__A : Optional[int] = running_loss.item() / i
__A : List[Any] = self.scheduler_fn(state_step - 1 )
__A : Union[str, Any] = self.evaluate(__lowerCamelCase , __lowerCamelCase )
__A : Optional[Any] = {
'''step''': state_step.item(),
'''eval_loss''': eval_loss.item(),
'''tr_loss''': tr_loss,
'''lr''': lr.item(),
}
tqdm.write(str(__lowerCamelCase ) )
self.logger.log(__lowerCamelCase , commit=__lowerCamelCase )
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + F"""-e{epoch}-s{i}""" , state=__lowerCamelCase )
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase ):
'''simple docstring'''
__A : Union[str, Any] = get_batched_dataset(__lowerCamelCase , self.args.batch_size )
__A : int = len(__lowerCamelCase ) // self.args.batch_size
__A : Optional[Any] = jnp.array(0 , dtype=jnp.floataa )
__A : Dict = 0
for batch in tqdm(__lowerCamelCase , total=__lowerCamelCase , desc='''Evaluating ... ''' ):
__A : List[str] = self.data_collator(__lowerCamelCase )
__A : Union[str, Any] = self.val_step_fn(__lowerCamelCase , **__lowerCamelCase )
running_loss += jax_utils.unreplicate(metrics['''loss'''] )
i += 1
return running_loss / i
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase ):
'''simple docstring'''
__A : Dict = jax_utils.unreplicate(__lowerCamelCase )
print(F"""SAVING CHECKPOINT IN {save_dir}""" , end=''' ... ''' )
self.model_save_fn(__lowerCamelCase , params=state.params )
with open(os.path.join(__lowerCamelCase , '''opt_state.msgpack''' ) , '''wb''' ) as f:
f.write(to_bytes(state.opt_state ) )
joblib.dump(self.args , os.path.join(__lowerCamelCase , '''args.joblib''' ) )
joblib.dump(self.data_collator , os.path.join(__lowerCamelCase , '''data_collator.joblib''' ) )
with open(os.path.join(__lowerCamelCase , '''training_state.json''' ) , '''w''' ) as f:
json.dump({'''step''': state.step.item()} , __lowerCamelCase )
print('''DONE''' )
def __lowercase ( snake_case_ : int ,snake_case_ : Dict ) ->Optional[int]:
'''simple docstring'''
print(F"""RESTORING CHECKPOINT FROM {save_dir}""" ,end=''' ... ''' )
with open(os.path.join(snake_case_ ,'''flax_model.msgpack''' ) ,'''rb''' ) as f:
__A : List[Any] = from_bytes(state.params ,f.read() )
with open(os.path.join(snake_case_ ,'''opt_state.msgpack''' ) ,'''rb''' ) as f:
__A : Optional[int] = from_bytes(state.opt_state ,f.read() )
__A : Tuple = joblib.load(os.path.join(snake_case_ ,'''args.joblib''' ) )
__A : List[str] = joblib.load(os.path.join(snake_case_ ,'''data_collator.joblib''' ) )
with open(os.path.join(snake_case_ ,'''training_state.json''' ) ,'''r''' ) as f:
__A : Dict = json.load(snake_case_ )
__A : int = training_state['''step''']
print('''DONE''' )
return params, opt_state, step, args, data_collator
def __lowercase ( snake_case_ : List[str] ,snake_case_ : Any ,snake_case_ : Dict ,snake_case_ : str ) ->List[str]:
'''simple docstring'''
__A : str = num_train_steps - warmup_steps
__A : Union[str, Any] = optax.linear_schedule(init_value=snake_case_ ,end_value=snake_case_ ,transition_steps=snake_case_ )
__A : Optional[Any] = optax.linear_schedule(init_value=snake_case_ ,end_value=1e-7 ,transition_steps=snake_case_ )
__A : Any = optax.join_schedules(schedules=[warmup_fn, decay_fn] ,boundaries=[warmup_steps] )
return lr
def __lowercase ( snake_case_ : List[str] ,snake_case_ : List[Any] ,snake_case_ : Union[str, Any] ,snake_case_ : List[Any] ,snake_case_ : str ) ->List[str]:
'''simple docstring'''
def weight_decay_mask(snake_case_ : List[Any] ):
__A : List[Any] = traverse_util.flatten_dict(snake_case_ )
__A : int = {k: (v[-1] != '''bias''' and v[-2:] != ('''LayerNorm''', '''scale''')) for k, v in params.items()}
return traverse_util.unflatten_dict(snake_case_ )
__A : List[Any] = scheduler_fn(snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ )
__A : List[str] = optax.adamw(learning_rate=snake_case_ ,weight_decay=snake_case_ ,mask=snake_case_ )
return tx, lr
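# A hedged, self-contained sketch of the optax schedule assembled above
# (numbers are illustrative, not the training defaults): linear warmup to the
# peak learning rate, then linear decay toward ~0, joined at the warmup
# boundary.
if __name__ == "__main__":
    import optax

    demo_schedule = optax.join_schedules(
        schedules=[
            optax.linear_schedule(init_value=0.0, end_value=3e-5, transition_steps=100),
            optax.linear_schedule(init_value=3e-5, end_value=1e-7, transition_steps=900),
        ],
        boundaries=[100],
    )
    print(demo_schedule(0), demo_schedule(100), demo_schedule(1000))  # 0.0 -> 3e-5 -> ~1e-7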
'''simple docstring'''
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
lowerCAmelCase_ : List[str] = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n'
lowerCAmelCase_ : Any = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n'
lowerCAmelCase_ : Union[str, Any] = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n \'bleu\': bleu score,\n \'precisions\': geometric mean of n-gram precisions,\n \'brevity_penalty\': brevity penalty,\n \'length_ratio\': ratio of lengths,\n \'translation_length\': translation_length,\n \'reference_length\': reference_length\nExamples:\n\n >>> predictions = [\n ... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample\n ... ["foo", "bar", "foobar"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)\n ... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric("bleu")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results["bleu"])\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Bleu(datasets.Metric):
"""simple docstring"""
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"] , reference_urls=[
"https://en.wikipedia.org/wiki/BLEU",
"https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
] , )
    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
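# For reference (the standard BLEU definition computed by `compute_bleu`
# above; added for illustration, not part of the original file):
#
#   BLEU = BP * exp( sum_{n=1}^{N} w_n * log p_n ),   w_n = 1 / N
#   BP   = 1              if c > r
#        = exp(1 - r / c) otherwise
#
# where p_n are the modified n-gram precisions up to N = max_order, c is the
# total length of the translations, and r the effective reference length.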
'''simple docstring'''
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
lowerCAmelCase_ : str = {
'linear': PIL.Image.Resampling.BILINEAR,
'bilinear': PIL.Image.Resampling.BILINEAR,
'bicubic': PIL.Image.Resampling.BICUBIC,
'lanczos': PIL.Image.Resampling.LANCZOS,
'nearest': PIL.Image.Resampling.NEAREST,
}
else:
lowerCAmelCase_ : Union[str, Any] = {
'linear': PIL.Image.LINEAR,
'bilinear': PIL.Image.BILINEAR,
'bicubic': PIL.Image.BICUBIC,
'lanczos': PIL.Image.LANCZOS,
'nearest': PIL.Image.NEAREST,
}
def pt_to_pil(images):
    """Convert a batch of torch tensors in [-1, 1] to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    """Convert a numpy image batch (values in [0, 1]) to a list of PIL images."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
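# A hedged usage sketch (added for illustration; assumes `torch` is
# installed, which this module itself does not import):
#
# import torch
# images = pt_to_pil(torch.rand(2, 3, 64, 64) * 2 - 1)  # model outputs in [-1, 1]
# images[0].size  # (64, 64)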
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse('1.11')


def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    pipeline = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=dtype).to(device)
    output_path = Path(output_path)

    # TEXT ENCODER
    num_tokens = pipeline.text_encoder.config.max_position_embeddings
    text_hidden_size = pipeline.text_encoder.config.hidden_size
    text_input = pipeline.tokenizer(
        "A sample prompt",
        padding="max_length",
        max_length=pipeline.tokenizer.model_max_length,
        truncation=True,
        return_tensors="pt",
    )
    onnx_export(
        pipeline.text_encoder,
        model_args=(text_input.input_ids.to(device=device, dtype=torch.int32)),
        output_path=output_path / "text_encoder" / "model.onnx",
        ordered_input_names=["input_ids"],
        output_names=["last_hidden_state", "pooler_output"],
        dynamic_axes={
            "input_ids": {0: "batch", 1: "sequence"},
        },
        opset=opset,
    )
    del pipeline.text_encoder

    # UNET
    unet_in_channels = pipeline.unet.config.in_channels
    unet_sample_size = pipeline.unet.config.sample_size
    unet_path = output_path / "unet" / "model.onnx"
    onnx_export(
        pipeline.unet,
        model_args=(
            torch.randn(2, unet_in_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
            torch.randn(2).to(device=device, dtype=dtype),
            torch.randn(2, num_tokens, text_hidden_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=unet_path,
        ordered_input_names=["sample", "timestep", "encoder_hidden_states", "return_dict"],
        output_names=["out_sample"],
        dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
            "timestep": {0: "batch"},
            "encoder_hidden_states": {0: "batch", 1: "sequence"},
        },
        opset=opset,
        use_external_data_format=True,  # UNet weights can exceed the 2 GB protobuf limit
    )
    unet_model_path = str(unet_path.absolute().as_posix())
    unet_dir = os.path.dirname(unet_model_path)
    unet = onnx.load(unet_model_path)
    # clean up existing tensor files
    shutil.rmtree(unet_dir)
    os.mkdir(unet_dir)
    # collate external tensor files into one
    onnx.save_model(
        unet,
        unet_model_path,
        save_as_external_data=True,
        all_tensors_to_one_file=True,
        location="weights.pb",
        convert_attribute=False,
    )
    del pipeline.unet

    # VAE ENCODER
    vae_encoder = pipeline.vae
    vae_in_channels = vae_encoder.config.in_channels
    vae_sample_size = vae_encoder.config.sample_size
    # need to get the raw tensor output (sample) from the encoder
    vae_encoder.forward = lambda sample, return_dict: vae_encoder.encode(sample, return_dict)[0].sample()
    onnx_export(
        vae_encoder,
        model_args=(
            torch.randn(1, vae_in_channels, vae_sample_size, vae_sample_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_encoder" / "model.onnx",
        ordered_input_names=["sample", "return_dict"],
        output_names=["latent_sample"],
        dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )

    # VAE DECODER
    vae_decoder = pipeline.vae
    vae_latent_channels = vae_decoder.config.latent_channels
    vae_out_channels = vae_decoder.config.out_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_encoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del pipeline.vae

    # SAFETY CHECKER
    if pipeline.safety_checker is not None:
        safety_checker = pipeline.safety_checker
        clip_num_channels = safety_checker.config.vision_config.num_channels
        clip_image_size = safety_checker.config.vision_config.image_size
        safety_checker.forward = safety_checker.forward_onnx
        onnx_export(
            pipeline.safety_checker,
            model_args=(
                torch.randn(1, clip_num_channels, clip_image_size, clip_image_size).to(device=device, dtype=dtype),
                torch.randn(1, vae_sample_size, vae_sample_size, vae_out_channels).to(device=device, dtype=dtype),
            ),
            output_path=output_path / "safety_checker" / "model.onnx",
            ordered_input_names=["clip_input", "images"],
            output_names=["out_images", "has_nsfw_concepts"],
            dynamic_axes={
                "clip_input": {0: "batch", 1: "channels", 2: "height", 3: "width"},
                "images": {0: "batch", 1: "height", 2: "width", 3: "channels"},
            },
            opset=opset,
        )
        del pipeline.safety_checker
        safety_checker = OnnxRuntimeModel.from_pretrained(output_path / "safety_checker")
        feature_extractor = pipeline.feature_extractor
    else:
        safety_checker = None
        feature_extractor = None

    onnx_pipeline = OnnxStableDiffusionPipeline(
        vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_encoder"),
        vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_decoder"),
        text_encoder=OnnxRuntimeModel.from_pretrained(output_path / "text_encoder"),
        tokenizer=pipeline.tokenizer,
        unet=OnnxRuntimeModel.from_pretrained(output_path / "unet"),
        scheduler=pipeline.scheduler,
        safety_checker=safety_checker,
        feature_extractor=feature_extractor,
        requires_safety_checker=safety_checker is not None,
    )
    onnx_pipeline.save_pretrained(output_path)
    print("ONNX pipeline saved to", output_path)

    del pipeline
    del onnx_pipeline
    _ = OnnxStableDiffusionPipeline.from_pretrained(output_path, provider="CPUExecutionProvider")
    print("ONNX pipeline is loadable")
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument(
'--model_path',
type=str,
required=True,
help='Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).',
)
parser.add_argument('--output_path', type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--opset',
default=14,
type=int,
help='The version of the ONNX operator set to use.',
)
parser.add_argument('--fp16', action='store_true', default=False, help='Export the models in `float16` mode')
a_ = parser.parse_args()
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
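    # Hypothetical invocation sketch (added; the model id and output path are
    # placeholders, not from the original script):
    #   python convert_stable_diffusion_checkpoint_to_onnx.py \
    #       --model_path runwayml/stable-diffusion-v1-5 \
    #       --output_path ./stable_diffusion_onnx --opset 14 --fp16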
| 76
|
from collections.abc import Callable
import numpy as np
def explicit_euler(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """Approximate the solution of y' = ode_func(x, y) with the forward Euler method."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
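    # Minimal demo sketch (added; not part of the original file): integrate y' = y
    # with y(0) = 1 over [0, 1]. The exact value at x = 1 is e ~= 2.71828, and the
    # forward Euler estimate with step 1e-3 should land close to it.
    approx = explicit_euler(lambda x, y: y, 1.0, 0.0, 1e-3, 1.0)
    print(f"Euler estimate of e: {approx[-1]:.5f}")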
| 76
| 1
|
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}
    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs
    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs, **model_inputs)
        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs
    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
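# Hedged usage sketch (added; the checkpoint name is an assumption -- any
# CLIP-style zero-shot image classification model should work):
#
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#   preds = classifier(
#       "http://images.cocodataset.org/val2017/000000039769.jpg",
#       candidate_labels=["cats", "dogs", "remote controls"],
#   )
#   print(preds)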
| 84
|
def or_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 84
| 1
|
def __get_demo_graph(index: int) -> dict[int, list[int]]:
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]:
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at, parent, bridges, id_):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges: list[tuple[int, int]] = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
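    # Demo sketch (added; not part of the original file): find the bridges of the
    # first sample graph above. Removing any printed edge disconnects the graph.
    print(compute_bridges(__get_demo_graph(0)))  # -> [(3, 4), (2, 3), (2, 5)]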
| 175
|
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
    Wav2Vec2FeatureExtractor,
logging,
)
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model
def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model
def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]
    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )
    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")
    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]
    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
a_ = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
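    # Hypothetical invocation sketch (added; all argument values are placeholders):
    #   python convert_unispeech_sat_s3prl_checkpoint.py \
    #       --base_model_name microsoft/unispeech-sat-base \
    #       --config_path ./config.json \
    #       --checkpoint_path ./s3prl_checkpoint.pt \
    #       --model_dump_path ./converted_model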
| 175
| 1
|
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
'config': [
'EXTERNAL_DATA_FORMAT_SIZE_LIMIT',
'OnnxConfig',
'OnnxConfigWithPast',
'OnnxSeq2SeqConfigWithPast',
'PatchingSpec',
],
'convert': ['export', 'validate_model_outputs'],
'features': ['FeaturesManager'],
'utils': ['ParameterFormat', 'compute_serialized_parameters_size'],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
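# Illustrative note (added): with the lazy registration above, a statement such as
#   from transformers.onnx import FeaturesManager
# only triggers the import of the `features` submodule at attribute-access time,
# keeping the top-level package import cheap.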
| 141
|
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name, num_meta4D_last_stage):
    new_name = old_name
    if "patch_embed" in old_name:
        hidden_layer, layer, param = old_name.split(".")
        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")
    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
            if "norm1" in old_name:
                trimmed_name = trimmed_name.replace("norm1", "layernorm1")
            elif "norm2" in old_name:
                trimmed_name = trimmed_name.replace("norm2", "layernorm2")
            elif "fc1" in old_name:
                trimmed_name = trimmed_name.replace("fc1", "linear_in")
            elif "fc2" in old_name:
                trimmed_name = trimmed_name.replace("fc2", "linear_out")
            new_name = "last_stage." + trimmed_name
    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")
    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name
    return new_name
def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val
    return checkpoint


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
def convert_efficientformer_checkpoint(
    checkpoint_path, efficientformer_config_file, pytorch_dump_path, push_to_hub
):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])
    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)
    model.load_state_dict(new_state_dict)
    model.eval()
    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values
    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)
    assert torch.allclose(original_pixel_values, pixel_values)
    outputs = model(pixel_values)
    logits = outputs.logits
    expected_shape = (1, 1000)
if "l1" in model_name:
A__ : List[str] = torch.Tensor(
[-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] )
assert torch.allclose(logits[0, :10] , lowercase_ , atol=1E-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
A__ : Any = torch.Tensor(
[-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] )
assert torch.allclose(logits[0, :10] , lowercase_ , atol=1E-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
A__ : Union[str, Any] = torch.Tensor(
[-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] )
assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"Unknown model checkpoint: {checkpoint_path}. Supported versions of EfficientFormer are l1, l3 and l7")
# Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfully saved at {pytorch_dump_path}")
if push_to_hub:
print("""Pushing model to the hub...""" )
model.push_to_hub(
repo_id=f"""Bearnardd/{pytorch_dump_path}""" , commit_message="""Add model""" , use_temp_dir=lowercase_ , )
processor.push_to_hub(
repo_id=f"""Bearnardd/{pytorch_dump_path}""" , commit_message="""Add image processor""" , use_temp_dir=lowercase_ , )
if __name__ == "__main__":
A_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path',
default=None,
type=str,
required=True,
help='Path to EfficientFormer pytorch checkpoint.',
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for EfficientFormer model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
parser.set_defaults(push_to_hub=True)
A_ : List[Any] = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
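    # Hypothetical invocation sketch (added; all paths are placeholders):
    #   python convert_efficientformer_original_pytorch_checkpoint_to_pytorch.py \
    #       --pytorch_model_path ./efficientformer_l1.pth \
    #       --config_file ./efficientformer_l1_config.json \
    #       --pytorch_dump_path ./efficientformer-l1 --no-push_to_hub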
| 141
| 1
|
'''simple docstring'''
def bfs(graph, s, t, parent):
    # Return True if there is node that has not iterated.
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]
def ford_fulkerson(graph, source, sink):
    # This array is filled by BFS and to store path
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
graph = [
[0, 1_6, 1_3, 0, 0, 0],
[0, 0, 1_0, 1_2, 0, 0],
[0, 4, 0, 0, 1_4, 0],
[0, 0, 9, 0, 0, 2_0],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
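# Added note: this adjacency matrix is the classic CLRS flow-network example;
# the maximum flow printed above for source 0 and sink 5 should be 23.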
| 181
|
from __future__ import annotations
from typing import Any
class Graph:
    def __init__(self, num_of_nodes: int) -> None:
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes
        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)
        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]
            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1
            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"The total weight of the minimal spanning tree is: {mst_weight}")
def __UpperCamelCase ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
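    # Demo sketch (added; not part of the original file): a 4-node graph whose
    # minimum spanning tree is 0-1 (1), 1-2 (2), 2-3 (3), for a total weight of 6.
    g = Graph(4)
    g.add_edge(0, 1, 1)
    g.add_edge(1, 2, 2)
    g.add_edge(2, 3, 3)
    g.add_edge(0, 3, 4)
    g.boruvka()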
| 216
| 0
|
"""simple docstring"""
def is_palindrome(head):
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True
def is_palindrome_stack(head):
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True
def is_palindrome_dict(head):
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
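if __name__ == "__main__":
    # Minimal demo sketch (added; the original file assumes an external singly
    # linked list node type, so this ad-hoc ListNode is an illustration only):
    class ListNode:
        def __init__(self, val):
            self.val = val
            self.next = None

    head = ListNode(1)
    head.next = ListNode(2)
    head.next.next = ListNode(1)
    print(is_palindrome_stack(head))  # True: 1 -> 2 -> 1 reads the same reversed
    print(is_palindrome_dict(head))   # True as well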
| 303
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger(__name__)
def get_detr_config(model_name):
    # initialize config
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50")
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101")
    else:
        raise ValueError("Model name should include either resnet50 or resnet101")
    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)
    # set label attributes
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config, is_panoptic
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight") )
rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight") )
rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias") )
rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean") )
rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var") )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var''',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var''',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''',
F'''encoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias''') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''',
F'''decoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
) )
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
) )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias''') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
] )
return rename_keys
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "detr."
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    config, is_panoptic = get_detr_config(model_name)
    # load original model from torch hub
    model_name_to_original_name = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load("facebookresearch/detr", model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = "detr." + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion on an image
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    processor = DetrImageProcessor(format=format)
    encoding = processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
# Upload model and image processor to the hub
logger.info("Uploading PyTorch model and image processor to the hub..." )
model.push_to_hub(F'''nielsr/{model_name}''' )
processor.push_to_hub(F'''nielsr/{model_name}''' )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='detr-resnet-50',
type=str,
choices=['detr-resnet-50', 'detr-resnet-101'],
help='Name of the DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model to the hub or not.')
UpperCamelCase_ = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
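    # Hypothetical invocation sketch (added; the dump folder is a placeholder):
    #   python convert_detr_to_pytorch.py --model_name detr-resnet-50 \
    #       --pytorch_dump_folder_path ./detr-resnet-50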
| 303
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_conditional_detr': [
'CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ConditionalDetrConfig',
'ConditionalDetrOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_conditional_detr"] = ["ConditionalDetrFeatureExtractor"]
    _import_structure["image_processing_conditional_detr"] = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_conditional_detr"] = [
'CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConditionalDetrForObjectDetection',
'ConditionalDetrForSegmentation',
'ConditionalDetrModel',
'ConditionalDetrPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 346
|
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class __snake_case ( unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
        model = FlaxMT5ForConditionalGeneration.from_pretrained('''google/mt5-small''')
        tokenizer = AutoTokenizer.from_pretrained('''google/mt5-small''')
        input_ids = tokenizer('''Hello there''', return_tensors='''np''').input_ids
        labels = tokenizer('''Hi I am''', return_tensors='''np''').input_ids
        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)
        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 348
| 0
|
"""simple docstring"""
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one(i):  # picklable for multiprocessing
    return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input():
    with parallel_backend("spark"):
        assert ParallelBackendConfig.backend_name == "spark"

    lst = [1, 2, 3]
    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=2)

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=-1)
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc" , [2, -1] )
def test_parallel_backend_map_nested(num_proc):
    s1 = [1, 2]
    s2 = {"a": 1, "b": 2}
    s3 = {"a": [1, 2], "b": [3, 4]}
    s4 = {"a": {"1": 1}, "b": 2}
    s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"a": 2, "b": 3}
    expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
    expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}
    with parallel_backend("spark"):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
| 209
|
"""simple docstring"""
import torch
def main() -> None:
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(F"""Successfully ran on {num_gpus} GPUs""")
if __name__ == "__main__":
main()
| 209
| 1
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
__UpperCAmelCase = logging.get_logger(__name__)
class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            """The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use VideoMAEImageProcessor instead.""",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 84
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/data2vec-text-base': 'https://huggingface.co/facebook/data2vec-text-base/resolve/main/config.json',
}


class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"
    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class Data2VecTextOnnxConfig(OnnxConfig):
@property
def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
lowerCAmelCase_ :List[Any] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowerCAmelCase_ :List[str] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
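# Hedged usage sketch (added for illustration; the values are arbitrary and the
# model class import assumes the standard transformers layout):
#
#   from transformers import Data2VecTextConfig, Data2VecTextModel
#   config = Data2VecTextConfig(hidden_size=256, num_hidden_layers=4, num_attention_heads=4)
#   model = Data2VecTextModel(config)  # randomly initialized, entirely config-driven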
| 84
| 1
|
"""simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        tubelet_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        mask_ratio=0.9,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.tubelet_size = tubelet_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame
        # use this variable to define bool_masked_pos
        self.num_masks = int(mask_ratio * self.seq_length)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return VideoMAEConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_frames=self.num_frames, tubelet_size=self.tubelet_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range)
    def create_and_check_model(self, config, pixel_values, labels):
        model = VideoMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = VideoMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        # important: each video needs to have the same number of masked patches
        # hence we define a single mask, which we then repeat for each example in the batch
        mask = torch.ones((self.num_masks,))
        mask = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0))])
        bool_masked_pos = mask.expand(self.batch_size, -1).bool()
        result = model(pixel_values, bool_masked_pos)
        # model only returns predictions for masked patches
        num_masked_patches = mask.sum().item()
        decoder_num_labels = 3 * self.tubelet_size * self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_masked_patches, decoder_num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class VideoMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
UpperCAmelCase = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
UpperCAmelCase = (
{"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
    def setUp(self):
        self.model_tester = VideoMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=VideoMAEConfig, has_text_modality=False, hidden_size=37)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class == VideoMAEForPreTraining:
            # important: each video needs to have the same number of masked patches
            # hence we define a single mask, which we then repeat for each example in the batch
            mask = torch.ones((self.model_tester.num_masks,))
            mask = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0))])
            bool_masked_pos = mask.expand(self.model_tester.batch_size, -1).bool()
            inputs_dict["""bool_masked_pos"""] = bool_masked_pos.to(torch_device)
        if return_labels:
            if model_class in [
                *get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["""labels"""] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""VideoMAE does not use inputs_embeds""" )
def UpperCamelCase ( self: Dict ):
"""simple docstring"""
pass
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase , nn.Linear ) )
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(UpperCamelCase )
A__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ = [*signature.parameters.keys()]
A__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCamelCase )
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
@slow
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = VideoMAEModel.from_pretrained(UpperCamelCase )
self.assertIsNotNone(UpperCamelCase )
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
if not self.has_attentions:
pass
else:
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = True
for model_class in self.all_model_classes:
A__ = self.model_tester.seq_length - self.model_tester.num_masks
A__ = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
A__ = True
A__ = False
A__ = True
A__ = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
A__ = outputs.attentions
self.assertEqual(len(UpperCamelCase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
A__ = True
A__ = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
A__ = outputs.attentions
self.assertEqual(len(UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
A__ = len(UpperCamelCase )
# Check attention is always last and order is fine
A__ = True
A__ = True
A__ = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
self.assertEqual(out_len + 1 , len(UpperCamelCase ) )
A__ = outputs.attentions
self.assertEqual(len(UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
def check_hidden_states_output(UpperCamelCase: Tuple , UpperCamelCase: Optional[Any] , UpperCamelCase: List[str] ):
A__ = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
A__ = outputs.hidden_states
A__ = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(UpperCamelCase ) , UpperCamelCase )
A__ = self.model_tester.seq_length - self.model_tester.num_masks
A__ = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = True
check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A__ = True
check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
pass
def _snake_case ( ):
A__ = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" )
A__ = np.load(UpperCAmelCase_ )
return list(UpperCAmelCase_ )
@require_torch
@require_vision
class a ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def UpperCamelCase ( self: Dict ):
"""simple docstring"""
A__ = VideoMAEForVideoClassification.from_pretrained("""MCG-NJU/videomae-base-finetuned-kinetics""" ).to(
UpperCamelCase )
A__ = self.default_image_processor
A__ = prepare_video()
A__ = image_processor(UpperCamelCase , return_tensors="""pt""" ).to(UpperCamelCase )
# forward pass
with torch.no_grad():
A__ = model(**UpperCamelCase )
# verify the logits
A__ = torch.Size((1, 4_00) )
self.assertEqual(outputs.logits.shape , UpperCamelCase )
A__ = torch.tensor([0.3_669, -0.0_688, -0.2_421] ).to(UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase , atol=1e-4 ) )
@slow
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
A__ = VideoMAEForPreTraining.from_pretrained("""MCG-NJU/videomae-base-short""" ).to(UpperCamelCase )
A__ = self.default_image_processor
A__ = prepare_video()
A__ = image_processor(UpperCamelCase , return_tensors="""pt""" ).to(UpperCamelCase )
# add boolean mask, indicating which patches to mask
A__ = hf_hub_download(repo_id="""hf-internal-testing/bool-masked-pos""" , filename="""bool_masked_pos.pt""" )
A__ = torch.load(UpperCamelCase )
# forward pass
with torch.no_grad():
A__ = model(**UpperCamelCase )
# verify the logits
A__ = torch.Size([1, 14_08, 15_36] )
A__ = torch.tensor(
[[0.7_994, 0.9_612, 0.8_508], [0.7_401, 0.8_958, 0.8_302], [0.5_862, 0.7_468, 0.7_325]] , device=UpperCamelCase )
self.assertEqual(outputs.logits.shape , UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , UpperCamelCase , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
A__ = torch.tensor([0.5_142] , device=UpperCamelCase )
self.assertTrue(torch.allclose(outputs.loss , UpperCamelCase , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
A__ = VideoMAEForPreTraining.from_pretrained("""MCG-NJU/videomae-base-short""" , norm_pix_loss=UpperCamelCase ).to(
UpperCamelCase )
with torch.no_grad():
A__ = model(**UpperCamelCase )
A__ = torch.tensor(torch.tensor([0.6_469] ) , device=UpperCamelCase )
self.assertTrue(torch.allclose(outputs.loss , UpperCamelCase , atol=1e-4 ) )
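# Note on the pre-training test above: `bool_masked_pos` holds one boolean per
# patch, with True marking the patches the decoder must reconstruct; the
# 1536-dim logits plausibly correspond to a flattened tubelet
# (2 frames x 16 x 16 pixels x 3 channels = 1536).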
"""simple docstring"""
from typing import Any
class a :
"""simple docstring"""
def __init__( self: List[Any] , UpperCamelCase: Any ):
"""simple docstring"""
A__ = data
A__ = None
class a :
"""simple docstring"""
def __init__( self: List[str] ):
"""simple docstring"""
A__ = None
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
A__ = self.head
while temp is not None:
print(temp.data , end=""" """ )
A__ = temp.next
print()
def UpperCamelCase ( self: str , UpperCamelCase: Any ):
"""simple docstring"""
A__ = Node(UpperCamelCase )
A__ = self.head
A__ = new_node
def UpperCamelCase ( self: Optional[int] , UpperCamelCase: Optional[int] , UpperCamelCase: Union[str, Any] ):
"""simple docstring"""
if node_data_a == node_data_a:
return
else:
A__ = self.head
while node_a is not None and node_a.data != node_data_a:
A__ = node_a.next
A__ = self.head
while node_a is not None and node_a.data != node_data_a:
A__ = node_a.next
if node_a is None or node_a is None:
return
A__ , A__ = node_a.data, node_a.data
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ : Tuple = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print('After swapping')
ll.print_list()
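# Quick sanity example: `push` prepends, so pushing 3, 2, 1 yields 1 2 3, and
# `swap_nodes` exchanges payloads via two O(n) scans, with no pointer surgery:
#
#   lst = LinkedList()
#   for value in (3, 2, 1):
#       lst.push(value)
#   lst.swap_nodes(1, 3)  # list is now 3 2 1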
"""Time Series Transformer model configuration."""
from typing import List, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/time-series-transformer-tourism-monthly": (
        "https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
    ),
    # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}


class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
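# A minimal usage sketch (values are illustrative): `context_length` falls back
# to the prediction horizon, and embedding sizes are derived from the cardinalities.
#
#   config = TimeSeriesTransformerConfig(
#       prediction_length=24, num_static_categorical_features=1, cardinality=[366]
#   )
#   assert config.context_length == 24
#   assert config.embedding_dimension == [50]  # min(50, (366 + 1) // 2)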
"""Fast tests for the DeepFloyd IF image-to-image superresolution pipeline."""
import random
import unittest

import torch

from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device

from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin


@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        # Tensor shapes follow the original source; both inputs are random dummies.
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
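# `width`/`height` are dropped from `params` above presumably because the
# superresolution stage derives its output size from its image inputs rather
# than from explicit dimensions, while `original_image` is batched alongside
# `image` and therefore joins the batch params.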
"""Tokenization classes for BART (byte-level BPE, GPT-2 style)."""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
    },
    "merges_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/bart-base": 1024,
    "facebook/bart-large": 1024,
    "facebook/bart-large-mnli": 1024,
    "facebook/bart-large-cnn": 1024,
    "facebook/bart-large-xsum": 1024,
    "yjernite/bart_eli5": 1024,
}


@lru_cache()
def bytes_to_unicode():
    """Returns a mapping from every utf-8 byte to a printable unicode character."""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class BartTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Merge the lowest-ranked (most frequent) bigram first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
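# Sketch of what the byte-level trick above buys us (needs no vocab files):
#
#   byte_encoder = bytes_to_unicode()
#   mapped = "".join(byte_encoder[b] for b in "héllo".encode("utf-8"))
#   # every raw byte now has a printable stand-in, so the BPE merges never have
#   # to deal with whitespace or control characters directly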
"""MRA model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}


class MraConfig(PretrainedConfig):
    model_type = "mra"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        position_embedding_type="absolute",
        block_per_row=4,
        approx_mode="full",
        initial_prior_first_n_blocks=0,
        initial_prior_diagonal_n_blocks=0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
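# Illustrative sketch: `block_per_row` and `approx_mode` steer MRA's
# multi-resolution attention approximation (the defaults above presumably match
# the uw-madison/mra-base-512-4 checkpoint referenced in the archive map).
#
#   config = MraConfig(block_per_row=4, approx_mode="full")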
"""Thin CLI around `calculate_rouge`; prints and optionally saves the scores."""
import fire

from utils import calculate_rouge, save_json


def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs will be passed to calculate_rouge."""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely


if __name__ == "__main__":
    fire.Fire(calculate_rouge_path)
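# Typical invocation via python-fire (input file names are placeholders):
#   python rouge_cli.py preds.txt targets.txt --save_path=rouge.json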
import sys
from collections import defaultdict


class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        # Sift the element at `start` down until the min-heap property holds.
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        # Bubble a decreased key up towards the root.
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prisms_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges


if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
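# Non-interactive example: a weighted triangle 0-1 (w=1), 1-2 (w=2), 0-2 (w=3).
#
#   example = defaultdict(list)
#   for u, v, w in [(0, 1, 1), (1, 2, 2), (0, 2, 3)]:
#       example[u].append([v, w])
#       example[v].append([u, w])
#   assert prisms_algorithm(example) == [(0, 1), (1, 2)]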
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
    "facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
    "facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
    "facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
    "facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
    "facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
    "facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
    "facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
    "facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}


class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language


class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
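# A minimal sketch: X-MOD keeps one set of adapter weights per entry in
# `languages`, and `default_language` picks the adapter used when no language
# id is passed at runtime.
#
#   config = XmodConfig(languages=["en_XX", "de_DE"], default_language="en_XX")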
from __future__ import annotations

import time

Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right


class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent


class BreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)

        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            successors = self.get_successors(current_node)

            for node in successors:
                self.node_queue.append(node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalBreadthFirstSearch:
    def __init__(self, start, goal):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node
                )

            # Retarget each frontier at the other side's current node.
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node

            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }

            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)

        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    import doctest

    doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time

    print("Unidirectional BFS computation time : ", bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time

    print("Bidirectional BFS computation time : ", bd_bfs_time)
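# Why the bidirectional variant is worth the bookkeeping: plain BFS explores on
# the order of b^d nodes for branching factor b and solution depth d, while
# searching from both ends meets in the middle after roughly b^(d/2) nodes per
# frontier, which is far fewer in total.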
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        # Note: MegatronBERT keeps a separate `embedding_size`, decoupled from
        # `hidden_size` (32 vs. 64 here), and the tester exercises that path.
        return MegatronBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_megatron_bert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_megatron_bert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_megatron_bert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_megatron_bert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_megatron_bert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MegatronBertModel,
            MegatronBertForMaskedLM,
            MegatronBertForCausalLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MegatronBertModel,
            "fill-mask": MegatronBertForMaskedLM,
            "question-answering": MegatronBertForQuestionAnswering,
            "text-classification": MegatronBertForSequenceClassification,
            "text-generation": MegatronBertForCausalLM,
            "token-classification": MegatronBertForTokenClassification,
            "zero-shot": MegatronBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    # test_resize_embeddings = False
    test_head_masking = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_megatron_bert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    # Note: the original passed the token list as the `device` argument too,
    # which was a bug; the tensor belongs on `torch_device`.
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-4


@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
    @slow
    @unittest.skip("Model is not available.")
    def test_inference_no_head(self):
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
    Speech2Text2Config,
    Speech2Text2ForCausalLM,
    Speech2Text2Tokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')


def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")

    return proj_weight


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def create_vocab_dict(dict_path):
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict


@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    encoder_config = Wav2Vec2Config.from_pretrained(encoder_config_path)
    decoder_config = Speech2Text2Config.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )

    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=True,
    )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    projection_layer = recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    hf_decoder = Speech2Text2ForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)

    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    # add projection layer
    hf_wav2vec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wav2vec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)

    tokenizer = Speech2Text2Tokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-large-lv60",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/s2t-small-mustc-en-fr-st",
type=str,
help="Path to hf decoder s2t checkpoint config",
)
parser.add_argument("--vocab_size", default=10_224, type=int, help="Vocab size of decoder")
parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
    args = parser.parse_args()
    convert_wavaveca_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.dict_path,
        encoder_config_path=args.encoder_config_path,
        decoder_config_path=args.decoder_config_path,
        vocab_size=args.vocab_size,
        num_decoder_layers=args.num_decoder_layers,
    )
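    # Usage sketch (added for illustration; not part of the original script). It loads
    # the freshly converted checkpoint back and runs greedy decoding on one second of
    # random noise, purely as a smoke test; the max_length value is an arbitrary choice.
    hf_model = SpeechEncoderDecoderModel.from_pretrained(args.pytorch_dump_folder_path)
    hf_tokenizer = SpeechaTextaTokenizer.from_pretrained(args.pytorch_dump_folder_path)
    hf_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(args.pytorch_dump_folder_path)
    dummy_inputs = hf_feature_extractor(torch.randn(16000).numpy(), sampling_rate=16000, return_tensors="pt")
    generated_ids = hf_model.generate(dummy_inputs.input_values, max_length=50)
    print(hf_tokenizer.batch_decode(generated_ids, skip_special_tokens=True))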
| 209
| 1
|
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer) -> None:
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs, )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs, )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        # forwarded to the underlying tokenizer
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        # forwarded to the underlying tokenizer
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
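# Usage sketch (added for illustration; not part of the original module). It builds the
# processor from a public BLIP checkpoint and preprocesses a blank stand-in image with a
# caption prefix; the checkpoint name is the usual public repo, any BLIP repo would do.
if __name__ == "__main__":
    import numpy as np
    from PIL import Image

    processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
    image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
    batch = processor(images=image, text="a photo of", return_tensors="pt")
    print(sorted(batch.keys()))  # pixel_values plus the tokenizer outputs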
| 33
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/table-transformer-detection": (
        "https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
    ),
}


class TableTransformerConfig(PretrainedConfig):
    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=100, encoder_layers=6, encoder_ffn_dim=2_048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=2_048, decoder_attention_heads=8, encoder_layerdrop=0.0, decoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, auxiliary_loss=False, position_embedding_type="sine", backbone="resnet50", use_pretrained_backbone=True, dilation=False, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, **kwargs, ) -> None:
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            backbone, use_pretrained_backbone, dilation = None, None, None
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model


class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ] )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
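# Usage sketch (added for illustration; not part of the original module). It instantiates
# the default config and inspects the ONNX input axes; this only exercises the config
# classes above and is not the full export path.
if __name__ == "__main__":
    config = TableTransformerConfig()
    print(config.num_attention_heads, config.hidden_size)  # 8, 256 via the attribute_map aliases
    onnx_config = TableTransformerOnnxConfig(config)
    print(dict(onnx_config.inputs))  # pixel_values with 4 dynamic axes, pixel_mask with 1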
| 33
| 1
|
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
    # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}


class Wav2Vec2Config(PretrainedConfig):
    model_type = "wav2vec2"

    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, feat_quantizer_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, do_stable_layer_norm=False, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, num_codevectors_per_group=320, num_codevector_groups=2, contrastive_logits_temperature=0.1, num_negatives=100, codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1, ctc_loss_reduction="sum", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, tdnn_dim=(512, 512, 512, 512, 1500), tdnn_kernel=(5, 3, 3, 1, 1), tdnn_dilation=(1, 2, 3, 1, 1), xvector_output_dim=512, pad_token_id=0, bos_token_id=1, eos_token_id=2, add_adapter=False, adapter_kernel_size=3, adapter_stride=2, num_adapter_layers=3, output_hidden_size=None, adapter_attn_dim=None, **kwargs, ) -> None:
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`.")
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
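# Worked example (added for illustration; not part of the original module). The property
# above multiplies the feature-extractor strides, which for the defaults gives the number
# of input samples per logit frame: 5 * 2 * 2 * 2 * 2 * 2 * 2 = 320, i.e. about 50 logit
# frames per second of 16 kHz audio.
if __name__ == "__main__":
    _demo_config = Wav2Vec2Config()
    assert _demo_config.inputs_to_logits_ratio == 320
    print(16_000 / _demo_config.inputs_to_logits_ratio)  # 50.0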
| 69
|
"""simple docstring"""
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "DecisionTransformerConfig",
    "EncoderDecoderConfig",
    "MusicgenConfig",
    "RagConfig",
    "SpeechEncoderDecoderConfig",
    "TimmBackboneConfig",
    "VisionEncoderDecoderConfig",
    "VisionTextDualEncoderConfig",
    "LlamaConfig",
}


def get_checkpoint_from_config_class(config_class):
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint


def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
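    # Worked example (added for illustration; not part of the original script). It shows
    # what the checkpoint regex extracts from a typical config docstring snippet.
    sample_doc = "Instantiating a configuration with [bert-base-uncased](https://huggingface.co/bert-base-uncased)."
    print(_re_checkpoint.findall(sample_doc))
    # [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]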
| 69
| 1
|
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer


class Wav2Vec2Processor(ProcessorMixin):
    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            warnings.warn(
                f"Loading a tokenizer inside {cls.__name__} from a config that does not"
                " include a `tokenizer_class` attribute is deprecated and will be "
                "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
                " attribute to either your `config.json` or `tokenizer_config.json` "
                "file to suppress this warning: ", FutureWarning, )
            feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)
            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call.")
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
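# Usage sketch (added for illustration; not part of the original module). It processes a
# silent one-second waveform together with a target transcription; the checkpoint name is
# the usual public CTC model and stands in for any Wav2Vec2 processor repo.
if __name__ == "__main__":
    import numpy as np

    processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
    speech = np.zeros(16_000, dtype=np.float32)  # silent stand-in audio
    batch = processor(audio=speech, sampling_rate=16_000, text="HELLO WORLD", return_tensors="pt")
    print(sorted(batch.keys()))  # input_values, plus labels from the text branch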
| 180
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}


class SEWDConfig(PretrainedConfig):
    model_type = "sew-d"

    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, squeeze_factor=2, max_position_embeddings=512, position_buckets=256, share_att_key=True, relative_attention=True, pos_att_type=("p2c", "c2p"), norm_rel_ebd="layer_norm", hidden_act="gelu_python", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, final_dropout=0.1, initializer_range=0.02, layer_norm_eps=1e-7, feature_layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512), conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, pad_token_id=0, bos_token_id=1, eos_token_id=2, **kwargs, ) -> None:
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.")
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 180
| 1
|
'''simple docstring'''
def fizz_buzz(number: int, iterations: int) -> str:
    """Plays FizzBuzz starting at `number` and counting up to `iterations`."""
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError("starting number must be an integer and be more than 0")
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")

    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)

        number += 1
        out += " "
    return out
if __name__ == "__main__":
import doctest
doctest.testmod()
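    # Usage example (added for illustration; not part of the original file).
    # fizz_buzz(1, 15) walks the numbers 1..15 and returns the familiar sequence:
    print(fizz_buzz(1, 15))
    # 1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz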
| 67
|
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPT2LMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow

if is_tf_available():
    import tensorflow as tf

if is_keras_nlp_available():
    from transformers.models.gpt2 import TFGPT2Tokenizer


TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"

if is_tf_available():

    class ModelToSave(tf.Module):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPT2LMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]
            return outputs


@require_tf
@require_keras_nlp
class GPTTokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif  those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()
                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs

            config = tf_tokenizer.get_config()
            model_from_config = TFGPT2Tokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 123123

            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)

                out_length = out["input_ids"].numpy().shape[1]

                assert out_length == max_length
| 67
| 1
|
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer


def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line], max_length=max_length, padding="max_length" if pad_to_max_length else None, truncation=True, return_tensors=return_tensors, add_special_tokens=True, **extra_kw, )


def trim_batch(input_ids, pad_token_id, attention_mask=None, ):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset(Dataset):
    def __init__(self, tokenizer, data_dir, max_source_length, max_target_length, type_path="train", n_obs=None, src_lang=None, tgt_lang=None, prefix="", ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch):
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s: str) -> str:
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    fa = (2 * precision * recall) / (precision + recall)
    return fa


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
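# Worked example (added for illustration; not part of the original module). The token-level
# F1 above normalizes both strings (lowercase, strip articles and punctuation) first:
if __name__ == "__main__":
    print(normalize_answer("The Cat sat."))  # "cat sat"
    # "cat sat" matches 2 of 4 prediction tokens: precision 0.5, recall 1.0 -> F1 ~0.67
    print(f1_score("The cat sat on the mat", "a cat sat"))
    print(calculate_exact_match(["the cat"], ["The Cat!"]))  # {'em': 1.0}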
| 167
|
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_A = logging.get_logger(__name__)
class GLPNImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize=True, size_divisor=32, resample=PILImageResampling.BILINEAR, do_rescale=True, **kwargs, ):
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(self, image, size_divisor, resample, data_format=None, **kwargs):
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(self, images, do_resize=None, do_rescale=None, size_divisor=None, resample=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs, ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError("Invalid image(s)")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
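# Usage sketch (added for illustration; not part of the original module). It feeds one
# random HWC image through the processor; 193x301 rounds down to 192x288 with the default
# size_divisor of 32, and pixel values are rescaled to [0, 1].
if __name__ == "__main__":
    import numpy as np

    image_processor = GLPNImageProcessor()
    image = (np.random.rand(193, 301, 3) * 255).astype(np.uint8)
    batch = image_processor.preprocess(image, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 3, 192, 288)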
| 167
| 1
|
def remove_digit(num: int) -> int:
    """Returns the biggest number that can be formed by removing exactly one digit."""
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    num_str = str(abs(num))
    num_transpositions = [list(num_str) for _ in range(len(num_str))]
    for index in range(len(num_str)):
        num_transpositions[index].pop(index)
    return max(
        int("".join(list(transposition))) for transposition in num_transpositions)
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 59
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids

        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()

        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
| 116
| 0
|
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id, token=None):
    """Extract job names and their job links in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}

    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


def get_artifacts_links(workflow_run_id, token=None):
    """Get all artifact links from a workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}

    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})

        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download an artifact; the archive URL redirects, so we follow the `Location` header."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)
def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors from a downloaded artifact (in .zip format)."""
    errors = []
    failed_tests = []
    job_name = None

    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line

    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem.")

    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)

    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]

    return result


def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from all artifact files."""
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]

    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))

    return errors


def reduce_by_error(logs, error_filter=None):
    """Count the occurrence of each error."""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def get_model(test):
    """Get the model name from a test method path."""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        test = test.split("/")[2]
    else:
        test = None

    return test


def reduce_by_model(logs, error_filter=None):
    """Group the errors by model."""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}

    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def make_github_table(reduced_by_error):
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)

    return "\n".join(lines)


def make_github_table_per_model(reduced_by_model):
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)

    return "\n".join(lines)
if __name__ == "__main__":
_lowerCamelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
_lowerCamelCase : Tuple = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
_lowerCamelCase : Optional[int] = get_job_links(args.workflow_run_id, token=args.token)
_lowerCamelCase : int = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
_lowerCamelCase : Optional[Any] = k.find(" / ")
_lowerCamelCase : Tuple = k[index + len(" / ") :]
_lowerCamelCase : List[Any] = v
with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
_lowerCamelCase : Union[str, Any] = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
_lowerCamelCase : str = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
_lowerCamelCase : Dict = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
_lowerCamelCase : Union[str, Any] = counter.most_common(3_0)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
_lowerCamelCase : str = reduce_by_error(errors)
_lowerCamelCase : Tuple = reduce_by_model(errors)
_lowerCamelCase : List[str] = make_github_table(reduced_by_error)
_lowerCamelCase : Optional[Any] = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
| 159
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
    "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
    "kssteven/ibert-roberta-large-mnli": (
        "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
    ),
}


class IBertConfig(PretrainedConfig):
    model_type = "ibert"

    def __init__(self, vocab_size=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", quant_mode=False, force_dequant="none", **kwargs, ) -> None:
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant


class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
| 159
| 1
|
"""simple docstring"""
import argparse
import torch
from transformers import GPTaLMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"])
    parser.add_argument("--model_name", default="roberta-large", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "roberta":
        model = RobertaForMaskedLM.from_pretrained(args.model_name)
        prefix = "roberta"
    elif args.model_type == "gpt2":
        model = GPTaLMHeadModel.from_pretrained(args.model_name)
        prefix = "transformer"

    state_dict = model.state_dict()
    compressed_sd = {}

    # Embeddings #
    if args.model_type == "gpt2":
        for param_name in ["wte.weight", "wpe.weight"]:
            compressed_sd[f"{prefix}.{param_name}"] = state_dict[f"{prefix}.{param_name}"]
    else:
        for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
            param_name = f"{prefix}.embeddings.{w}.weight"
            compressed_sd[param_name] = state_dict[param_name]
        for w in ["weight", "bias"]:
            param_name = f"{prefix}.embeddings.LayerNorm.{w}"
            compressed_sd[param_name] = state_dict[param_name]

    # Transformer Blocks #
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        if args.model_type == "gpt2":
            for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.h.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.h.{teacher_idx}.{layer}.{w}"
                    ]
            compressed_sd[f"{prefix}.h.{std_idx}.attn.bias"] = state_dict[f"{prefix}.h.{teacher_idx}.attn.bias"]
        else:
            for layer in [
                "attention.self.query",
                "attention.self.key",
                "attention.self.value",
                "attention.output.dense",
                "attention.output.LayerNorm",
                "intermediate.dense",
                "output.dense",
                "output.LayerNorm",
            ]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.encoder.layer.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"
                    ]
        std_idx += 1

    # Language Modeling Head ###s
    if args.model_type == "roberta":
        for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
            compressed_sd[f"{layer}"] = state_dict[f"{layer}"]
        if args.vocab_transform:
            for w in ["weight", "bias"]:
                compressed_sd[f"lm_head.dense.{w}"] = state_dict[f"lm_head.dense.{w}"]
                compressed_sd[f"lm_head.layer_norm.{w}"] = state_dict[f"lm_head.layer_norm.{w}"]
    elif args.model_type == "gpt2":
        for w in ["weight", "bias"]:
            compressed_sd[f"{prefix}.ln_f.{w}"] = state_dict[f"{prefix}.ln_f.{w}"]
        compressed_sd["lm_head.weight"] = state_dict["lm_head.weight"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
| 33
|
"""simple docstring"""
from __future__ import annotations
__A : List[Any] = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
__A : str = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def lowercase ( __snake_case : list[float] ):
lowercase_ : List[str] = []
lowercase_ : List[Any] = len(__snake_case )
for i in range(__snake_case ):
lowercase_ : float = -1
for j in range(i + 1 , __snake_case ):
if arr[i] < arr[j]:
lowercase_ : List[str] = arr[j]
break
result.append(__snake_case )
return result
def lowercase ( __snake_case : list[float] ):
lowercase_ : List[str] = []
for i, outer in enumerate(__snake_case ):
lowercase_ : float = -1
for inner in arr[i + 1 :]:
if outer < inner:
lowercase_ : List[Any] = inner
break
result.append(__snake_case )
return result
def lowercase ( __snake_case : list[float] ):
lowercase_ : List[str] = len(__snake_case )
lowercase_ : list[float] = []
lowercase_ : list[float] = [-1] * arr_size
for index in reversed(range(__snake_case ) ):
if stack:
while stack[-1] <= arr[index]:
stack.pop()
if not stack:
break
if stack:
lowercase_ : Optional[Any] = stack[-1]
stack.append(arr[index] )
return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
__A : int = (
'''from __main__ import arr, next_greatest_element_slow, '''
'''next_greatest_element_fast, next_greatest_element'''
)
print(
'''next_greatest_element_slow():''',
timeit('''next_greatest_element_slow(arr)''', setup=setup),
)
print(
'''next_greatest_element_fast():''',
timeit('''next_greatest_element_fast(arr)''', setup=setup),
)
print(
''' next_greatest_element():''',
timeit('''next_greatest_element(arr)''', setup=setup),
)
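As a quick sanity check (an illustrative snippet, not part of the original file), all three implementations should agree with the expected output defined at the top of the module:

# Illustrative check: all three variants produce the expected answer.
assert next_greatest_element_slow(arr) == expect
assert next_greatest_element_fast(arr) == expect
assert next_greatest_element(arr) == expect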
| 33
| 1
|
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class ConfigTester(object):
    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )
        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"])

        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f"`{prop}` does not exist")

        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

    def create_and_test_config_to_json_string(self):
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)

    def create_and_test_config_to_json_file(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "config.json")
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        config_first = self.config_class(**self.inputs_dict)
        subfolder = "test"
        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_configpath = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_configpath)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_with_num_labels(self):
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)

        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)

    def check_config_can_be_init_without_params(self):
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)

    def check_config_arguments_init(self):
        kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**kwargs)
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    if config.torch_dtype != torch.float16:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.float16))
            elif getattr(config, key) != value:
                wrong_values.append((key, getattr(config, key), value))

        if len(wrong_values) > 0:
            errors = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values])
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}")

    def run_common_tests(self):
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
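Illustrative usage, assumed rather than taken from the original file: a model's test suite would typically drive the tester from a unittest case, passing any config overrides as keyword arguments.

# Sketch of how ConfigTester is meant to be driven (hidden_size=37 is arbitrary).
import unittest

from transformers import BertConfig


class BertConfigTest(unittest.TestCase):
    def test_config(self):
        ConfigTester(self, config_class=BertConfig, hidden_size=37).run_common_tests()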
| 350
|
def equated_monthly_installments(principal: float, rate_per_annum: float, years_to_repay: int) -> float:
    """
    Formula for the amortized monthly payment:
    A = p * r * (1 + r)^n / ((1 + r)^n - 1)
    where p is the principal, r is the rate per month and n is the number of payments.
    """
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12

    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12

    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
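A quick worked example with illustrative numbers: borrowing 25,000 at 12% per annum over 3 years gives a monthly rate of 0.01 and 36 payments, so the formula yields about 830.36 per month.

# Illustrative: 25000 principal, 12% annual rate, 3 years -> ~830.36 per month.
print(round(equated_monthly_installments(25000, 0.12, 3), 2))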
| 237
| 0
|
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
SAMPLE_ROBERTA_CONFIG = get_tests_dir("fixtures/dummy-config.json")


class AutoConfigTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_module_spec(self):
        self.assertIsNotNone(transformers.models.auto.__spec__)
        self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto"))

    def test_config_from_model_shortcut(self):
        config = AutoConfig.from_pretrained("bert-base-uncased")
        self.assertIsInstance(config, BertConfig)

    def test_config_model_type_from_local_file(self):
        config = AutoConfig.from_pretrained(SAMPLE_ROBERTA_CONFIG)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_model_type_from_model_identifier(self):
        config = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_for_model_str(self):
        config = AutoConfig.for_model("roberta")
        self.assertIsInstance(config, RobertaConfig)

    def test_pattern_matching_fallback(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            # This model name contains bert and roberta, but roberta ends up being picked.
            folder = os.path.join(tmp_dir, "fake-roberta")
            os.makedirs(folder, exist_ok=True)
            with open(os.path.join(folder, "config.json"), "w") as f:
                f.write(json.dumps({}))
            config = AutoConfig.from_pretrained(folder)
            self.assertEqual(type(config), RobertaConfig)

    def test_new_config_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            # Wrong model type will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("model", CustomConfig)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("bert", BertConfig)

            # Now that the config is registered, it can be used as any other config with the auto-API
            config = CustomConfig()
            with tempfile.TemporaryDirectory() as tmp_dir:
                config.save_pretrained(tmp_dir)
                new_config = AutoConfig.from_pretrained(tmp_dir)
                self.assertIsInstance(new_config, CustomConfig)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoConfig.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_configuration_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.",
        ):
            _ = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo")

    def test_from_pretrained_dynamic_config(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)

        config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
        self.assertEqual(config.__class__.__name__, "NewModelConfig")

        # Test config can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            reloaded_config = AutoConfig.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_config.__class__.__name__, "NewModelConfig")

    def test_from_pretrained_dynamic_config_conflict(self):
        class NewModelConfigLocal(BertConfig):
            model_type = "new-model"

        try:
            AutoConfig.register("new-model", NewModelConfigLocal)
            # If remote code is not set, the default is to use local
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote code is disabled, we load the local one.
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote is enabled, we load from the Hub
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
            self.assertEqual(config.__class__.__name__, "NewModelConfig")
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
| 180
|
from math import isqrt


def is_prime(number: int) -> bool:
    """Trial division up to the integer square root."""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """
    Count primes below max_prime that are differences of consecutive cubes.
    (n + 1)^3 - n^3 = 3n^2 + 3n + 1, and successive differences grow by 6(n + 1),
    so the candidates can be generated incrementally starting from 7.
    """
    primes_count = 0
    cube_index = 1
    prime_candidate = 7  # 2**3 - 1**3 = 7 is the first such difference
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count
if __name__ == "__main__":
print(F'''{solution() = }''')
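As a small check (illustrative, not part of the original file): the candidates below 100 are 7, 19, 37, 61 and 91; the first four are prime while 91 = 7 * 13 is not, so the count is 4.

# Illustrative check on a small bound.
print(solution(100))  # 4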
| 180
| 1
|
'''simple docstring'''
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    """`FeatureConnector` for translations with fixed languages per example."""

    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the Translation feature into a dictionary."""
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}


@dataclass
class TranslationVariableLanguages:
    """`FeatureConnector` for translations with variable languages per example."""

    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f"Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({', '.join(lang_set)})."
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self):
        """Flatten the TranslationVariableLanguages feature into a dictionary."""
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
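An illustrative encoding, using the classes defined in the module above: multiple translations for one language are split into parallel, language-sorted lists.

feature = TranslationVariableLanguages(languages=["de", "en", "fr"])
print(feature.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"], "de": "die katze"}))
# {'language': ('de', 'en', 'fr', 'fr'), 'translation': ('die katze', 'the cat', 'la chatte', 'le chat')}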
| 6
|
'''simple docstring'''
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(""".""")
def get_module_path(test_file):
    """Return the module path (dotted) of a model test file."""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )

    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)
    return test_module_path


def get_test_module(test_file):
    """Import and return the module of a model test file."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module


def get_tester_classes(test_file):
    """Collect all `*ModelTester` classes defined in a model test file."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    """Collect all test classes (those with a non-empty `all_model_classes`)."""
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)

    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    """Collect all model classes covered by the test classes of a test file."""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)

    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    """Return the model tester class attached to a test class instance, if any."""
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()

    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__

    return model_tester


def get_test_classes_for_model(test_file, model_class):
    """Collect the test classes that cover a given model class."""
    test_classes = get_test_classes(test_file)

    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)

    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    """Collect the tester classes that cover a given model class."""
    test_classes = get_test_classes_for_model(test_file, model_class)

    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    """Map each test class to its model tester class."""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    """Map each model class to the test classes that cover it."""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    """Map each model class to the tester classes that cover it."""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    """Make the mappings above JSON-serializable by replacing classes with their names."""
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
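An illustrative invocation, under the assumption that it is run from the root of a transformers checkout so that tests/models/bert exists:

if __name__ == "__main__":
    bert_test_file = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
    print(to_json(get_test_to_tester_mapping(bert_test_file)))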
| 6
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_squeezebert"] = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
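A brief sketch of what the lazy-module pattern buys (assumptions: transformers is installed; the attribute name is a real export of this subpackage). Importing the package is cheap, and the heavy submodule import only happens on first attribute access:

import importlib

squeezebert = importlib.import_module("transformers.models.squeezebert")
config_cls = squeezebert.SqueezeBertConfig  # the config submodule is imported here, on demand
print(config_cls.model_type)  # squeezebert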
| 167
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class CLIPSegProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of random PIL images of shape (3, 30, 400)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor_text(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_visual_prompt(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
| 1
|
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }


@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})

    def test_batch_feature(self):
        pass

    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
|
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
| 0
|
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def create_components():
    model = torch.nn.Linear(2, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1)
    train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3])))
    valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6])))

    return model, optimizer, scheduler, train_dl, valid_dl


def get_signature(model):
    return (model.weight.abs().sum() + model.bias.abs().sum()).item()


def load_random_weights(model):
    state = torch.nn.Linear(*tuple(model.weight.T.shape)).state_dict()
    model.load_state_dict(state)


class AcceleratorTester(AccelerateTestCase):
    @require_cuda
    def test_accelerator_can_be_reinstantiated(self):
        accelerator = Accelerator()
        assert PartialState._shared_state["_cpu"] is False
        assert PartialState._shared_state["device"].type == "cuda"
        with self.assertRaises(ValueError):
            _ = Accelerator(cpu=True)

    def test_mutable_states(self):
        accelerator = Accelerator()
        state = GradientState()
        assert state.num_steps == 1
        accelerator.gradient_accumulation_steps = 4
        assert state.num_steps == 4

        assert state.sync_gradients is True
        accelerator.sync_gradients = False
        assert state.sync_gradients is False
        GradientState._reset_state()

    def test_prepared_objects_are_referenced(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()

        (
            prepared_model,
            prepared_optimizer,
            prepared_scheduler,
            prepared_train_dl,
            prepared_valid_dl,
        ) = accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        self.assertTrue(prepared_model in accelerator._models)
        self.assertTrue(prepared_optimizer in accelerator._optimizers)
        self.assertTrue(prepared_scheduler in accelerator._schedulers)
        self.assertTrue(prepared_train_dl in accelerator._dataloaders)
        self.assertTrue(prepared_valid_dl in accelerator._dataloaders)

    def test_free_memory_dereferences_prepared_components(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
        accelerator.free_memory()

        self.assertTrue(len(accelerator._models) == 0)
        self.assertTrue(len(accelerator._optimizers) == 0)
        self.assertTrue(len(accelerator._schedulers) == 0)
        self.assertTrue(len(accelerator._dataloaders) == 0)

    def test_env_var_device(self):
        PartialState._reset_state()

        # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
        def noop(*args, **kwargs):
            pass

        with patch("torch.cuda.set_device", noop), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64"):
            accelerator = Accelerator()
            self.assertEqual(str(accelerator.state.device), "cuda:64")

    def test_save_load_model(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        model_signature = get_signature(model)

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # make sure loaded weights match
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

    def test_save_load_model_with_hooks(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        model_signature = get_signature(model)

        # saving hook
        def save_config(models, weights, output_dir):
            config = {"class_name": models[0].__class__.__name__}

            with open(os.path.join(output_dir, "data.json"), "w") as f:
                json.dump(config, f)

        # loading hook
        def load_config(models, input_dir):
            with open(os.path.join(input_dir, "data.json"), "r") as f:
                config = json.load(f)

            models[0].class_name = config["class_name"]

        save_hook = accelerator.register_save_state_pre_hook(save_config)
        load_hook = accelerator.register_load_state_pre_hook(load_config)

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match with hooks
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # random class name to verify correct one is loaded
            model.class_name = "random"

            # make sure loaded weights match with hooks
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

            # mode.class_name is loaded from config
            self.assertTrue(model.class_name == model.__class__.__name__)

        # remove hooks
        save_hook.remove()
        load_hook.remove()

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match with hooks removed
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # random class name to verify correct one is loaded
            model.class_name = "random"

            # make sure loaded weights match with hooks removed
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

            # mode.class_name is NOT loaded from config
            self.assertTrue(model.class_name != model.__class__.__name__)

    def test_accelerator_none(self):
        """Just test that passing None to accelerator.prepare() works."""
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = None

        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj
        )
        self.assertTrue(dummy_obj is None)

    def test_is_accelerator_prepared(self):
        """Checks that `_is_accelerate_prepared` is set properly on prepared objects."""
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = [1, 2, 3]

        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj
        )
        self.assertEqual(
            getattr(dummy_obj, "_is_accelerate_prepared", False),
            False,
            "Dummy object should have `_is_accelerate_prepared` set to `False`",
        )
        self.assertEqual(
            getattr(model, "_is_accelerate_prepared", False),
            True,
            "Model is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(optimizer, "_is_accelerate_prepared", False),
            True,
            "Optimizer is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(scheduler, "_is_accelerate_prepared", False),
            True,
            "Scheduler is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(train_dl, "_is_accelerate_prepared", False),
            True,
            "Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(valid_dl, "_is_accelerate_prepared", False),
            True,
            "Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`",
        )

    @slow
    @require_bnb
    def test_accelerator_bnb(self):
        """Tests that the accelerator can be used with an 8-bit (bitsandbytes) model."""
        from transformers import AutoModelForCausalLM

        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map={"": 0},
        )
        accelerator = Accelerator()

        # This should work
        model = accelerator.prepare(model)

    @slow
    @require_bnb
    def test_accelerator_bnb_cpu_error(self):
        """Preparing an 8-bit model partially offloaded to CPU should fail."""
        from transformers import AutoModelForCausalLM

        accelerator = Accelerator()
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = "cpu"

        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m", device_map=device_map, load_in_8bit=True, llm_int8_enable_fp32_cpu_offload=True
        )

        # This should not work and get value error
        with self.assertRaises(ValueError):
            model = accelerator.prepare(model)

    @slow
    @require_bnb
    @require_multi_gpu
    def test_accelerator_bnb_multi_gpu(self):
        """Preparing an 8-bit model spread over several GPUs should fail under MULTI_GPU."""
        from transformers import AutoModelForCausalLM

        PartialState._shared_state = {"distributed_type": DistributedType.MULTI_GPU}
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = 1

        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map=device_map,
        )
        accelerator = Accelerator()

        # This should not work and get value error
        with self.assertRaises(ValueError):
            _ = accelerator.prepare(model)

        PartialState._reset_state()

    @slow
    @require_bnb
    @require_multi_gpu
    def test_accelerator_bnb_multi_gpu_no_distributed(self):
        """Preparing an 8-bit multi-GPU model works when no distributed setup is active."""
        from transformers import AutoModelForCausalLM

        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = 1

        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map=device_map,
        )
        accelerator = Accelerator()

        # This should work
        _ = accelerator.prepare(model)

    @require_cuda
    def test_accelerator_cpu_flag_prepare(self):
        model = torch.nn.Linear(10, 10)
        sgd = torch.optim.SGD(model.parameters(), lr=0.01)
        accelerator = Accelerator(cpu=True)
        _ = accelerator.prepare(sgd)
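A minimal sketch of the save_state/load_state round-trip these tests exercise, under the assumptions that accelerate is installed and the run is forced to CPU:

import tempfile

import torch
from accelerate import Accelerator

accelerator = Accelerator(cpu=True)
model = accelerator.prepare(torch.nn.Linear(2, 4))
with tempfile.TemporaryDirectory() as ckpt_dir:
    accelerator.save_state(ckpt_dir)
    torch.nn.init.zeros_(model.weight)  # clobber the weights...
    accelerator.load_state(ckpt_dir)    # ...and restore them from the checkpoint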
| 159
|
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """Convert a saved state dict (e.g. pytorch_model.bin) to fp16, in place or at save_path."""
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
if __name__ == "__main__":
fire.Fire(convert)
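An illustrative round-trip of the converter above (the file names are assumptions; the tiny checkpoint is created on the spot to keep the example self-contained):

import torch

torch.save(torch.nn.Linear(2, 2).state_dict(), "tiny.bin")
convert("tiny.bin", save_path="tiny_fp16.bin")
print({k: v.dtype for k, v in torch.load("tiny_fp16.bin").items()})
# -> every tensor is torch.float16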
| 159
| 1
|
"""simple docstring"""
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
__A : Tuple = logging.get_logger(__name__)
__A : Any = {
'''google/umt5-small''': '''https://huggingface.co/google/umt5-small/resolve/main/config.json''',
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UMT5Config(PretrainedConfig):
    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=250112,
        d_model=512,
        d_kv=64,
        d_ff=1024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers


class UMT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13

    @property
    def atol_for_validation(self) -> float:
        return 5e-4
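A quick illustrative instantiation of the config above, showing that the aliased properties delegate to the T5-style attribute names:

config = UMT5Config()
print(config.model_type, config.hidden_size, config.num_attention_heads)  # umt5 512 6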
| 27
|
"""simple docstring"""
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        # Rough number of raw characters to buffer before tokenizing a chunk.
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)


def create_dataloader(args):
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader


def evaluate(args):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()


# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
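# Illustrative invocation sketch (not part of the original script; the checkpoint and
# dataset names below are assumptions — EvaluationArguments supplies model_ckpt,
# dataset_name, batch_size, seq_length, max_eval_steps and seed):
#   accelerate launch validation_loss.py \
#       --model_ckpt codeparrot/codeparrot \
#       --dataset_name codeparrot/codeparrot-clean-valid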
| 27
| 1
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_focalnet"] = [
"""FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FocalNetForImageClassification""",
"""FocalNetForMaskedImageModeling""",
"""FocalNetBackbone""",
"""FocalNetModel""",
"""FocalNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 82
|
'''simple docstring'''
import functools
def min_distance_up_bottom(word1: str, word2: str) -> int:
    """
    Top-down (memoized) edit distance between two words.

    >>> min_distance_up_bottom("intention", "execution")
    5
    """
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if first word index is overflow - delete all from the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index is overflow - delete all from the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
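# Note on the implementation above: functools.cache memoizes every (index1, index2)
# state, so the three-way recursion runs in O(len(word1) * len(word2)) time and space;
# without the cache the same recursion would be exponential.
# Worked example: min_distance_up_bottom("intention", "execution") -> 5.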
| 237
| 0
|
"""simple docstring"""
import datasets
from .evaluate import evaluate
_CITATION = "\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n"
_DESCRIPTION = "\nThis metric wraps the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n"
_KWARGS_DESCRIPTION = "\nComputes SQuAD scores (F1 and EM).\nArgs:\n    predictions: List of question-answers dictionaries with the following key-values:\n        - 'id': id of the question-answer pair as given in the references (see below)\n        - 'prediction_text': the text of the answer\n    references: List of question-answers dictionaries with the following key-values:\n        - 'id': id of the question-answer pair (see above),\n        - 'answers': a Dict in the SQuAD dataset format\n            {\n                'text': list of possible texts for the answer, as a list of strings\n                'answer_start': list of start positions for the answer, as a list of ints\n            }\n            Note that answer_start values are not taken into account to compute the metric.\nReturns:\n    'exact_match': Exact match (the normalized answer exactly match the gold answer)\n    'f1': The F-score of predicted tokens versus the gold answer\nExamples:\n\n    >>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]\n    >>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]\n    >>> squad_metric = datasets.load_metric(\"squad\")\n    >>> results = squad_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {'exact_match': 100.0, 'f1': 100.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class Squad(datasets.Metric):
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": {"id": datasets.Value("string" ), "prediction_text": datasets.Value("string" )},
"references": {
"id": datasets.Value("string" ),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
},
} ) , codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , )
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
{
"paragraphs": [
{
"qas": [
{
"answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
"id": ref["id"],
}
for ref in references
]
}
]
}
]
        score = evaluate(dataset=dataset, predictions=pred_dict)
return score
| 353
|
"""simple docstring"""
INSTALL_CONTENT = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 230
| 0
|
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the Translation feature into a dictionary."""
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}


@dataclass
class TranslationVariableLanguages:
    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f"Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({', '.join(lang_set)})."
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the TranslationVariableLanguages feature into a dictionary."""
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
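# Example of TranslationVariableLanguages.encode_example (a sketch with assumed inputs):
#   feature = TranslationVariableLanguages(languages=["en", "fr", "de"])
#   feature.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"]})
#   -> {"language": ("en", "fr", "fr"), "translation": ("the cat", "le chat", "la chatte")}
# Multi-valued entries are split into one (language, translation) pair each, then
# sorted by language code, which is why "fr" appears twice.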
| 6
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward everything to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward everything to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
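# Minimal usage sketch (the checkpoint name is an assumption, not taken from this file):
#   processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#   inputs = processor(text=["一只小猫"], images=image, return_tensors="pt")
# Text goes through the BERT tokenizer, images through the image processor, and the
# two outputs are merged into a single encoding with a `pixel_values` key.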
| 6
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_pix2struct''': [
'''PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Pix2StructConfig''',
'''Pix2StructTextConfig''',
'''Pix2StructVisionConfig''',
],
'''processing_pix2struct''': ['''Pix2StructProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pix2struct"] = [
'''PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Pix2StructPreTrainedModel''',
'''Pix2StructForConditionalGeneration''',
'''Pix2StructVisionModel''',
'''Pix2StructTextModel''',
]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_pix2struct import Pix2StructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 370
|
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    """
    Given a sorted list of integers, return indices of the two numbers that add
    up to `target` using the classic two-pointer sweep.

    >>> two_pointer([2, 7, 11, 15], 9)
    [0, 1]
    """
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{two_pointer([2, 7, 11, 15], 9) = }""")
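# Note: the sweep above relies on `nums` being sorted in ascending order; the pointer
# moves (advance `i` when the sum is too small, retreat `j` when too large) are only
# justified under that ordering, giving O(n) time on a sorted input.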
| 196
| 0
|
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
'tokenizer_config_file': {
'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}


class BlenderbotTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BlenderbotTokenizer

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        # Align the backend pre-tokenizer with the requested `add_prefix_space` setting.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. include the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        # Blenderbot only appends the eos token; sequence pairs are not used by this model.
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
| 44
|
"""simple docstring"""
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    """Find the 1-based line number of the base/exponent pair with the greatest value."""
    largest: float = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result
if __name__ == "__main__":
print(solution())
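# Design note: the loop compares x * log10(a) instead of a**x because the powers in
# base_exp.txt (Project Euler 99 pairs on the order of 632382**518061) are far too
# large to materialize; log10 is monotonic, so it preserves the ordering while each
# comparison stays a cheap float operation.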
| 269
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""",
"""roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""",
}
class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 370
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/nllb-large-en-ro""": 10_24,
"""facebook/nllb-200-distilled-600M""": 10_24,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""]
class NllbTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", src_lang=None, tgt_lang=None, additional_special_tokens=None, legacy_behaviour=False, **kwargs):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})

        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for generate()."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(self, src_texts: List[str], src_lang: str = "eng_Latn", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "fra_Latn", **kwargs) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting.
        - In legacy mode: no prefix and suffix=[eos, src_lang_code].
        - In default mode: prefix=[src_lang_code], suffix=[eos].
        """
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting.
        - In legacy mode: no prefix and suffix=[eos, tgt_lang_code].
        - In default mode: prefix=[tgt_lang_code], suffix=[eos].
        """
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
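# Illustrative usage of the language-aware special tokens above (a sketch; the
# checkpoint name comes from the pretrained map earlier in this file):
#   tokenizer = NllbTokenizerFast.from_pretrained(
#       "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
#   )
#   batch = tokenizer("Hello world", return_tensors="pt")
# In the default (non-legacy) mode the input is prefixed with the eng_Latn language
# code and suffixed with </s>, per set_src_lang_special_tokens above.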
| 25
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer

MT5Tokenizer = T5Tokenizer

if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast

MT5TokenizerFast = T5TokenizerFast

_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mt5"] = [
'MT5EncoderModel',
'MT5ForConditionalGeneration',
'MT5ForQuestionAnswering',
'MT5Model',
'MT5PreTrainedModel',
'MT5Stack',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]
if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model
else:
import sys
    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        extra_objects={"MT5Tokenizer": MT5Tokenizer, "MT5TokenizerFast": MT5TokenizerFast},
        module_spec=__spec__,
    )
| 27
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
device = torch.device("cpu")


# We verify the converted weights on an image of cute cats.
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def get_expected_output(swiftformer_name):
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])
    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])
    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])
    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys


@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1_000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint
    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1_000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swiftformer_name',
default='swiftformer_xs',
choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'],
type=str,
help='Name of the SwiftFormer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='./converted_outputs/',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.')
    args = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
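    # Example invocation sketch (the script filename and checkpoint path are
    # placeholders/assumptions, not part of this file):
    #   python convert_swiftformer_original_to_hf.py --swiftformer_name swiftformer_xs \
    #       --pytorch_dump_folder_path ./converted_outputs/ --original_ckpt <path-or-url>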
| 27
| 1
|
"""simple docstring"""
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
models = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}

ZERO2 = "zero2"
ZERO3 = "zero3"

stages = [ZERO2, ZERO3]


def custom_name_func(func, param_num, param):
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
    return f"{func.__name__}_{param_based_name}"


# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus):
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=False)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=False)

    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=True)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=True)

    def do_checks(self, output_dir):
        # XXX: run_asr is premature and doesn't save any results
        # so all we check for now is that the process didn't fail
        pass

    def run_and_check(self, stage: str, model: str, eval_steps: int = 10, distributed: bool = True, fp16: bool = True, quality_checks: bool = True):
        model_name = models[model]

        output_dir = self.run_trainer(
            stage=stage,
            model_name=model_name,
            eval_steps=eval_steps,
            num_train_epochs=1,
            distributed=distributed,
            fp16=fp16,
        )

        self.do_checks(output_dir)

        return output_dir

    def run_trainer(self, stage: str, model_name: str, eval_steps: int = 10, num_train_epochs: int = 1, distributed: bool = True, fp16: bool = True):
        output_dir = self.get_auto_remove_tmp_dir("./xxx", after=False)
        args = f'''
--model_name_or_path {model_name}
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--validation_split_name validation
--output_dir {output_dir}
            --num_train_epochs {str(num_train_epochs)}
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--evaluation_strategy steps
--learning_rate 5e-4
--warmup_steps 8
--orthography timit
--preprocessing_num_workers 1
--group_by_length
--freeze_feature_extractor
--report_to none
--save_steps 0
--eval_steps {eval_steps}
--report_to none
'''.split()
        if fp16:
args.extend(['''--fp16'''] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
        ds_args = f'''--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'''.split()
        script = [f'''{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py''']
        launcher = self.get_launcher(distributed)

        cmd = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())
return output_dir
    def get_launcher(self, distributed=False):
        # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
        # - it won't be able to handle that
        # 2. for now testing with just 2 gpus max (since some quality tests may give different
        # results with mode gpus because we use very little data)
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return f'''deepspeed --num_nodes 1 --num_gpus {num_gpus}'''.split()
| 314
|
"""simple docstring"""
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Applies a polynomial warmup ramp on top of a given learning rate decay schedule."""

    def __init__(self, initial_learning_rate: float, decay_schedule_fn: Callable, warmup_steps: int, power: float = 1.0, name: str = None):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )

    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }
def create_optimizer(init_lr: float, num_train_steps: int, num_warmup_steps: int, min_lr_ratio: float = 0.0, adam_beta1: float = 0.9, adam_beta2: float = 0.999, adam_epsilon: float = 1e-8, adam_clipnorm: Optional[float] = None, adam_global_clipnorm: Optional[float] = None, weight_decay_rate: float = 0.0, power: float = 1.0, include_in_weight_decay: Optional[List[str]] = None):
    # Implements linear decay of the learning rate (optionally preceded by warmup).
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr, decay_steps=num_train_steps - num_warmup_steps, end_learning_rate=init_lr * min_lr_ratio, power=power)
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr, decay_schedule_fn=lr_schedule, warmup_steps=num_warmup_steps)
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule, weight_decay_rate=weight_decay_rate, beta_1=adam_beta1, beta_2=adam_beta2, epsilon=adam_epsilon, clipnorm=adam_clipnorm, global_clipnorm=adam_global_clipnorm, exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"], include_in_weight_decay=include_in_weight_decay)
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule, beta_1=adam_beta1, beta_2=adam_beta2, epsilon=adam_epsilon, clipnorm=adam_clipnorm, global_clipnorm=adam_global_clipnorm)
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
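# Minimal usage sketch for create_optimizer (the values below are illustrative): a
# linear-decay schedule with 500 warmup steps and decoupled weight decay.
#   optimizer, lr_schedule = create_optimizer(
#       init_lr=5e-5, num_train_steps=10_000, num_warmup_steps=500, weight_decay_rate=0.01
#   )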
class AdamWeightDecay(Adam):
    """Adam with decoupled weight decay, optionally restricted to parameters matched by name patterns."""

    def __init__(self, learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-7, amsgrad=False, weight_decay_rate=0.0, include_in_weight_decay=None, exclude_from_weight_decay=None, name="AdamWeightDecay", **kwargs):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config(cls, config):
        """Creates an optimizer from its config with WarmUp as a custom object."""
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate")

    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"],
                use_locking=self._use_locking,
            )
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)

    def _get_lr(self, var_device, var_dtype, apply_state):
        """Retrieves the learning rate with the given state."""
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}

        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients

        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config

    def _do_use_weight_decay(self, param_name):
        """Whether to use L2 weight decay for `param_name`."""
        if self.weight_decay_rate == 0:
            return False

        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True

        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True
class GradientAccumulator:
    """Accumulates gradients across steps; the step counter lives on the first replica."""

    def __init__(self):
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        """Number of accumulated steps."""
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64),
                trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )
        return self._accum_steps.value()

    @property
    def gradients(self):
        """The accumulated gradients on the current replica."""
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        """Accumulates `gradients` on the current replica."""
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient),
                        trainable=False,
                        synchronization=tf.VariableSynchronization.ON_READ,
                        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ]
            )
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")

        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)

        self._accum_steps.assign_add(1)

    def reset(self):
        """Resets the accumulated gradients on the current replica."""
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
| 314
| 1
|
import unittest

from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate


def add_two(x):
    return x + 2


class PythonInterpreterTester(unittest.TestCase):
    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})

    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})

    def test_evaluate_if(self):
        code = "if x <= 3:\n y = 2\nelse:\n y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})

        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})

    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})

    def test_evaluate_subscript(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
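# A minimal direct-usage sketch of `evaluate` outside the test harness, reusing
# only constructs exercised in the tests above:
# state = {"x": 4}
# evaluate("y = add_two(x)\ntext = f'y is {y}'", {"add_two": add_two}, state=state)
# state  # -> {"x": 4, "y": 6, "text": "y is 6"}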
| 230
|
import random

from .binary_exp_mod import bin_exp_mod


def is_prime_big(n, prec=1000):
    """Miller-Rabin probabilistic primality test with `prec` random rounds."""
    if n < 2:
        return False

    if n % 2 == 0:
        return n == 2

    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2
        exp += 1

    # n - 1 = d * (2 ** exp)
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
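
# A minimal sketch of the witness test at the core of Miller-Rabin, using the
# built-in pow() in place of bin_exp_mod (an assumption for illustration only):
def _is_witness(a, n):
    """Return True iff `a` proves the odd number n > 2 composite."""
    d, exp = n - 1, 0
    while d % 2 == 0:
        d //= 2
        exp += 1
    b = pow(a, d, n)
    if b == 1 or b == n - 1:
        return False  # `a` does not witness compositeness
    for _ in range(exp - 1):
        b = b * b % n
        if b == n - 1:
            return False
    return True  # n is certainly composite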
if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 230
| 1
|
"""simple docstring"""
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def _A (__a = "isbn/0140328726" ) -> dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = olid.strip().strip('''/''' ) # Remove leading/trailing whitespace & slashes
if new_olid.count('''/''' ) != 1:
SCREAMING_SNAKE_CASE_ : Optional[int] = f'{olid} is not a valid Open Library olid'
raise ValueError(lowercase__ )
return requests.get(f'https://openlibrary.org/{new_olid}.json' ).json()
def _A (__a ) -> dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = {
"""title""": """Title""",
"""publish_date""": """Publish date""",
"""authors""": """Authors""",
"""number_of_pages""": """Number of pages:""",
"""first_sentence""": """First sentence""",
"""isbn_10""": """ISBN (10)""",
"""isbn_13""": """ISBN (13)""",
}
SCREAMING_SNAKE_CASE_ : Any = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
SCREAMING_SNAKE_CASE_ : int = [
get_openlibrary_data(author['''key'''] )["""name"""] for author in data["""Authors"""]
]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = data["""First sentence"""]["""value"""]
for key, value in data.items():
if isinstance(lowercase__ , lowercase__ ):
SCREAMING_SNAKE_CASE_ : int = """, """.join(lowercase__ )
return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
UpperCAmelCase_ : Optional[int] = input("""\nEnter the ISBN code to search (or \'quit\' to stop): """).strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(f'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''')
continue
print(f'''\nSearching Open Library for ISBN: {isbn}...\n''')
try:
UpperCAmelCase_ : int = summarize_book(get_openlibrary_data(f'''isbn/{isbn}'''))
print("""\n""".join(f'''{key}: {value}''' for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(f'''Sorry, there are no results for ISBN: {isbn}.''')
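# A quick non-interactive usage sketch (requires network access; the expected
# title is based on the default example ISBN used above):
# >>> summarize_book(get_openlibrary_data("isbn/0140328726"))["Title"]
# 'Fantastic Mr Fox'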
| 351
|
"""simple docstring"""
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class lowerCAmelCase__ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Any , lowercase_ : Dict[str, int] , lowercase_ : List[str] , lowercase_ : int = None , lowercase_ : int = None):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_ : str = pad_token_id
SCREAMING_SNAKE_CASE_ : Optional[int] = max_length
SCREAMING_SNAKE_CASE_ : Dict = vocab
SCREAMING_SNAKE_CASE_ : Dict = merges
SCREAMING_SNAKE_CASE_ : Union[str, Any] = BytePairTokenizer(lowercase_ , lowercase_ , sequence_length=lowercase_)
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Dict , lowercase_ : GPTaTokenizer , *lowercase_ : Optional[Any] , **lowercase_ : Tuple):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = [''' '''.join(lowercase_) for m in tokenizer.bpe_ranks.keys()]
SCREAMING_SNAKE_CASE_ : str = tokenizer.get_vocab()
return cls(lowercase_ , lowercase_ , *lowercase_ , **lowercase_)
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : int , lowercase_ : Union[str, os.PathLike] , *lowercase_ : List[str] , **lowercase_ : Optional[int]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = GPTaTokenizer.from_pretrained(lowercase_ , *lowercase_ , **lowercase_)
return cls.from_tokenizer(lowercase_ , *lowercase_ , **lowercase_)
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Dict , lowercase_ : List[Any]):
'''simple docstring'''
return cls(**lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Tuple):
'''simple docstring'''
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : List[Any] , lowercase_ : int = None):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Dict = self.tf_tokenizer(lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[int] = tf.ones_like(lowercase_)
if self.pad_token_id is not None:
# pad the tokens up to max length
SCREAMING_SNAKE_CASE_ : Union[str, Any] = max_length if max_length is not None else self.max_length
if max_length is not None:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = pad_model_inputs(
lowercase_ , max_seq_length=lowercase_ , pad_value=self.pad_token_id)
return {"attention_mask": attention_mask, "input_ids": input_ids}
| 318
| 0
|
"""DETR model configuration"""
import copy
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json",
    # See all DETR models at https://huggingface.co/models?filter=detr
}


class DetrConfig(PretrainedConfig):
    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class DetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
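# A minimal usage sketch for the configuration class above (public imports are
# `from transformers import DetrConfig, DetrModel`):
# configuration = DetrConfig()       # facebook/detr-resnet-50 style defaults
# model = DetrModel(configuration)   # randomly initialized weights
# configuration = model.config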
| 104
|
from collections import UserDict
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
    from ..tf_utils import stable_softmax

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images: Union[str, List[str], "Image", List["Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
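# A minimal usage sketch via the high-level factory (the checkpoint name and
# image URL are illustrative assumptions):
# from transformers import pipeline
# classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
# classifier(
#     "http://images.cocodataset.org/val2017/000000039769.jpg",
#     candidate_labels=["a photo of a cat", "a photo of a dog"],
# )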
| 196
| 0
|
import os
import random
import sys

from . import cryptomath_module as cryptomath
from . import rabin_miller

min_primitive_root = 3


def primitive_root(p_val):
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size):
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key


def make_key_files(name, key_size):
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")
    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")


def main():
    print("Making key files...")
    make_key_files("elgamal", 2048)
    print("Key files generation successful")


if __name__ == "__main__":
    main()
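# A textbook ElGamal round-trip, sketched for orientation (generic scheme with
# toy numbers, independent of the key-file format above):
#   p, g, d = 467, 2, 127        # public prime, generator, private key
#   h = pow(g, d, p)             # public key component
#   k, m = 213, 42               # ephemeral key, message
#   c1, c2 = pow(g, k, p), (m * pow(h, k, p)) % p
#   m_rec = (c2 * pow(c1, p - 1 - d, p)) % p   # == m, since c1**-d cancels h**k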
| 333
|
# Logistic Regression from scratch

# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets


def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


# here alpha is the learning rate, x is the feature matrix, y is the target vector
def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta


if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1

    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
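    # A quick sanity check one could append here (a sketch, numpy only):
    # train_preds = (predict_prob(x) >= 0.5).astype(int)
    # print("train accuracy:", (train_preds == y).mean())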
| 333
| 1
|
"""simple docstring"""
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json',
}
class __lowercase ( a__ ):
'''simple docstring'''
__lowerCAmelCase = '''mvp'''
__lowerCAmelCase = ['''past_key_values''']
__lowerCAmelCase = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self , _UpperCAmelCase=50267 , _UpperCAmelCase=1024 , _UpperCAmelCase=12 , _UpperCAmelCase=4096 , _UpperCAmelCase=16 , _UpperCAmelCase=12 , _UpperCAmelCase=4096 , _UpperCAmelCase=16 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase="gelu" , _UpperCAmelCase=1024 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=0.0 , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=1 , _UpperCAmelCase=0 , _UpperCAmelCase=2 , _UpperCAmelCase=True , _UpperCAmelCase=2 , _UpperCAmelCase=2 , _UpperCAmelCase=False , _UpperCAmelCase=100 , _UpperCAmelCase=800 , **_UpperCAmelCase , ):
__a : str = vocab_size
__a : List[str] = max_position_embeddings
__a : Optional[int] = d_model
__a : int = encoder_ffn_dim
__a : Optional[int] = encoder_layers
__a : str = encoder_attention_heads
__a : Any = decoder_ffn_dim
__a : Union[str, Any] = decoder_layers
__a : Any = decoder_attention_heads
__a : Tuple = dropout
__a : Dict = attention_dropout
__a : Tuple = activation_dropout
__a : str = activation_function
__a : List[str] = init_std
__a : str = encoder_layerdrop
__a : Optional[Any] = decoder_layerdrop
__a : Union[str, Any] = classifier_dropout
__a : Dict = use_cache
__a : Tuple = encoder_layers
__a : int = scale_embedding # scale factor will be sqrt(d_model) if True
__a : List[str] = use_prompt
__a : Union[str, Any] = prompt_length
__a : Optional[Any] = prompt_mid_dim
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , is_encoder_decoder=SCREAMING_SNAKE_CASE__ , decoder_start_token_id=SCREAMING_SNAKE_CASE__ , forced_eos_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''' , SCREAMING_SNAKE_CASE__ ):
__a : List[str] = self.bos_token_id
warnings.warn(
f"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """
'''The config can simply be saved and uploaded again to be fixed.''' )
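# A minimal usage sketch for the config above (public imports are
# `from transformers import MvpConfig, MvpModel`):
# configuration = MvpConfig()
# model = MvpModel(configuration)  # randomly initialized RUCAIBox/mvp-style model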
| 160
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase__ : Dict = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
__UpperCamelCase : Optional[int] = '''audio-spectrogram-transformer'''
def __init__(self , SCREAMING_SNAKE_CASE__=7_68 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=30_72 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=10 , SCREAMING_SNAKE_CASE__=10 , SCREAMING_SNAKE_CASE__=10_24 , SCREAMING_SNAKE_CASE__=1_28 , **SCREAMING_SNAKE_CASE__ , ) -> Tuple:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_size
SCREAMING_SNAKE_CASE__ : str = num_hidden_layers
SCREAMING_SNAKE_CASE__ : int = num_attention_heads
SCREAMING_SNAKE_CASE__ : Tuple = intermediate_size
SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_act
SCREAMING_SNAKE_CASE__ : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : List[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : int = initializer_range
SCREAMING_SNAKE_CASE__ : int = layer_norm_eps
SCREAMING_SNAKE_CASE__ : Dict = patch_size
SCREAMING_SNAKE_CASE__ : Optional[int] = qkv_bias
SCREAMING_SNAKE_CASE__ : Optional[int] = frequency_stride
SCREAMING_SNAKE_CASE__ : Any = time_stride
SCREAMING_SNAKE_CASE__ : Optional[int] = max_length
SCREAMING_SNAKE_CASE__ : Any = num_mel_bins
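# A minimal usage sketch (public imports are `from transformers import ASTConfig, ASTModel`):
# configuration = ASTConfig()
# model = ASTModel(configuration)  # randomly initialized weights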
| 25
| 0
|
"""simple docstring"""
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)

absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)

logger = logging.getLogger(__name__)

parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--onnx_model_path""",
default=None,
type=str,
required=True,
help="""Path to ONNX model: """,
)
parser.add_argument(
"""--output_dir""",
default=None,
type=str,
required=True,
help="""The output directory where the model checkpoints and predictions will be written.""",
)
# Other parameters
parser.add_argument(
"""--tokenizer_name""",
default="""""",
type=str,
required=True,
help="""Pretrained tokenizer name or path if not the same as model_name""",
)
parser.add_argument(
"""--version_2_with_negative""",
action="""store_true""",
help="""If true, the SQuAD examples contain some that do not have an answer.""",
)
parser.add_argument(
"""--null_score_diff_threshold""",
type=float,
default=0.0,
help="""If null_score - best_non_null is greater than the threshold predict null.""",
)
parser.add_argument(
"""--max_seq_length""",
default=384,
type=int,
help=(
"""The maximum total input sequence length after WordPiece tokenization. Sequences """
"""longer than this will be truncated, and sequences shorter than this will be padded."""
),
)
parser.add_argument(
"""--doc_stride""",
default=128,
type=int,
help="""When splitting up a long document into chunks, how much stride to take between chunks.""",
)
parser.add_argument("""--per_device_eval_batch_size""", default=8, type=int, help="""Batch size per GPU/CPU for evaluation.""")
parser.add_argument(
"""--n_best_size""",
default=20,
type=int,
help="""The total number of n-best predictions to generate in the nbest_predictions.json output file.""",
)
parser.add_argument(
"""--max_answer_length""",
default=30,
type=int,
help=(
"""The maximum length of an answer that can be generated. This is needed because the start """
"""and end predictions are not conditioned on one another."""
),
)
parser.add_argument("""--seed""", type=int, default=42, help="""random seed for initialization""")
parser.add_argument(
"""--dataset_name""",
type=str,
default=None,
required=True,
help="""The name of the dataset to use (via the datasets library).""",
)
parser.add_argument(
"""--dataset_config_name""",
type=str,
default=None,
help="""The configuration name of the dataset to use (via the datasets library).""",
)
parser.add_argument(
    """--preprocessing_num_workers""", type=int, default=4, help="""The number of processes to use for preprocessing."""
)
parser.add_argument("""--overwrite_cache""", action="""store_true""", help="""Overwrite the cached training and evaluation sets""")
parser.add_argument(
"""--fp16""",
action="""store_true""",
help="""Whether to use 16-bit (mixed) precision instead of 32-bit""",
)
parser.add_argument(
"""--int8""",
action="""store_true""",
help="""Whether to use INT8""",
)
args = parser.parse_args()

if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
    raise ValueError(
        "You are instantiating a new tokenizer from scratch. This is not supported by this script."
        "You can do it from another script, save it, and load it from here, using --tokenizer_name."
    )

logger.info("Training/evaluation parameters %s", args)

args.eval_batch_size = args.per_device_eval_batch_size

INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)

# TRT Engine properties
STRICT_TYPES = True

engine_name = "temp_engine/bert-fp32.engine"
if args.fp16:
    engine_name = "temp_engine/bert-fp16.engine"
if args.int8:
    engine_name = "temp_engine/bert-int8.engine"

# import ONNX file
if not os.path.exists("temp_engine"):
    os.makedirs("temp_engine")

EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, """rb""") as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
    # Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]

    with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)

        # serialize_engine and store in file (can be directly loaded and deserialized):
        with open(engine_name, "wb") as f:
            f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int64)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int64)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int64)

    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""",
datefmt="""%m/%d/%Y %H:%M:%S""",
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError("""Evaluation requires a dataset name""")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
column_names = raw_datasets["validation"].column_names

question_column_name = "question" if "question" in column_names else column_names[0]
context_column_name = "context" if "context" in column_names else column_names[1]
answer_column_name = "answers" if "answers" in column_names else column_names[2]

# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == "right"

if args.max_seq_length > tokenizer.model_max_length:
    logger.warning(
        f"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"
        f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
    )

max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lots of space). So we remove that
    # left whitespace
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]

    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possible giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation="only_second" if pad_on_right else "only_first",
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )

    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")

    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []

    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0

        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])

        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]

    return tokenized_examples
eval_examples = raw_datasets["validation"]
# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc="Running tokenizer on validation dataset",
)

data_collator = default_data_collator

eval_dataset_for_model = eval_dataset.remove_columns(["example_id", "offset_mapping"])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage="eval"):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]

    references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)


metric = load_metric("squad_v2" if args.version_2_with_negative else "squad")
# Evaluation!
logger.info("""Loading ONNX model %s for evaluation""", args.onnx_model_path)
with open(engine_name, """rb""") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inferrence
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize

    # Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]

    # Allocate output buffer
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)

    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
# Evaluation
logger.info("""***** Running Evaluation *****""")
logger.info(f''' Num examples = {len(eval_dataset)}''')
logger.info(f''' Batch size = {args.per_device_eval_batch_size}''')
    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()

    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1

        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)

        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)

        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)

    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))

    evalTime = timeit.default_timer() - start_time
    logger.info("  Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(eval_dataset))
    # Inference time from TRT
    logger.info("Average Inference Time = {:.3f} ms".format(total_time * 1000 / niter))
    logger.info("Total Inference Time = {:.3f} ms".format(total_time * 1000))
    logger.info("Total Number of Inference = %d", niter)

    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
    logger.info(f"Evaluation metrics: {eval_metric}")
| 364
|
"""simple docstring"""
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
snake_case_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class A_ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
def __init__( self :Optional[Any] , lowercase_ :CLIPSegForImageSegmentation , lowercase_ :CLIPSegProcessor , lowercase_ :AutoencoderKL , lowercase_ :CLIPTextModel , lowercase_ :CLIPTokenizer , lowercase_ :UNetaDConditionModel , lowercase_ :Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , lowercase_ :StableDiffusionSafetyChecker , lowercase_ :CLIPImageProcessor , ) -> List[str]:
super().__init__()
if hasattr(scheduler.config , 'steps_offset' ) and scheduler.config.steps_offset != 1:
UpperCAmelCase = (
f"""The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"""
f""" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure """
'to update the config accordingly as leaving `steps_offset` might led to incorrect results'
' in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,'
' it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`'
' file'
)
deprecate('steps_offset!=1' , '1.0.0' , lowercase_ , standard_warn=lowercase_ )
UpperCAmelCase = dict(scheduler.config )
UpperCAmelCase = 1
UpperCAmelCase = FrozenDict(lowercase_ )
if hasattr(scheduler.config , 'skip_prk_steps' ) and scheduler.config.skip_prk_steps is False:
UpperCAmelCase = (
f"""The configuration file of this scheduler: {scheduler} has not set the configuration"""
' `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make'
' sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to'
' incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face'
' Hub, it would be very nice if you could open a Pull request for the'
' `scheduler/scheduler_config.json` file'
)
deprecate('skip_prk_steps not set' , '1.0.0' , lowercase_ , standard_warn=lowercase_ )
UpperCAmelCase = dict(scheduler.config )
UpperCAmelCase = True
UpperCAmelCase = FrozenDict(lowercase_ )
if safety_checker is None:
logger.warning(
f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'
' results in services or applications open to the public. Both the diffusers team and Hugging Face'
' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'
' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .' )
self.register_modules(
segmentation_model=lowercase_ , segmentation_processor=lowercase_ , vae=lowercase_ , text_encoder=lowercase_ , tokenizer=lowercase_ , unet=lowercase_ , scheduler=lowercase_ , safety_checker=lowercase_ , feature_extractor=lowercase_ , )
def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :Optional[Union[str, int]] = "auto" ) -> int:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCAmelCase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowercase_ )
def UpperCAmelCase__ ( self :Optional[int] ) -> List[str]:
self.enable_attention_slicing(lowercase_ )
def UpperCAmelCase__ ( self :int ) -> int:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
UpperCAmelCase = torch.device('cuda' )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(lowercase_ , lowercase_ )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCAmelCase__ ( self :Optional[Any] ) -> List[str]:
if self.device != torch.device('meta' ) or not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowercase_ , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self :Optional[Any] , lowercase_ :Union[str, List[str]] , lowercase_ :Union[torch.FloatTensor, PIL.Image.Image] , lowercase_ :str , lowercase_ :int = 5_12 , lowercase_ :int = 5_12 , lowercase_ :int = 50 , lowercase_ :float = 7.5 , lowercase_ :Optional[Union[str, List[str]]] = None , lowercase_ :Optional[int] = 1 , lowercase_ :float = 0.0 , lowercase_ :Optional[torch.Generator] = None , lowercase_ :Optional[torch.FloatTensor] = None , lowercase_ :Optional[str] = "pil" , lowercase_ :bool = True , lowercase_ :Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowercase_ :int = 1 , **lowercase_ :int , ) -> int:
UpperCAmelCase = self.segmentation_processor(
text=[text] , images=[image] , padding='max_length' , return_tensors='pt' ).to(self.device )
UpperCAmelCase = self.segmentation_model(**lowercase_ )
UpperCAmelCase = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
UpperCAmelCase = self.numpy_to_pil(lowercase_ )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
UpperCAmelCase = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=lowercase_ , image=lowercase_ , mask_image=lowercase_ , height=lowercase_ , width=lowercase_ , num_inference_steps=lowercase_ , guidance_scale=lowercase_ , negative_prompt=lowercase_ , num_images_per_prompt=lowercase_ , eta=lowercase_ , generator=lowercase_ , latents=lowercase_ , output_type=lowercase_ , return_dict=lowercase_ , callback=lowercase_ , callback_steps=lowercase_ , )
| 181
| 0
|
# XXX: we want transformers master here - in the absence of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path


git_repo_path = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))

import dataclasses  # noqa
import io  # noqa
import itertools  # noqa
import json  # noqa
import os  # noqa
import unittest  # noqa
from copy import deepcopy  # noqa

from parameterized import parameterized  # noqa
from transformers import TrainingArguments, is_torch_available  # noqa
from transformers.deepspeed import is_deepspeed_available  # noqa
from transformers.file_utils import WEIGHTS_NAME  # noqa
from transformers.testing_utils import (  # noqa
    CaptureLogger,
    ExtendSysPath,
    TestCasePlus,
    execute_subprocess_async,
    get_gpu_count,
    mockenv_context,
    require_deepspeed,
    require_torch_gpu,
    require_torch_multi_gpu,
    slow,
)
from transformers.trainer_utils import set_seed  # noqa


set_seed(42)

models = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}

ZERO2 = "zero2"
ZERO3 = "zero3"
stages = [ZERO2, ZERO3]


def custom_name_func(func, param_num, param):
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
    return f"{func.__name__}_{param_based_name}"


# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))


@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus):
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=False)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=False)

    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=True)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=True)

    def do_checks(self, output_dir):
        # XXX: run_asr is premature and doesn't save any results
        # so all we check for now is that the process didn't fail
        pass

    def run_and_check(
        self,
        stage: str,
        model: str,
        eval_steps: int = 10,
        distributed: bool = True,
        fp16: bool = True,
        quality_checks: bool = True,
    ):
        model_name = models[model]

        output_dir = self.run_trainer(
            stage=stage,
            model_name=model_name,
            eval_steps=eval_steps,
            num_train_epochs=1,
            distributed=distributed,
            fp16=fp16,
        )

        self.do_checks(output_dir)

        return output_dir

    def run_trainer(
        self,
        stage: str,
        model_name: str,
        eval_steps: int = 10,
        num_train_epochs: int = 1,
        distributed: bool = True,
        fp16: bool = True,
    ):
        output_dir = self.get_auto_remove_tmp_dir("./xxx", after=False)
        args = f"""
            --model_name_or_path {model_name}
            --dataset_name hf-internal-testing/librispeech_asr_dummy
            --dataset_config_name clean
            --train_split_name validation
            --validation_split_name validation
            --output_dir {output_dir}
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 2
            --evaluation_strategy steps
            --learning_rate 5e-4
            --warmup_steps 8
            --orthography timit
            --preprocessing_num_workers 1
            --group_by_length
            --freeze_feature_extractor
            --report_to none
            --save_steps 0
            --eval_steps {eval_steps}
            --report_to none
        """.split()

        if fp16:
            args.extend(["--fp16"])

        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split()
        script = [f"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"]
        launcher = self.get_launcher(distributed)

        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())

        return output_dir

    def get_launcher(self, distributed=False):
        # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
        # - it won't be able to handle that
        # 2. for now testing with just 2 gpus max (since some quality tests may give different
        # results with mode gpus because we use very little data)
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
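# For orientation, a sketch of the command this test ultimately composes for a
# 2-GPU zero2 fp16 run (paths abbreviated; exact flags come from run_trainer above):
# deepspeed --num_nodes 1 --num_gpus 2 <examples>/research_projects/wav2vec2/run_asr.py \
#     --model_name_or_path patrickvonplaten/wav2vec2_tiny_random ... --fp16 \
#     --deepspeed <tests>/ds_config_wav2vec2_zero2.json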
| 314
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Optional[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any]=7 , __lowerCamelCase : Any=3 , __lowerCamelCase : Any=30 , __lowerCamelCase : str=400 , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Dict=None , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Optional[int]=[0.5, 0.5, 0.5] , __lowerCamelCase : Tuple=[0.5, 0.5, 0.5] , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : List[Any]=1 / 255 , __lowerCamelCase : Dict=True , ) -> List[Any]:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
SCREAMING_SNAKE_CASE__ = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333}
SCREAMING_SNAKE_CASE__ = parent
SCREAMING_SNAKE_CASE__ = batch_size
SCREAMING_SNAKE_CASE__ = num_channels
SCREAMING_SNAKE_CASE__ = min_resolution
SCREAMING_SNAKE_CASE__ = max_resolution
SCREAMING_SNAKE_CASE__ = do_resize
SCREAMING_SNAKE_CASE__ = size
SCREAMING_SNAKE_CASE__ = do_normalize
SCREAMING_SNAKE_CASE__ = image_mean
SCREAMING_SNAKE_CASE__ = image_std
SCREAMING_SNAKE_CASE__ = do_rescale
SCREAMING_SNAKE_CASE__ = rescale_factor
SCREAMING_SNAKE_CASE__ = do_pad
def lowercase_ ( self : Tuple ) -> Tuple:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class YolosImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """simple docstring"""
    image_processing_class = YolosImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = YolosImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False)
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width))
        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ))
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width))
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ))
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width))
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ))
    def test_equivalence_padding(self):
        # Initialize image_processings
        image_processing_1 = self.image_processing_class(**self.image_processor_dict)
        image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_2.pad(image_inputs, return_tensors="pt")
        encoded_images = image_processing_2(image_inputs, return_tensors="pt")
        self.assertTrue(
            torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4))
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"image_id": 39769, "annotations": target}
        # encode them
        image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")
        # encode them
        image_processing = YolosImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 314
| 1
|
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch('socket.socket' )
@patch('builtins.open' )
def test_send_file_running_as_expected(file, sock):
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)
    # ===== invoke =====
    send_file(filename='mytext.txt', testing=True)
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
| 369
|
from __future__ import annotations
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
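    """
    Apply Ohm's law (V = I * R), solving for whichever of voltage, current or
    resistance is passed as 0. Worked doctest examples:

    >>> ohms_law(voltage=10, resistance=5, current=0)
    {'current': 2.0}
    >>> ohms_law(voltage=0, current=0, resistance=10)
    Traceback (most recent call last):
      ...
    ValueError: One and only one argument must be 0
    """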
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if resistance < 0:
raise ValueError('Resistance cannot be negative' )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError('Exactly one argument must be 0' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 109
| 0
|
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    """simple docstring"""
    d = Decimal
    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1]))
        if determinant == 0:
            raise ValueError('This matrix has no inverse.')
        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]
        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            ))
        if determinant == 0:
            raise ValueError('This matrix has no inverse.')
        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )
        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]
        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)
        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError('Please provide a matrix of size 2x2 or 3x3.')
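if __name__ == "__main__":
    # Illustrative check: [[2, 5], [1, 3]] has determinant 1, so its inverse is [[3, -5], [-1, 2]]
    print(inverse_of_matrix([[2, 5], [1, 3]]))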
| 310
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
def simple_accuracy(preds, labels):
    """simple docstring"""
    return (preds == labels).mean()
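# e.g. simple_accuracy(np.array([1, 0, 1]), np.array([1, 1, 1])) -> 0.666... (two of three match)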
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"})
@dataclass
class DataTrainingArguments:
    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        })
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})
def main():
    """simple docstring"""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
        'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s', training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1), training_args.fp16, )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info('Training/evaluation parameters %s', training_args)
# Set seed
set_seed(training_args.seed )
    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError('Task not found: %s' % (data_args.task_name))
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path, from_tf=bool('.ckpt' in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, )
# Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train, )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev, )
        if training_args.do_eval
        else None
    )
    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}
    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None
    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, data_collator=data_collator, )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***')
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, 'eval_results.txt')
        if trainer.is_world_master():
            with open(output_eval_file, 'w') as writer:
                logger.info('***** Eval results *****')
                for key, value in result.items():
                    logger.info(' %s = %s', key, value)
                    writer.write('%s = %s\n' % (key, value))
            results.update(result)
    return results
def _mp_fn(index):
    """simple docstring"""
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 310
| 1
|
"""simple docstring"""
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--txt2img_unclip""",
default="""kakaobrain/karlo-v1-alpha""",
type=str,
required=False,
help="""The pretrained txt2img unclip.""",
)
    args = parser.parse_args()
    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")
    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )
    img2img.save_pretrained(args.dump_path)
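    # Illustrative invocation (hypothetical script name and output path):
    #   python convert_unclip_txt2img_to_image_variation.py --dump_path ./karlo-image-variations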
| 358
|
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    """simple docstring"""
    def __init__(self):
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]
    def circular_convolution(self):
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]
        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))
        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
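    # Illustrative usage with the signals hard-coded in __init__: the circular
    # convolution of [2, 1, 2, -1] and [1, 2, 3, 4] is [10.0, 10.0, 6.0, 14.0].
    print(CircularConvolution().circular_convolution())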
| 127
| 0
|
'''simple docstring'''
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def topology_sort(graph, vert, visited) -> list[int]:
    """simple docstring"""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order
def find_components(reversed_graph, vert, visited) -> list[int]:
    """simple docstring"""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component
def strongly_connected_components(graph) -> list[list[int]]:
    """simple docstring"""
    visited = len(graph) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)
    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)
    components_list = []
    visited = len(graph) * [False]
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)
    return components_list
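if __name__ == "__main__":
    # Illustrative usage on the graphs above: 0 -> 2 -> 1 -> 0 forms a cycle, so the
    # expected output for test_graph_1 is [[0, 1, 2], [3], [4]].
    print(strongly_connected_components(test_graph_1))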
| 344
|
'''simple docstring'''
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
    """simple docstring"""
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_reduce_labels=False) -> None:
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels
    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_reduce_labels": self.do_reduce_labels,
        }
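# The helpers below pull a small ADE20k fixture (image + segmentation-map pairs) that the
# segmentation-map and reduce-labels tests further down rely on.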
def prepare_semantic_single_inputs():
    """simple docstring"""
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    image = Image.open(dataset[0]["file"])
    map = Image.open(dataset[1]["file"])
    return image, map
def prepare_semantic_batch_inputs():
    """simple docstring"""
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    image1 = Image.open(ds[0]["file"])
    map1 = Image.open(ds[1]["file"])
    image2 = Image.open(ds[2]["file"])
    map2 = Image.open(ds[3]["file"])
    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """simple docstring"""
    image_processing_class = BeitImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 20, "width": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        self.assertEqual(image_processor.do_reduce_labels, False)
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
        self.assertEqual(image_processor.do_reduce_labels, True)
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ))
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ))
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ))
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ))
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ))
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ))
    def test_call_segmentation_maps(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
            maps.append(torch.zeros(image.shape[-2:]).long())
        # Test not batched input
        encoding = image_processing(image_inputs[0], maps[0], return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ))
        self.assertEqual(
            encoding["labels"].shape, (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ))
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
        # Test batched
        encoding = image_processing(image_inputs, maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ))
        self.assertEqual(
            encoding["labels"].shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ))
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
        # Test not batched input (PIL images)
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ))
        self.assertEqual(
            encoding["labels"].shape, (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ))
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
        # Test batched input (PIL images)
        images, segmentation_maps = prepare_semantic_batch_inputs()
        encoding = image_processing(images, segmentation_maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape, (
                2,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ))
        self.assertEqual(
            encoding["labels"].shape, (
                2,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ))
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
    def test_reduce_labels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, map = prepare_semantic_single_inputs()
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 150)
        image_processing.do_reduce_labels = True
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
| 344
| 1
|
"""simple docstring"""
def solution(n: int = 1000) -> int:
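    """
    Returns the sum of all natural numbers below n that are multiples of 3 or 5.
    Worked examples (Project Euler problem 1):

    >>> solution(10)
    23
    >>> solution()
    233168
    """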
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)
if __name__ == "__main__":
print(f'''{solution() = }''')
| 38
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''
    pipeline_class = ShapEImg2ImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False
    @property
    def text_embedder_hidden_size(self):
        return 32
    @property
    def time_input_dim(self):
        return 32
    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4
    @property
    def renderer_dim(self):
        return 8
    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size, image_size=64, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=1, )
        model = CLIPVisionModel(config)
        return model
    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], resample=3, size=224, )
        return image_processor
    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            'num_attention_heads': 2,
            'attention_head_dim': 16,
            'embedding_dim': self.time_input_dim,
            'num_embeddings': 32,
            'embedding_proj_dim': self.text_embedder_hidden_size,
            'time_embed_dim': self.time_embed_dim,
            'num_layers': 1,
            'clip_embed_dim': self.time_input_dim * 2,
            'additional_embeddings': 0,
            'time_embed_act_fn': 'gelu',
            'norm_in_type': 'layer',
            'embedding_proj_norm_type': 'layer',
            'encoder_hid_proj_type': None,
            'added_emb_type': None,
        }
        model = PriorTransformer(**model_kwargs)
        return model
    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            'param_shapes': (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            'd_latent': self.time_input_dim,
            'd_hidden': self.renderer_dim,
            'n_output': 12,
            'background': (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule='exp', num_train_timesteps=1024, prediction_type='sample', use_karras_sigmas=True, clip_sample=True, clip_sample_range=1.0, )
        components = {
            'prior': prior,
            'image_encoder': image_encoder,
            'image_processor': image_processor,
            'renderer': renderer,
            'scheduler': scheduler,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        input_image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'image': input_image,
            'generator': generator,
            'num_inference_steps': 1,
            'frame_size': 32,
            'output_type': 'np',
        }
        return inputs
    def test_shap_e(self):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == 'cpu'
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2, test_max_difference=test_max_difference, relax_max_difference=relax_max_difference, )
    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase):
    '''simple docstring'''
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_shap_e_img2img(self):
        input_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/corgi.png')
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/shap_e/test_shap_e_img2img_out.npy')
        pipe = ShapEImg2ImgPipeline.from_pretrained('openai/shap-e-img2img')
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            input_image, generator=generator, guidance_scale=3.0, num_inference_steps=64, frame_size=64, output_type='np', ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
| 38
| 1
|
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
PATTERNS = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def rename_state_dict_key(k):
    """simple docstring"""
    if k == "embeddings.weight":
        return "shared.weight"
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)
    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k
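# Illustrative mapping: the ParlAI key "encoder.layers.0.attention.q_lin.weight" becomes
# "encoder.layers.0.self_attn.q_proj.weight" after the PATTERNS pass plus the
# encoder-specific ".attn" -> ".self_attn" replacement above.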
def rename_layernorm_keys(sd):
    """simple docstring"""
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v
IGNORE_KEYS = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """simple docstring"""
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 10
|
'''simple docstring'''
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = '''\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",
author = "Moosavi, Nafise Sadat and
Strube, Michael",
booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = aug,
year = "2016",
address = "Berlin, Germany",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/P16-1060",
doi = "10.18653/v1/P16-1060",
pages = "632--642",
}
'''
_DESCRIPTION = '''\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only work with CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities These columns identifies the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
'''
_KWARGS_DESCRIPTION = '''
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting \'keep_singletons=False\', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
\'mentions\': mentions
\'muc\': MUC metric [Vilain et al, 1995]
\'bcub\': B-cubed [Bagga and Baldwin, 1998]
\'ceafe\': CEAFe [Luo et al., 2005]
\'lea\': LEA [Moosavi and Strube, 2016]
\'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric(\'coval\')
>>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',
... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',
... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',
... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',
... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',
... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}
'''
def get_coref_infos(key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}
    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0
    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num
    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)
    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num
    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)
    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters
    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)
    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
    if remove_nested:
        logger.info(
            'Number of removed nested coreferring mentions in the key '
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}")
        logger.info(
            'Number of resulting singleton clusters in the key '
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}")
    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            'files, respectively')
    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})

        logger.info(
            name.ljust(10),
            f"Recall: {recall * 100:.2f}",
            f" Precision: {precision * 100:.2f}",
            f" F1: {f1 * 100:.2f}",
        )

    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"conll_score": conll})

    return output_scores
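
# Quick sanity check for the averaging above: with, say, MUC F1 = 0.70,
# B-cubed F1 = 0.65 and CEAFe F1 = 0.60, the reported CoNLL score would be
# ((0.70 + 0.65 + 0.60) / 3) * 100 = 65.0.
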
def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Coval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Sequence(datasets.Value("string")),
                }
            ),
            codebase_urls=["https://github.com/ns-moosavi/coval"],
            reference_urls=[
                "https://github.com/ns-moosavi/coval",
                "https://www.aclweb.org/anthology/P16-1060",
                "http://www.conll.cemantix.org/2012/data.html",
            ],
        )

    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        metrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=metrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )

        return score
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class OwlViTProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_image_processor(self, **kwargs):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list with one random PIL image."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = OwlViTProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = OwlViTProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = OwlViTProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = OwlViTProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, OwlViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, OwlViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = OwlViTProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = OwlViTProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, OwlViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str, return_tensors="np")
        encoded_tok = tokenizer(input_str, return_tensors="np")

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist(), encoded_processor[key][0].tolist())

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_processor_with_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_text = ["cat", "nasa badge"]
        inputs = processor(text=input_text)

        seq_length = 16
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_nested_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = [["cat", "nasa badge"], ["person"]]
        inputs = processor(text=input_texts)

        seq_length = 16
        batch_size = len(input_texts)
        num_max_text_queries = max([len(texts) for texts in input_texts])

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (batch_size * num_max_text_queries, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
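
    # Note on the shape assertion above: with nested queries the processor pads
    # every image's query list up to the longest one (num_max_text_queries), so
    # the tokenized batch is flattened to
    # (batch_size * num_max_text_queries, seq_length) rows.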
    def test_processor_case(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = ["cat", "nasa badge"]
        inputs = processor(text=input_texts)

        seq_length = 16
        input_ids = inputs["input_ids"]
        predicted_ids = [
            [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))
        self.assertListEqual(list(input_ids[0]), predicted_ids[0])
        self.assertListEqual(list(input_ids[1]), predicted_ids[1])

    def test_processor_case2(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        query_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, query_images=query_input)

        self.assertListEqual(list(inputs.keys()), ["query_pixel_values", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training')
# TF training parameters
USE_XLA = False
USE_AMP = False


def train_command_factory(args: Namespace):
    """Factory function used to instantiate the training command from provided CLI arguments."""
    return TrainCommand(args)
class TrainCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register this command with the argument parser of the transformers CLI."""
        train_parser = parser.add_parser("train", help="CLI tool to train a model on a task.")
        train_parser.add_argument(
            "--train_data",
            type=str,
            required=True,
            help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.",
        )
        train_parser.add_argument(
            "--column_label", type=int, default=0, help="Column of the dataset csv file with example labels."
        )
        train_parser.add_argument(
            "--column_text", type=int, default=1, help="Column of the dataset csv file with example texts."
        )
        train_parser.add_argument(
            "--column_id", type=int, default=2, help="Column of the dataset csv file with example ids."
        )
        train_parser.add_argument(
            "--skip_first_row", action="store_true", help="Skip the first row of the csv file (headers)."
        )
        train_parser.add_argument("--validation_data", type=str, default="", help="path to validation dataset.")
        train_parser.add_argument(
            "--validation_split",
            type=float,
            default=0.1,
            help="if validation dataset is not provided, fraction of train dataset to use as validation dataset.",
        )
        train_parser.add_argument("--output", type=str, default="./", help="path to saved the trained model.")
        train_parser.add_argument(
            "--task", type=str, default="text_classification", help="Task to train the model on."
        )
        train_parser.add_argument(
            "--model", type=str, default="bert-base-uncased", help="Model's name or path to stored model."
        )
        train_parser.add_argument("--train_batch_size", type=int, default=32, help="Batch size for training.")
        train_parser.add_argument("--valid_batch_size", type=int, default=64, help="Batch size for validation.")
        train_parser.add_argument("--learning_rate", type=float, default=3e-5, help="Learning rate.")
        train_parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon for Adam optimizer.")
        train_parser.set_defaults(func=train_command_factory)

    def __init__(self, args: Namespace):
        self.logger = logging.get_logger("transformers-cli/training")

        self.framework = "tf" if is_tf_available() else "torch"

        os.makedirs(args.output, exist_ok=True)
        self.output = args.output

        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id

        self.logger.info(f"Loading {args.task} pipeline for {args.model}")
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError

        self.logger.info(f"Loading dataset from {args.train_data}")
        self.train_dataset = Processor.create_from_csv(
            args.train_data,
            column_label=args.column_label,
            column_text=args.column_text,
            column_id=args.column_id,
            skip_first_row=args.skip_first_row,
        )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"Loading validation dataset from {args.validation_data}")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data,
                column_label=args.column_label,
                column_text=args.column_text,
                column_id=args.column_id,
                skip_first_row=args.skip_first_row,
            )

        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon

    def run(self):
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch(self):
        raise NotImplementedError

    def run_tf(self):
        self.pipeline.fit(
            self.train_dataset,
            validation_data=self.valid_dataset,
            validation_split=self.validation_split,
            learning_rate=self.learning_rate,
            adam_epsilon=self.adam_epsilon,
            train_batch_size=self.train_batch_size,
            valid_batch_size=self.valid_batch_size,
        )

        # Save trained pipeline
        self.pipeline.save_pretrained(self.output)
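
# A hypothetical invocation of the command registered above (the flag names
# come straight from `register_subcommand`; the entry point name assumes the
# standard `transformers-cli` console script):
#
#   transformers-cli train \
#       --train_data ./data/train.csv \
#       --column_label 0 --column_text 1 --column_id 2 \
#       --task text_classification \
#       --model bert-base-uncased \
#       --output ./trained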
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
KT = TypeVar("KT")
VT = TypeVar("VT")
class Node(Generic[KT, VT]):
    def __init__(self, key: KT | str = "root", value: VT | None = None):
        self.key = key
        self.value = value
        # Forward references to the next node on each level this node is part of.
        self.forward: list[Node[KT, VT]] = []

    def __repr__(self) -> str:
        return f"Node({self.key}: {self.value})"

    @property
    def level(self) -> int:
        """Number of forward references, i.e. the height of this node."""
        return len(self.forward)
class SkipList(Generic[KT, VT]):
    def __init__(self, p: float = 0.5, max_level: int = 16):
        self.head: Node[KT, VT] = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level

    def __str__(self) -> str:
        items = list(self)

        if len(items) == 0:
            return f"SkipList(level={self.level})"

        label_size = max((len(str(item)) for item in items), default=4)
        label_size = max(label_size, 4) + 4

        node = self.head
        lines = []

        forwards = node.forward.copy()
        lines.append(f"[{node.key}]".ljust(label_size, "-") + "* " * len(forwards))
        lines.append(" " * label_size + "| " * len(forwards))

        while len(node.forward) != 0:
            node = node.forward[0]

            lines.append(
                f"[{node.key}]".ljust(label_size, "-")
                + " ".join(str(n.key) if n.key == node.key else "|" for n in forwards)
            )
            lines.append(" " * label_size + "| " * len(forwards))
            forwards = node.forward

        lines.append("None".ljust(label_size) + "* " * len(forwards))
        return f"SkipList(level={self.level})\n" + "\n".join(lines)

    def __iter__(self):
        node = self.head

        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]
    def random_level(self) -> int:
        """
        :return: Random level from [1, self.max_level] interval.
                 Higher values are less likely.
        """
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1

        return level
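
    # The loop above draws the level from a (truncated) geometric distribution:
    # P(level = k) = p^(k-1) * (1 - p) for k < max_level, so with the default
    # p = 0.5 roughly half of all nodes get level 1, a quarter level 2, and so
    # on. This keeps the expected number of forward pointers per node at
    # 1 / (1 - p) and makes searches take O(log n) steps in expectation.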
    def _locate_node(self, key) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
        """
        :param key: Searched key.
        :return: Tuple with searched node (or None if given key is not present)
                 and list of nodes that refer (if key is present) or should refer
                 to given node.
        """
        # Nodes which refer or should refer to the output node.
        update_vector = []

        node = self.head

        for i in reversed(range(self.level)):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(node)

        update_vector.reverse()  # Note that we were inserting values in reverse order.

        # len(node.forward) != 0 - If current node doesn't contain any further
        #                          references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        #                              if key is present.
        if len(node.forward) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector
    def delete(self, key: KT):
        """
        :param key: Key to remove from list.
        """
        node, update_vector = self._locate_node(key)

        if node is not None:
            for i, update_node in enumerate(update_vector):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]

    def insert(self, key: KT, value: VT):
        """
        :param key: Key to insert.
        :param value: Value associated with given key.
        """
        node, update_vector = self._locate_node(key)
        if node is not None:
            node.value = value
        else:
            level = self.random_level()

            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1, level):
                    update_vector.append(self.head)
                self.level = level

            new_node = Node(key, value)

            for i, update_node in enumerate(update_vector[:level]):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i])

                if update_node.level < i + 1:
                    update_node.forward.append(new_node)
                else:
                    update_node.forward[i] = new_node

    def find(self, key: VT) -> VT | None:
        """
        :param key: Search key.
        :return: Value associated with given key or None if given key is not present.
        """
        node, _ = self._locate_node(key)

        if node is not None:
            return node.value

        return None
def test_insert():
    skip_list = SkipList()
    skip_list.insert("Key1", 3)
    skip_list.insert("Key2", 12)
    skip_list.insert("Key3", 41)
    skip_list.insert("Key4", -19)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    assert len(all_values) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19


def test_insert_overrides_existing_value():
    skip_list = SkipList()
    skip_list.insert("Key1", 10)
    skip_list.insert("Key1", 12)

    skip_list.insert("Key5", 7)
    skip_list.insert("Key7", 10)
    skip_list.insert("Key10", 5)

    skip_list.insert("Key7", 7)
    skip_list.insert("Key5", 5)
    skip_list.insert("Key10", 10)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    if len(all_values) != 4:
        print()
    assert len(all_values) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10
def test_searching_empty_list_returns_none():
    skip_list = SkipList()
    assert skip_list.find("Some key") is None


def test_search():
    skip_list = SkipList()

    skip_list.insert("Key2", 20)
    assert skip_list.find("Key2") == 20

    skip_list.insert("Some Key", 10)
    skip_list.insert("Key2", 8)
    skip_list.insert("V", 13)

    assert skip_list.find("Y") is None
    assert skip_list.find("Key2") == 8
    assert skip_list.find("Some Key") == 10
    assert skip_list.find("V") == 13


def test_deleting_item_from_empty_list_do_nothing():
    skip_list = SkipList()
    skip_list.delete("Some key")

    assert len(skip_list.head.forward) == 0


def test_deleted_items_are_not_founded_by_find_method():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    skip_list.delete("Key2")

    assert skip_list.find("V") is None
    assert skip_list.find("Key2") is None


def test_delete_removes_only_given_key():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    assert skip_list.find("V") is None
    assert skip_list.find("X") == 14
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("X")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key1")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key2")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") is None


def test_delete_doesnt_leave_dead_nodes():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 142)
    skip_list.insert("Key2", 15)

    skip_list.delete("X")

    def traverse_keys(node):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)

    assert len(set(traverse_keys(skip_list.head))) == 4


def test_iter_always_yields_sorted_values():
    def is_sorted(lst):
        return all(next_item >= item for item, next_item in zip(lst, lst[1:]))

    skip_list = SkipList()
    for i in range(10):
        skip_list.insert(i, i)
    assert is_sorted(list(skip_list))
    skip_list.delete(5)
    skip_list.delete(8)
    skip_list.delete(2)
    assert is_sorted(list(skip_list))
    skip_list.insert(-12, -12)
    skip_list.insert(77, 77)
    assert is_sorted(list(skip_list))
def pytests():
    for _ in range(100):
        # Repeat test 100 times due to the probabilistic nature of skip list
        # random values == random bugs
        test_insert()
        test_insert_overrides_existing_value()

        test_searching_empty_list_returns_none()
        test_search()

        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_founded_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()

        test_iter_always_yields_sorted_values()


def main():
    skip_list = SkipList()
    skip_list.insert(2, "2")
    skip_list.insert(4, "4")
    skip_list.insert(6, "4")
    skip_list.insert(4, "5")
    skip_list.insert(8, "4")
    skip_list.insert(9, "4")

    skip_list.delete(4)

    print(skip_list)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
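
# Rough complexity of the structure above: with n stored keys, search, insert
# and delete all touch O(log n) nodes in expectation, while the worst case
# (every node drawn at level 1) degrades to a plain linked-list scan of O(n).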
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_clipseg": [
        "CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPSegConfig",
        "CLIPSegTextConfig",
        "CLIPSegVisionConfig",
    ],
    "processing_clipseg": ["CLIPSegProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clipseg"] = [
"CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPSegModel",
"CLIPSegPreTrainedModel",
"CLIPSegTextModel",
"CLIPSegVisionModel",
"CLIPSegForImageSegmentation",
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
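
# The assignment above swaps this module object in sys.modules for a
# _LazyModule, so the submodules listed in _import_structure are only imported
# when one of their attributes is first accessed, keeping the initial import
# of the package cheap.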
from __future__ import annotations
from typing import Any
class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0):
        """Initialize a row x column matrix filled with default_value."""
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]

    def __str__(self) -> str:
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"

        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indices(self, loc: tuple[int, int]) -> bool:
        """Check if given indices are valid to pick an element from the matrix."""
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]) -> Any:
        assert self.validate_indices(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float):
        assert self.validate_indices(loc)
        self.array[loc[0]][loc[1]] = value
    def __add__(self, another: Matrix) -> Matrix:
        # Validation
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column

        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)

    def __mul__(self, another: int | float | Matrix) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result
    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        """Apply the Sherman-Morrison rank-1 update; `self` must hold A^(-1)."""
        # Size validation
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
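
    # The return expression above is the Sherman-Morrison formula:
    #     (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u)
    # Since `self` already holds A^(-1), a rank-1 update of A costs only a few
    # matrix products (O(n^2)-style work with vectors) instead of a fresh
    # O(n^3) inversion of (A + u v^T).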
# Testing
if __name__ == "__main__":

    def test1() -> None:
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test1()
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
    DiffusionPipeline,
    UnCLIPImageVariationPipeline,
    UnCLIPScheduler,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UnCLIPImageVariationPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = UnCLIPImageVariationPipeline
    params = IMAGE_VARIATION_PARAMS - {"height", "width", "guidance_scale"}
    batch_params = IMAGE_VARIATION_BATCH_PARAMS

    required_optional_params = [
        "generator",
        "return_dict",
        "decoder_num_inference_steps",
        "super_res_num_inference_steps",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)
    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            num_hidden_layers=5,
            num_attention_heads=4,
            image_size=32,
            intermediate_size=37,
            patch_size=1,
        )
        return CLIPVisionModelWithProjection(config)

    @property
    def dummy_text_proj(self):
        torch.manual_seed(0)

        model_kwargs = {
            "clip_embeddings_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "cross_attention_dim": self.cross_attention_dim,
        }

        model = UnCLIPTextProjModel(**model_kwargs)
        return model

    @property
    def dummy_decoder(self):
        torch.manual_seed(0)

        model_kwargs = {
            "sample_size": 32,
            # RGB in channels
            "in_channels": 3,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 6,
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": "identity",
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_super_res_kwargs(self):
        return {
            "sample_size": 64,
            "layers_per_block": 1,
            "down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
            "up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "in_channels": 6,
            "out_channels": 3,
        }

    @property
    def dummy_super_res_first(self):
        torch.manual_seed(0)
        model = UNet2DModel(**self.dummy_super_res_kwargs)
        return model

    @property
    def dummy_super_res_last(self):
        # seeded differently to get a different unet than `self.dummy_super_res_first`
        torch.manual_seed(1)
        model = UNet2DModel(**self.dummy_super_res_kwargs)
        return model
    def get_dummy_components(self):
        decoder = self.dummy_decoder
        text_proj = self.dummy_text_proj
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        super_res_first = self.dummy_super_res_first
        super_res_last = self.dummy_super_res_last

        decoder_scheduler = UnCLIPScheduler(
            variance_type="learned_range",
            prediction_type="epsilon",
            num_train_timesteps=1000,
        )

        super_res_scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log",
            prediction_type="epsilon",
            num_train_timesteps=1000,
        )

        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)

        image_encoder = self.dummy_image_encoder

        return {
            "decoder": decoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_proj": text_proj,
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder,
            "super_res_first": super_res_first,
            "super_res_last": super_res_last,
            "decoder_scheduler": decoder_scheduler,
            "super_res_scheduler": super_res_scheduler,
        }

    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "image": input_image,
            "generator": generator,
            "decoder_num_inference_steps": 2,
            "super_res_num_inference_steps": 2,
            "output_type": "np",
        }
    def test_unclip_image_variation_input_tensor(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)

        output = pipe(**pipeline_inputs)
        image = output.images

        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)

        image_from_tuple = pipe(**tuple_pipeline_inputs, return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.9997, 0.0002, 0.9997, 0.9997, 0.9969, 0.0023, 0.9997, 0.9969, 0.9970]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_unclip_image_variation_input_image(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)

        output = pipe(**pipeline_inputs)
        image = output.images

        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)

        image_from_tuple = pipe(**tuple_pipeline_inputs, return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_unclip_image_variation_input_list_images(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        pipeline_inputs["image"] = [
            pipeline_inputs["image"],
            pipeline_inputs["image"],
        ]

        output = pipe(**pipeline_inputs)
        image = output.images

        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        tuple_pipeline_inputs["image"] = [
            tuple_pipeline_inputs["image"],
            tuple_pipeline_inputs["image"],
        ]

        image_from_tuple = pipe(**tuple_pipeline_inputs, return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (2, 64, 64, 3)

        expected_slice = np.array(
            [0.9997, 0.9989, 0.0008, 0.0021, 0.9960, 0.0018, 0.0014, 0.0002, 0.9933]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_unclip_passed_image_embed(self):
        device = torch.device("cpu")

        class DummyScheduler:
            init_noise_sigma = 1

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(0)
        dtype = pipe.decoder.dtype
        batch_size = 1

        shape = (
            batch_size,
            pipe.decoder.config.in_channels,
            pipe.decoder.config.sample_size,
            pipe.decoder.config.sample_size,
        )
        decoder_latents = pipe.prepare_latents(
            shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler()
        )

        shape = (
            batch_size,
            pipe.super_res_first.config.in_channels // 2,
            pipe.super_res_first.config.sample_size,
            pipe.super_res_first.config.sample_size,
        )
        super_res_latents = pipe.prepare_latents(
            shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler()
        )

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)

        img_out_1 = pipe(
            **pipeline_inputs, decoder_latents=decoder_latents, super_res_latents=super_res_latents
        ).images

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
        # Don't pass image, instead pass embedding
        image = pipeline_inputs.pop("image")
        image_embeddings = pipe.image_encoder(image).image_embeds

        img_out_2 = pipe(
            **pipeline_inputs,
            decoder_latents=decoder_latents,
            super_res_latents=super_res_latents,
            image_embeddings=image_embeddings,
        ).images

        # make sure passing the image embeddings manually is identical
        assert np.abs(img_out_1 - img_out_2).max() < 1e-4
    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"

        # Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
        expected_max_diff = 1e-2

        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference, expected_max_diff=expected_max_diff
        )

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        additional_params_copy_to_batched_inputs = [
            "decoder_num_inference_steps",
            "super_res_num_inference_steps",
        ]

        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs,
        )

    def test_inference_batch_consistent(self):
        additional_params_copy_to_batched_inputs = [
            "decoder_num_inference_steps",
            "super_res_num_inference_steps",
        ]

        if torch_device == "mps":
            # TODO: MPS errors with larger batch sizes
            batch_sizes = [2, 3]
            self._test_inference_batch_consistent(
                batch_sizes=batch_sizes,
                additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs,
            )
        else:
            self._test_inference_batch_consistent(
                additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs
            )
    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class UnCLIPImageVariationPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_unclip_image_variation_karlo(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/unclip/karlo_v1_alpha_cat_variation_fp16.npy"
        )

        pipeline = UnCLIPImageVariationPipeline.from_pretrained(
            "kakaobrain/karlo-v1-alpha-image-variations", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipeline(
            input_image,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (256, 256, 3)

        assert_mean_pixel_difference(image, expected_image, 15)
def is_pentagonal(n: int) -> bool:
    """Return True if n is a pentagonal number, False otherwise."""
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0
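
# Why the check above works: the n-th pentagonal number is P(n) = n(3n - 1) / 2.
# Solving 3n^2 - n - 2P = 0 for n gives n = (1 + sqrt(1 + 24P)) / 6, so P is
# pentagonal exactly when that expression is a positive integer, i.e. when it
# has no fractional part.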
def solution(limit: int = 5000) -> int:
    """
    Return the smallest difference of a pair of pentagonal numbers whose sum
    and difference are both pentagonal, or -1 if none is found within `limit`.
    """
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1
if __name__ == "__main__":
print(f'''{solution() = }''')
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Evaluate how similar the item is to the target by counting matching genes."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice and combine two strings at a random point."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
def mutate(child: str, genes: list[str]) -> str:
    """Mutate a random gene of a child with another one from the gene list."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child) - 1)] = random.choice(genes)
    return "".join(child_list)
def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Select a second parent from the scored population and generate new children."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]

        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
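
# A note on the sizing above: a parent with (normalised) fitness f spawns
# int(f * 100) + 1 crossovers, capped at 10, and every crossover adds two
# mutated children, so a single parent contributes at most 20 new candidates
# per generation.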
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Run the evolution until the target string is reproduced."""
    # Verify that N_POPULATION is bigger than N_SELECTED.
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #         max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generations,
        # just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)

PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
    # See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig(PretrainedConfig):
    model_type = "perceiver"

    def __init__(
        self,
        num_latents=256,
        d_latents=1280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2048,
        image_size=56,
        train_size=[368, 496],
        num_frames=16,
        audio_samples_per_frame=1920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class PerceiverOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        preprocessor,
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework=None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        # copied from OnnxConfig.generate_dummy_inputs, with "input_ids" renamed to "inputs"
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor."
            )
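
# Usage sketch (assumed names, for illustration only): feeding a text preprocessor
# through the ONNX config above to get dummy inputs keyed as the model expects
# ("inputs" instead of "input_ids"). Whether PerceiverTokenizer matches your
# checkpoint is an assumption.
#
#     from transformers import PerceiverConfig, PerceiverTokenizer
#
#     onnx_config = PerceiverOnnxConfig(PerceiverConfig(), task="default")
#     tokenizer = PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")
#     dummy = onnx_config.generate_dummy_inputs(tokenizer, batch_size=-1, seq_length=-1)
#     # dummy now contains "inputs" and "attention_mask"
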
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import tensorflow as tf

    from transformers import GPT2Tokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester:
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        embed_dim=16,
        word_embed_proj_dim=16,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            embed_dim=self.embed_dim,
            word_embed_proj_dim=self.word_embed_proj_dim,
            is_encoder_decoder=False,
            **self.config_updates,
        )
        inputs_dict = prepare_opt_inputs_dict(config, input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFOPTModel(config=config)
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
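
# Note on the check above: it verifies the KV-cache path, i.e. that decoding new
# tokens with past_key_values matches a full forward pass. A minimal sketch of
# the same pattern outside the tester (names assumed, not from the original file):
#
#     out = model(ids, use_cache=True)
#     cached = model(next_ids, past_key_values=out.past_key_values,
#                    attention_mask=full_mask)[0]
#     # cached must match the last positions of a full pass over ids + next_ids
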
@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_resize_token_embeddings(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, "weight"):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer, "weight"):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size

                self.assertEqual(new_input_embeddings.shape[0], assert_size)

                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)

                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)

                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
@require_tf
class TFOPTHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2
        input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1)
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size,
            hidden_size=24,
            num_hidden_layers=2,
            num_attention_heads=2,
            ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class OPTModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFOPTModel.from_pretrained("facebook/opt-350m")
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.not_equal(input_ids, model.config.pad_token_id)
        with tf.GradientTape():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]]
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3))

        xla_generate = tf.function(model, jit_compile=True)
        output = xla_generate(input_ids, attention_mask)[0]
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2))
@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.path_model = "facebook/opt-350m"

    def test_logits(self):
        model = TFOPTForCausalLM.from_pretrained(self.path_model)
        tokenizer = GPT2Tokenizer.from_pretrained(self.path_model)

        prompts = [
            "Today is a beautiful day and I want to",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts, return_tensors="tf", padding=True, add_special_tokens=False)
        logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ]
        )
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))

        xla_generate = tf.function(model, jit_compile=True)
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))
@require_tf
@slow
class TFOPTGenerationTest(unittest.TestCase):
    @property
    def prompts(self):
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]

    def test_generation_pre_attn_layer_norm(self):
        model_id = "facebook/opt-125m"

        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of New York, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]

        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
    def test_batch_generation(self):
        model_id = "facebook/opt-350m"

        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"])

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["attention_mask"][-1], tf.int64)
        )
        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit of a dork.\nI'm a little bit",
            "Today, I was in the middle of a conversation with a friend about the",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
    def test_generation_post_attn_layer_norm(self):
        model_id = "facebook/opt-350m"

        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of San Francisco, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]

        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)