| code (string, lengths 86 to 54.5k) | code_codestyle (int64, 0 to 371) | style_context (string, lengths 87 to 49.2k) | style_context_codestyle (int64, 0 to 349) | label (int64, 0 to 1) |
|---|---|---|---|---|
def multiplicative_persistence(num: int) -> int:
    """
    Return the multiplicative persistence of num: the number of times its
    digits must be multiplied together before a single digit remains.

    >>> multiplicative_persistence(217)
    2
    """
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]

        num_string = str(total)

        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    """
    Return the additive persistence of num: the number of times its digits
    must be summed before a single digit remains.

    >>> additive_persistence(199)
    3
    """
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]

        num_string = str(total)

        steps += 1
    return steps


if __name__ == "__main__":
    import doctest

    doctest.testmod()
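# A quick worked example (assuming the two functions above are in scope):
# 39 -> 3*9 = 27 -> 2*7 = 14 -> 1*4 = 4, so three multiplicative steps,
# while 39 -> 3+9 = 12 -> 1+2 = 3 takes only two additive steps.
assert multiplicative_persistence(39) == 3
assert additive_persistence(39) == 2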
| 92
|
import itertools
import math


def is_prime(number: int) -> bool:
    """Determine whether number is prime using 6k +/- 1 trial division."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """Yield the prime numbers in increasing order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10_001) -> int:
    """Return the nth prime number (Project Euler problem 7)."""
    return next(itertools.islice(prime_generator(), nth - 1, nth))


if __name__ == "__main__":
    print(f"{solution() = }")
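# Sanity checks against the first few primes 2, 3, 5, 7, 11, 13
# (assuming solution above is in scope):
assert solution(1) == 2
assert solution(6) == 13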
| 30
| 0
|
"""Fine-tuning the library models for named entity recognition."""
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple

import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask

import transformers
from transformers import (
    AutoConfig,
    AutoModelForTokenClassification,
    AutoTokenizer,
    DataCollatorWithPadding,
    EvalPrediction,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import is_main_process


logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    task_type: Optional[str] = field(
        default="NER", metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."}
    )
    labels: Optional[str] = field(
        default=None,
        metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."},
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    module = import_module("tasks")
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            f"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
            f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels)
    label_map: Dict[int, str] = dict(enumerate(labels))
    num_labels = len(labels)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label=label_map,
        label2id={label: i for i, label in enumerate(labels)},
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast,
    )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions, axis=2)

        batch_size, seq_len = preds.shape

        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]

        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])

        return preds_list, out_label_list

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        return {
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_process_zero():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

            results.update(result)

    # Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.test,
        )

        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)

        output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")
        if trainer.is_world_process_zero():
            with open(output_test_results_file, "w") as writer:
                for key, value in metrics.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, "w") as writer:
                with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
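# Illustrative sketch (not part of the script): how align_predictions collapses
# argmax predictions and label ids into per-word tag lists, dropping positions
# that carry nn.CrossEntropyLoss().ignore_index (-100). The two-tag label_map
# below is hypothetical.
import numpy as np

label_map = {0: "O", 1: "B-PER"}
logits = np.array([[[0.9, 0.1], [0.2, 0.8], [0.5, 0.5]]])  # (batch=1, seq=3, labels=2)
label_ids = np.array([[0, 1, -100]])  # -100 marks a padded/special position

preds = np.argmax(logits, axis=2)
preds_list = [
    [label_map[preds[i][j]] for j in range(preds.shape[1]) if label_ids[i][j] != -100]
    for i in range(preds.shape[0])
]
print(preds_list)  # [['O', 'B-PER']] -- the ignored third position is dropped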
| 356
|
"""Nearest-neighbour image scaling."""
import numpy as np
from cv2 import destroyAllWindows, imread, imshow, waitKey


class NearestNeighbour:
    """
    Simplest and fastest version of image scaling: each destination pixel
    copies the nearest pixel of the source image.
    """

    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width <= 0 or dst_height <= 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        self.output = self.output_img = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
        )

    def process(self):
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        """Map a destination x coordinate to the nearest source x coordinate."""
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        """Map a destination y coordinate to the nearest source y coordinate."""
        return int(self.ratio_y * y)


if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
    n.process()

    imshow(
        f"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output
    )
    waitKey(0)
    destroyAllWindows()
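# Minimal sketch of the same index mapping without OpenCV: upscale a 2x2 image
# to 4x4, each destination pixel copying the nearest source pixel.
import numpy as np

src = np.array([[[0, 0, 0], [255, 255, 255]],
                [[255, 0, 0], [0, 0, 255]]], dtype=np.uint8)
src_h, src_w = src.shape[:2]
dst_h, dst_w = 4, 4
ratio_x, ratio_y = src_w / dst_w, src_h / dst_h
out = np.ones((dst_h, dst_w, 3), np.uint8) * 255
for i in range(dst_h):
    for j in range(dst_w):
        out[i][j] = src[int(ratio_y * i)][int(ratio_x * j)]
print(out.shape)  # (4, 4, 3): each source pixel now covers a 2x2 block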
| 160
| 0
|
import numpy as np
import torch
import tqdm

from ...models.unet_1d import UNet1DModel
from ...pipelines import DiffusionPipeline
from ...schedulers import DDPMScheduler
from ...utils import randn_tensor


class ValueGuidedRLPipeline(DiffusionPipeline):
    """
    Pipeline for sampling actions from a diffusion model trained to predict
    trajectories, guided by the gradient of a value function.
    """

    def __init__(
        self,
        value_function: UNet1DModel,
        unet: UNet1DModel,
        scheduler: DDPMScheduler,
        env,
    ):
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except:  # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except:  # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]

    def normalize(self, x_in, key):
        return (x_in - self.means[key]) / self.stds[key]

    def de_normalize(self, x_in, key):
        return x_in * self.stds[key] + self.means[key]

    def to_torch(self, x_in):
        if type(x_in) is dict:
            return {k: self.to_torch(v) for k, v in x_in.items()}
        elif torch.is_tensor(x_in):
            return x_in.to(self.unet.device)
        return torch.tensor(x_in, device=self.unet.device)

    def reset_x0(self, x_in, cond, act_dim):
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in

    def run_diffusion(self, x, conditions, n_guide_steps, scale):
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long)
            for _ in range(n_guide_steps):
                with torch.enable_grad():
                    x.requires_grad_()

                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0, 2, 1), timesteps).sample
                    grad = torch.autograd.grad([y.sum()], [x])[0]

                    posterior_variance = self.scheduler._get_variance(i)
                    model_std = torch.exp(0.5 * posterior_variance)
                    grad = model_std * grad

                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_x0(x, conditions, self.action_dim)

            prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1)

            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"]

            # apply conditions to the trajectory (set the initial state)
            x = self.reset_x0(x, conditions, self.action_dim)
            x = self.to_torch(x)
        return x, y

    def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1):
        # normalize the observations and create batch dimension
        obs = self.normalize(obs, "observations")
        obs = obs[None].repeat(batch_size, axis=0)

        conditions = {0: self.to_torch(obs)}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)

        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        x1 = randn_tensor(shape, device=self.unet.device)
        x = self.reset_x0(x1, conditions, self.action_dim)
        x = self.to_torch(x)

        # run the diffusion process
        x, y = self.run_diffusion(x, conditions, n_guide_steps, scale)

        # sort output trajectories by value
        sorted_idx = y.argsort(0, descending=True).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions, key="actions")

        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0, batch_size)

        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
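# Standalone sketch of the guidance step above: nudge a sample x along the
# gradient of a value model, scaled by a posterior standard deviation. The
# quadratic value function and the fixed std below are toy stand-ins; in the
# pipeline they come from the trained value network and scheduler._get_variance.
import torch

x = torch.zeros(4, 8, requires_grad=True)  # batch of 4 "trajectories"
value = lambda t: -((t - 1.0) ** 2).sum(dim=1)  # toy value, peaked at t == 1

y = value(x)
grad = torch.autograd.grad([y.sum()], [x])[0]
posterior_std = 0.5  # stand-in for torch.exp(0.5 * posterior_variance)
scale = 0.1
x = (x + scale * posterior_std * grad).detach()
print(x.mean().item())  # 0.1: every entry moved toward the high-value region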
| 92
|
from dataclasses import dataclass, field
from typing import Tuple

from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments


if is_tf_available():
    import tensorflow as tf


logger = logging.get_logger(__name__)


@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """
        This __init__ is there for legacy code. When removing deprecated args
        completely, the class can simply be deleted.
        """
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )
        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)

    tpu_name: str = field(
        default=None,
        metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0,
        metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        },
    )

    @cached_property
    def _setup_tpu(self):
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy(self):
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)

            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")

        return strategy

    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self) -> "tf.distribute.Strategy":
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
| 92
| 1
|
import gc
import unittest

from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
    is_pipeline_test,
    is_torch_available,
    nested_simplify,
    require_tf,
    require_torch,
    require_torch_gpu,
    slow,
)

from .test_pipelines_common import ANY


@is_pipeline_test
class FillMaskPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        if is_torch_available():
            import torch

            torch.cuda.empty_cache()

    @require_tf
    def test_small_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="tf")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"},
                {"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is grouped",
                    "score": 2.1e-05,
                    "token": 38015,
                    "token_str": " grouped",
                },
                {
                    "sequence": "The largest city in France is accuser",
                    "score": 2.1e-05,
                    "token": 25506,
                    "token_str": " accuser",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"},
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="pt")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"},
                {"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is Maul",
                    "score": 2.2e-05,
                    "token": 35676,
                    "token_str": " Maul",
                },
                {"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"},
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
            ],
        )

        outputs = unmasker("My name is <mask> <mask>", top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is Maul<mask></s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
                ],
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is<mask> Maul</s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
                ],
            ],
        )

    @require_torch_gpu
    def test_fp16_casting(self):
        pipe = pipeline("fill-mask", model="hf-internal-testing/tiny-random-distilbert", device=0, framework="pt")

        # convert model to fp16
        pipe.model.half()

        response = pipe("Paris is the [MASK] of France.")
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got casted back to float32
        # for postprocessing.
        self.assertIsInstance(response, list)

    @slow
    @require_torch
    def test_large_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="pt")
        self.run_large_test(unmasker)

    @slow
    @require_tf
    def test_large_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="tf")
        self.run_large_test(unmasker)

    def run_large_test(self, unmasker):
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
                {"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {
                    "sequence": "The largest city in France is Paris",
                    "score": 0.251,
                    "token": 2201,
                    "token_str": " Paris",
                },
                {
                    "sequence": "The largest city in France is Lyon",
                    "score": 0.214,
                    "token": 12790,
                    "token_str": " Lyon",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Clara", "score": 0.000, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"},
            ],
        )

    @require_torch
    def test_model_no_pad_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="pt")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])

    @require_tf
    def test_model_no_pad_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="tf")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])

    def get_test_pipeline(self, model, tokenizer, processor):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        examples = [
            f"This is another {tokenizer.mask_token} test",
        ]
        return fill_masker, examples

    def run_pipeline_test(self, fill_masker, examples):
        tokenizer = fill_masker.tokenizer
        model = fill_masker.model

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token}",
        )
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."])
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )

        with self.assertRaises(ValueError):
            fill_masker([None])
        # No mask_token is not supported
        with self.assertRaises(PipelineException):
            fill_masker("This is")

        self.run_test_top_k(model, tokenizer)
        self.run_test_targets(model, tokenizer)
        self.run_test_top_k_targets(model, tokenizer)
        self.fill_mask_with_duplicate_targets_and_top_k(model, tokenizer)
        self.fill_mask_with_multiple_masks(model, tokenizer)

    def run_test_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        targets = sorted(vocab.keys())[:2]
        # Pipeline argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, targets=targets)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Call argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Score equivalence
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        tokens = [top_mask["token_str"] for top_mask in outputs]
        scores = [top_mask["score"] for top_mask in outputs]

        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets) == set(tokens):
            unmasked_targets = fill_masker(f"This is a {tokenizer.mask_token}", targets=tokens)
            target_scores = [top_mask["score"] for top_mask in unmasked_targets]
            self.assertEqual(nested_simplify(scores), nested_simplify(target_scores))

        # Raises with invalid
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[])
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
        if "" not in tokenizer.get_vocab():
            with self.assertRaises(ValueError):
                outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[""])
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets="")

    def run_test_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, top_k=2)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2)
        self.assertEqual(
            outputs2,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))

    def run_test_top_k_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        # top_k=2, ntargets=3
        targets = sorted(vocab.keys())[:3]
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2, targets=targets)

        # If we use the most probably targets, and filter differently, we should still
        # have the same results
        targets2 = [el["token_str"] for el in sorted(outputs, key=lambda x: x["score"], reverse=True)]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets2).issubset(targets):
            outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=3, targets=targets2)
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))

    def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        vocab = tokenizer.get_vocab()
        # String duplicates + id duplicates
        targets = sorted(vocab.keys())[:3]
        targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        outputs = fill_masker(f"My name is {tokenizer.mask_token}", targets=targets, top_k=10)

        # The target list contains duplicates, so we can't output more
        # than them
        self.assertEqual(len(outputs), 3)

    def fill_mask_with_multiple_masks(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}", top_k=2
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )
| 371
|
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_yolos_config(yolos_name: str) -> YolosConfig:
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]

    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def read_in_q_k_v(state_dict: dict, config: YolosConfig, base_model: bool = False):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def rename_key(name: str) -> str:
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")

    return name


def convert_state_dict(orig_state_dict: dict, model: YolosForObjectDetection) -> dict:
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def prepare_img() -> Image.Image:
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_yolos_checkpoint(yolos_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]]
        )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]]
        )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]]
        )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]]
        )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]]
        )
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")

    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--yolos_name",
        default="yolos_s_200_pre",
        type=str,
        help=(
            "Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
            " 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
        ),
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 47
| 0
|
def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    """
    Check whether some subset of arr sums to required_sum.

    >>> is_sum_subset([2, 4, 6, 8], 5)
    False
    >>> is_sum_subset([2, 4, 6, 8], 14)
    True
    """
    # subset[i][j] says whether a sum of j can be formed from the first i values
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
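# A space-optimized variant (sketch): the 2-D table above only ever looks one
# row back, so a single boolean row updated right-to-left gives the same answer
# in O(required_sum) memory instead of O(len(arr) * required_sum).
def is_sum_subset_1d(arr: list[int], required_sum: int) -> bool:
    row = [False] * (required_sum + 1)
    row[0] = True  # the empty subset always sums to zero
    for value in arr:
        # iterate backwards so each value is used at most once
        for j in range(required_sum, value - 1, -1):
            row[j] = row[j] or row[j - value]
    return row[required_sum]


assert is_sum_subset_1d([2, 4, 6, 8], 5) is False
assert is_sum_subset_1d([2, 4, 6, 8], 14) is True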
| 309
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}

logger = logging.get_logger(__name__)


class PegasusTokenizer(PreTrainedTokenizer):
    """Construct a PEGASUS tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            pad_token=pad_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # add special tokens to encoder dict
        self.encoder: Dict[int, str] = {
            0: self.pad_token,
            1: self.eos_token,
        }

        if self.mask_token_sent is not None:
            self.encoder.update(
                {
                    2: self.mask_token_sent,
                    3: self.mask_token,
                }
            )

        if self.offset > 0:
            # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
            # mask_token_sent is already added to list -> so start at 1
            self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1)})

        self.decoder: Dict[str, int] = {v: k for k, v in self.encoder.items()}

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.offset

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Convert a token (str) to an id, shifting SentencePiece ids by the offset."""
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token)
        return sp_id + self.offset

    def _convert_id_to_token(self, index: int) -> str:
        """Convert an index (int) to a token (str), undoing the offset shift."""
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset)
        return token

    def convert_tokens_to_string(self, tokens) -> str:
        """Convert a sequence of tokens (string) into a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def num_special_tokens_to_add(self, pair=False):
        """Just EOS"""
        return 1

    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special

        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Get a list where entries are 1 if a token is a special token, else 0."""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Build model inputs from a sequence by appending eos_token_id."""
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
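# Hypothetical usage sketch (commented out because this module uses package-relative
# imports; "google/pegasus-xsum" is the real checkpoint referenced above):
#
#   from transformers import PegasusTokenizer
#
#   tok = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
#   ids = tok("The quick brown fox.").input_ids
#   ids[-1] == tok.eos_token_id  # True: build_inputs_with_special_tokens appends </s>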
| 116
| 0
|
"""Image/Text processor class for CLIP."""
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class CLIPProcessor(ProcessorMixin):
    """Wraps a CLIP image processor and a CLIP tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Prepare text(s) and image(s) as model inputs; at least one must be given."""
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
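# Hypothetical usage sketch (commented out because this module uses package-relative
# imports; the checkpoint name is real, "cat.png" is a placeholder path):
#
#   from PIL import Image
#   from transformers import CLIPProcessor
#
#   processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   inputs = processor(text=["a photo of a cat"], images=Image.open("cat.png"), return_tensors="pt")
#   list(inputs.keys())  # ['input_ids', 'attention_mask', 'pixel_values']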
| 338
|
'''simple docstring'''
def __lowerCamelCase ( _lowercase , _lowercase ) -> bool:
UpperCAmelCase : Tuple = len(_lowercase ) + 1
UpperCAmelCase : List[Any] = len(_lowercase ) + 1
# dp is a 2d matrix where dp[i][j] denotes whether prefix string of
# length i of input_string matches with prefix string of length j of
# given pattern.
# "dp" stands for dynamic programming.
UpperCAmelCase : str = [[0 for i in range(_lowercase )] for j in range(_lowercase )]
# since string of zero length match pattern of zero length
UpperCAmelCase : int = 1
# since pattern of zero length will never match with string of non-zero length
for i in range(1 , _lowercase ):
UpperCAmelCase : str = 0
# since string of zero length will match with pattern where there
# is at least one * alternatively
for j in range(1 , _lowercase ):
UpperCAmelCase : Optional[Any] = dp[0][j - 2] if pattern[j - 1] == """*""" else 0
# now using bottom-up approach to find for all remaining lengths
for i in range(1 , _lowercase ):
for j in range(1 , _lowercase ):
if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
UpperCAmelCase : Union[str, Any] = dp[i - 1][j - 1]
elif pattern[j - 1] == "*":
if dp[i][j - 2] == 1:
UpperCAmelCase : List[Any] = 1
elif pattern[j - 2] in (input_string[i - 1], "."):
UpperCAmelCase : Optional[int] = dp[i - 1][j]
else:
UpperCAmelCase : Any = 0
else:
UpperCAmelCase : str = 0
return bool(dp[-1][-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
a : List[str] = """aab"""
a : Optional[int] = """c*a*b"""
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(F'''{input_string} matches the given pattern {pattern}''')
else:
print(F'''{input_string} does not match with the given pattern {pattern}''')
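    # A few extra illustrative checks (assumed semantics: "." matches any single
    # character and "*" matches zero or more of the preceding element):
    for extra_string, extra_pattern in (("aa", "a*"), ("ab", ".*"), ("aa", "a")):
        print(f"match_pattern({extra_string!r}, {extra_pattern!r}) = {match_pattern(extra_string, extra_pattern)}")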
| 338
| 1
|
"""simple docstring"""
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples( tok , src_examples , tgt_examples , max_tokens=1024 ):
    '''simple docstring'''
    finished_src , finished_tgt = [], []
    sorted_examples = list(zip(src_examples ,tgt_examples ) )
    new_src , new_tgt = sorted_examples[0]
    def is_too_big(contents ):
        return tok(contents ,return_tensors='''pt''' ).input_ids.shape[1] > max_tokens
    for src, tgt in tqdm(sorted_examples[1:] ):
        cand_src = new_src + """ """ + src
        cand_tgt = new_tgt + """ """ + tgt
        if is_too_big(cand_src ) or is_too_big(cand_tgt ): # cant fit, finalize example
            finished_src.append(new_src )
            finished_tgt.append(new_tgt )
            new_src , new_tgt = src, tgt
        else: # can fit, keep adding
            new_src , new_tgt = cand_src, cand_tgt
    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src )
        finished_tgt.append(new_tgt )
    return finished_src, finished_tgt
def pack_data_dir( tok , data_dir , max_tokens , save_path ):
    '''simple docstring'''
    save_path = Path(save_path )
    save_path.mkdir(exist_ok=True )
    for split in ["train"]:
        src_path , tgt_path = data_dir / F"""{split}.source""", data_dir / F"""{split}.target"""
        src_docs = [x.rstrip() for x in Path(src_path ).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path ).open().readlines()]
        packed_src , packed_tgt = pack_examples(tok ,src_docs ,tgt_docs ,max_tokens )
        print(F"""packed {split} split from {len(src_docs )} examples -> {len(packed_src )}.""" )
        Path(save_path / F"""{split}.source""" ).open('''w''' ).write('''\n'''.join(packed_src ) )
        Path(save_path / F"""{split}.target""" ).open('''w''' ).write('''\n'''.join(packed_tgt ) )
    for split in ["val", "test"]:
        src_path , tgt_path = data_dir / F"""{split}.source""", data_dir / F"""{split}.target"""
        shutil.copyfile(src_path ,save_path / F"""{split}.source""" )
        shutil.copyfile(tgt_path ,save_path / F"""{split}.target""" )
def packer_cli( ):
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument('''--tok_name''' ,type=str ,help='''like facebook/bart-large-cnn,t5-base, etc.''' )
    parser.add_argument('''--max_seq_len''' ,type=int ,default=128 )
    parser.add_argument('''--data_dir''' ,type=str )
    parser.add_argument('''--save_path''' ,type=str )
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name )
    return pack_data_dir(tokenizer ,Path(args.data_dir ) ,args.max_seq_len ,args.save_path )
if __name__ == "__main__":
packer_cli()
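# Example invocation (file, model and directory names are illustrative only):
# python pack_dataset.py --tok_name facebook/bart-large-cnn --max_seq_len 256 \
#     --data_dir ./wmt_en_ro --save_path ./wmt_en_ro_packed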
| 179
|
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card( model_card_dir , src_lang , tgt_lang ) -> int:
    texts = {
        """en""": """Machine learning is great, isn't it?""",
        """ru""": """Машинное обучение - это здорово, не так ли?""",
        """de""": """Maschinelles Lernen ist großartig, oder?""",
    }
    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        """ru-en""": ["""[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)""", """39.20"""],
        """en-ru""": ["""[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)""", """33.47"""],
        """en-de""": ["""[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)""", """42.83"""],
        """de-en""": ["""[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)""", """41.35"""],
    }
    pair = F"{src_lang}-{tgt_lang}"
    readme = F"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"
    os.makedirs(model_card_dir , exist_ok=True )
    path = os.path.join(model_card_dir , """README.md""" )
    print(F"Generating {path}" )
    with open(path , """w""" , encoding="""utf-8""" ) as f:
        f.write(readme )
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / """model_cards"""
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base , src_lang , tgt_lang = model_name.split("""-""")
    model_card_dir = model_cards_dir / """facebook""" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 245
| 0
|
def solution( ) -> int:
    constant = []
    i = 1
    while len(constant ) < 1E6:
        constant.append(str(i ) )
        i += 1
    constant = "".join(constant )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[9_9999] )
* int(constant[99_9999] )
)
if __name__ == "__main__":
print(solution())
| 196
|
import re
def is_sri_lankan_phone_number( phone ) -> bool:
    pattern = re.compile(
        R"^(?:0|94|\+94|0{2}94)" R"7(0|1|2|4|5|6|7|8)" R"(-| |)" R"\d{7}$" )
    return bool(re.search(pattern , phone ) )
if __name__ == "__main__":
    phone = '''0094702343221'''
print(is_sri_lankan_phone_number(phone))
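    # A few more illustrative inputs (expected results inferred from the regex above):
    for candidate in ("+94773283048", "0718382399", "0094112343221"):
        print(candidate, is_sri_lankan_phone_number(candidate))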
| 196
| 1
|
from statistics import mean, stdev
def normalization( data , ndigits = 3 ):
    x_min = min(data )
    x_max = max(data )
    # normalize data
    return [round((x - x_min) / (x_max - x_min) , ndigits ) for x in data]
def standardization( data , ndigits = 3 ):
    mu = mean(data )
    sigma = stdev(data )
    # standardize data
    return [round((x - mu) / (sigma) , ndigits ) for x in data]
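# Quick illustrative run (sample values chosen for readability):
if __name__ == "__main__":
    sample = [2.0, 4.0, 6.0, 8.0]
    print(normalization(sample))  # [0.0, 0.333, 0.667, 1.0]
    print(standardization(sample))  # values with mean 0 and (sample) stdev 1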
| 327
|
def euclidean_gcd( a , b ):
    while b:
        a , b = b, a % b
    return a
def euclidean_gcd_recursive( a , b ):
    return a if b == 0 else euclidean_gcd_recursive(b , a % b )
def main( ):
print(f"""euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}""" )
print(f"""euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}""" )
print(f"""euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}""" )
print(f"""euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}""" )
print(f"""euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}""" )
print(f"""euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}""" )
print(f"""euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}""" )
print(f"""euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}""" )
print(f"""euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}""" )
print(f"""euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}""" )
if __name__ == "__main__":
main()
| 327
| 1
|
"""simple docstring"""
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None , metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES )} )
    data_dir: str = field(
        default=None , metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."} )
    max_seq_length: int = field(
        default=128 , metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    doc_stride: int = field(
        default=128 , metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."} , )
    max_query_length: int = field(
        default=64 , metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        } , )
    max_answer_length: int = field(
        default=30 , metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={"help": "Overwrite the cached training and evaluation sets"} )
    version_2_with_negative: bool = field(
        default=False , metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."} )
    null_score_diff_threshold: float = field(
        default=0.0 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."} )
    n_best_size: int = field(
        default=20 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."} )
    lang_id: int = field(
        default=0 , metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        } , )
    threads: int = field(default=1 , metadata={"help": "multiple threads for converting example to features"} )
class Split( Enum ):
    train = "train"
    dev = "dev"
class SquadDataset( Dataset ):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__( self , args , tokenizer , limit_length = None , mode = Split.train , is_language_sensitive = False , cache_dir = None , dataset_format = "pt" , ) -> None:
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode , str ):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("""mode is not a valid split name""" )
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = """v2""" if args.version_2_with_negative else """v1"""
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir , f"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}""" , )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + """.lock"""
        with FileLock(lock_path ):
            if os.path.exists(cached_features_file ) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file )
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["""features"""]
                self.dataset = self.old_features.get("""dataset""" , None )
                self.examples = self.old_features.get("""examples""" , None )
                logger.info(
                    f"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start )
                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"""Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"""
                        """ future run""" )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir )
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir )
                self.features , self.dataset = squad_convert_examples_to_features(
                    examples=self.examples , tokenizer=tokenizer , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=dataset_format , )
                start = time.time()
                torch.save(
                    {"""features""": self.features, """dataset""": self.dataset, """examples""": self.examples} , cached_features_file , )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""" )
def __len__( self ) -> Union[str, Any]:
return len(self.features )
    def __getitem__( self , i ) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]
        input_ids = torch.tensor(feature.input_ids , dtype=torch.long )
        attention_mask = torch.tensor(feature.attention_mask , dtype=torch.long )
        token_type_ids = torch.tensor(feature.token_type_ids , dtype=torch.long )
        cls_index = torch.tensor(feature.cls_index , dtype=torch.long )
        p_mask = torch.tensor(feature.p_mask , dtype=torch.float )
        is_impossible = torch.tensor(feature.is_impossible , dtype=torch.float )
        inputs = {
"""input_ids""": input_ids,
"""attention_mask""": attention_mask,
"""token_type_ids""": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"""cls_index""": cls_index, """p_mask""": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"""is_impossible""": is_impossible} )
if self.is_language_sensitive:
inputs.update({"""langs""": (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position , dtype=torch.long )
            end_positions = torch.tensor(feature.end_position , dtype=torch.long )
inputs.update({"""start_positions""": start_positions, """end_positions""": end_positions} )
return inputs
| 1
|
"""simple docstring"""
def knapsack( weights : list , values : list , number_of_items : int , max_weight : int , index : int ) -> int:
    '''simple docstring'''
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    ans1 = knapsack(weights , values , number_of_items , max_weight , index + 1 )
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights , values , number_of_items , max_weight - weights[index] , index + 1 )
    return max(ans1 , ans2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
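    # Illustrative instance (classic example; the optimum is 13, obtained by
    # taking the items with weights 1 and 4):
    print(knapsack([1, 2, 4, 5], [5, 4, 8, 6], 4, 5, 0))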
| 1
| 1
|
"""simple docstring"""
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate( model_type , generator_name_or_path , question_encoder_name_or_path , dest_dir , config_name_or_path = None , generator_tokenizer_name_or_path = None , question_encoder_tokenizer_name_or_path = None , ):
    '''simple docstring'''
    if config_name_or_path is None:
        config_name_or_path = '''facebook/rag-token-base''' if model_type == '''rag_token''' else '''facebook/rag-sequence-base'''
    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path
    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path
    model_class = RagTokenForGeneration if model_type == '''rag_token''' else RagSequenceForGeneration
    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path )
    gen_config = AutoConfig.from_pretrained(generator_name_or_path )
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path )
    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config
    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path , generator_name_or_path , config=rag_config )
    rag_model.save_pretrained(dest_dir )
    # Sanity check.
    model_class.from_pretrained(dest_dir )
    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path )
    gen_tokenizer.save_pretrained(dest_dir / '''generator_tokenizer/''' )
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path )
    question_encoder_tokenizer.save_pretrained(dest_dir / '''question_encoder_tokenizer/''' )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''',
choices=['''rag_sequence''', '''rag_token'''],
required=True,
type=str,
help='''RAG model type: rag_sequence, rag_token''',
)
parser.add_argument('''--dest''', type=str, required=True, help='''Path to the output checkpoint directory.''')
parser.add_argument('''--generator_name_or_path''', type=str, required=True, help='''Generator model identifier''')
parser.add_argument(
'''--question_encoder_name_or_path''', type=str, required=True, help='''Question encoder model identifier'''
)
parser.add_argument(
'''--generator_tokenizer_name_or_path''',
type=str,
help='''Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``''',
)
parser.add_argument(
'''--question_encoder_tokenizer_name_or_path''',
type=str,
help='''Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``''',
)
parser.add_argument(
'''--config_name_or_path''',
type=str,
help=(
'''Identifier of the model config to use, if not provided, resolves to a base config for a given'''
''' ``model_type``'''
),
)
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
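# Example invocation (model identifiers and output path are illustrative only):
# python consolidate_rag_checkpoint.py --model_type rag_sequence \
#     --generator_name_or_path facebook/bart-large-cnn \
#     --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#     --dest ./rag-checkpoint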
| 72
|
"""simple docstring"""
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class __snake_case ( unittest.TestCase):
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : List[Any] = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : int = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
# Removed: 'text_encoder/model.safetensors',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertFalse(is_safetensors_compatible(__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : int = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
_lowerCamelCase : Optional[int] = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = [
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
_lowerCamelCase : Union[str, Any] = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
_lowerCamelCase : str = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
_lowerCamelCase : Optional[Any] = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : Tuple = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
_lowerCamelCase : Any = '''fp16'''
self.assertFalse(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = [
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
]
_lowerCamelCase : str = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
_lowerCamelCase : Union[str, Any] = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : int = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
# 'text_encoder/model.fp16.safetensors',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
_lowerCamelCase : int = '''fp16'''
self.assertFalse(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
| 72
| 1
|
"""simple docstring"""
import numpy as np
from transformers import Pipeline
def softmax( outputs ):
    '''simple docstring'''
    maxes = np.max(outputs, axis=-1, keepdims=True )
    shifted_exp = np.exp(outputs - maxes )
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True )
class PairClassificationPipeline ( Pipeline ):
    def _sanitize_parameters( self , **kwargs ):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs['second_text'] = kwargs['second_text']
        return preprocess_kwargs, {}, {}
    def preprocess( self , inputs , second_text=None ):
        return self.tokenizer(inputs , text_pair=second_text , return_tensors=self.framework )
    def _forward( self , model_inputs ):
        return self.model(**model_inputs )
    def postprocess( self , model_outputs ):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits )
        best_class = np.argmax(probabilities )
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
return {"label": label, "score": score, "logits": logits}
| 205
|
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class _lowercase ( __UpperCAmelCase ):
lowercase_ = 'sew-d'
def __init__( self , UpperCAmelCase_=32 , UpperCAmelCase_=768 , UpperCAmelCase_=12 , UpperCAmelCase_=12 , UpperCAmelCase_=3072 , UpperCAmelCase_=2 , UpperCAmelCase_=512 , UpperCAmelCase_=256 , UpperCAmelCase_=True , UpperCAmelCase_=True , UpperCAmelCase_=("p2c", "c2p") , UpperCAmelCase_="layer_norm" , UpperCAmelCase_="gelu_python" , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.0 , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.02 , UpperCAmelCase_=1E-7 , UpperCAmelCase_=1E-5 , UpperCAmelCase_="group" , UpperCAmelCase_="gelu" , UpperCAmelCase_=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , UpperCAmelCase_=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , UpperCAmelCase_=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , UpperCAmelCase_=False , UpperCAmelCase_=128 , UpperCAmelCase_=16 , UpperCAmelCase_=True , UpperCAmelCase_=0.05 , UpperCAmelCase_=10 , UpperCAmelCase_=2 , UpperCAmelCase_=0.0 , UpperCAmelCase_=10 , UpperCAmelCase_=0 , UpperCAmelCase_="mean" , UpperCAmelCase_=False , UpperCAmelCase_=False , UpperCAmelCase_=256 , UpperCAmelCase_=0 , UpperCAmelCase_=1 , UpperCAmelCase_=2 , **UpperCAmelCase_ , ) -> Optional[Any]:
super().__init__(**UpperCAmelCase_ , pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ )
lowerCamelCase : Any = hidden_size
lowerCamelCase : Any = feat_extract_norm
lowerCamelCase : List[str] = feat_extract_activation
lowerCamelCase : str = list(UpperCAmelCase_ )
lowerCamelCase : Any = list(UpperCAmelCase_ )
lowerCamelCase : str = list(UpperCAmelCase_ )
lowerCamelCase : List[Any] = conv_bias
lowerCamelCase : Optional[int] = num_conv_pos_embeddings
lowerCamelCase : str = num_conv_pos_embedding_groups
lowerCamelCase : Optional[int] = len(self.conv_dim )
lowerCamelCase : Optional[int] = num_hidden_layers
lowerCamelCase : Union[str, Any] = intermediate_size
lowerCamelCase : str = squeeze_factor
lowerCamelCase : Any = max_position_embeddings
lowerCamelCase : List[Any] = position_buckets
lowerCamelCase : Union[str, Any] = share_att_key
lowerCamelCase : Optional[int] = relative_attention
lowerCamelCase : Tuple = norm_rel_ebd
lowerCamelCase : Union[str, Any] = list(UpperCAmelCase_ )
lowerCamelCase : List[Any] = hidden_act
lowerCamelCase : Optional[Any] = num_attention_heads
lowerCamelCase : Tuple = hidden_dropout
lowerCamelCase : List[Any] = attention_dropout
lowerCamelCase : Optional[Any] = activation_dropout
lowerCamelCase : List[str] = feat_proj_dropout
lowerCamelCase : List[str] = final_dropout
lowerCamelCase : str = layer_norm_eps
lowerCamelCase : int = feature_layer_norm_eps
lowerCamelCase : Optional[Any] = initializer_range
lowerCamelCase : int = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
            raise ValueError(
                'Configuration for convolutional layers is incorrect. '
                'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, '
                F"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride) """
                F"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCamelCase : Any = apply_spec_augment
lowerCamelCase : Optional[int] = mask_time_prob
lowerCamelCase : Optional[Any] = mask_time_length
lowerCamelCase : str = mask_time_min_masks
lowerCamelCase : List[Any] = mask_feature_prob
lowerCamelCase : int = mask_feature_length
lowerCamelCase : List[Any] = mask_feature_min_masks
# ctc loss
lowerCamelCase : Optional[Any] = ctc_loss_reduction
lowerCamelCase : Union[str, Any] = ctc_zero_infinity
# sequence classification
lowerCamelCase : Optional[Any] = use_weighted_layer_sum
lowerCamelCase : Dict = classifier_proj_size
@property
def _UpperCamelCase ( self ) -> Optional[Any]:
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 205
| 1
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ ='Salesforce/blip-image-captioning-base'
lowerCamelCase__ =(
'This is a tool that generates a description of an image. It takes an input named `image` which should be the '
'image to caption, and returns a text that contains the description in English.'
)
lowerCamelCase__ ='image_captioner'
lowerCamelCase__ =AutoModelForVisionaSeq
lowerCamelCase__ =['image']
lowerCamelCase__ =['text']
def __init__(self , *a_ , **a_ ):
'''simple docstring'''
requires_backends(self , ['''vision'''] )
super().__init__(*a_ , **a_ )
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
return self.pre_processor(images=a_ , return_tensors='''pt''' )
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
return self.model.generate(**a_ )
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
return self.pre_processor.batch_decode(a_ , skip_special_tokens=a_ )[0].strip()
| 102
|
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("""git merge-base main HEAD""".split()).decode("""utf-8""")
modified_files = subprocess.check_output(F'git diff --name-only {fork_point_sha}'.split()).decode("""utf-8""").split()
joined_dirs = """|""".join(sys.argv[1:])
regex = re.compile(rF'^({joined_dirs}).*?\.py$')
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(""" """.join(relevant_modified_files), end="""""")
| 102
| 1
|
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read( bpayload , sampling_rate ) -> np.array:
    ar = F'{sampling_rate}'
    ac = '1'
    format_for_conversion = 'f32le'
    ffmpeg_command = [
        'ffmpeg',
        '-i',
        'pipe:0',
        '-ac',
        ac,
        '-ar',
        ar,
        '-f',
        format_for_conversion,
        '-hide_banner',
        '-loglevel',
        'quiet',
        'pipe:1',
    ]
    try:
        with subprocess.Popen(ffmpeg_command , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload )
    except FileNotFoundError as error:
        raise ValueError('ffmpeg was not found but is required to load audio files from filename' ) from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes , np.float32 )
    if audio.shape[0] == 0:
        raise ValueError('Malformed soundfile' )
    return audio
def A_ ( A__ , A__ , A__ = "f32le" , ) -> Optional[int]:
a__ : Dict = F'{sampling_rate}'
a__ : Optional[int] = '1'
if format_for_conversion == "s16le":
a__ : Union[str, Any] = 2
elif format_for_conversion == "f32le":
a__ : Optional[Any] = 4
else:
raise ValueError(F'Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`' )
a__ : Dict = platform.system()
if system == "Linux":
a__ : Tuple = 'alsa'
a__ : Any = 'default'
elif system == "Darwin":
a__ : Tuple = 'avfoundation'
a__ : Tuple = ':0'
elif system == "Windows":
a__ : Tuple = 'dshow'
a__ : Optional[Any] = 'default'
a__ : Optional[int] = [
'ffmpeg',
'-f',
format_,
'-i',
input_,
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-fflags',
'nobuffer',
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
a__ : Tuple = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
a__ : List[Any] = _ffmpeg_stream(A__ , A__ )
for item in iterator:
yield item
def A_ ( A__ , A__ , A__ = None , A__ = None , A__ = "f32le" , ) -> Any:
if stream_chunk_s is not None:
a__ : str = stream_chunk_s
else:
a__ : Any = chunk_length_s
a__ : List[str] = ffmpeg_microphone(A__ , A__ , format_for_conversion=A__ )
if format_for_conversion == "s16le":
a__ : Any = np.intaa
a__ : Dict = 2
elif format_for_conversion == "f32le":
a__ : Any = np.floataa
a__ : List[str] = 4
else:
raise ValueError(F'Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`' )
if stride_length_s is None:
a__ : int = chunk_length_s / 6
a__ : Optional[Any] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(A__ , (int, float) ):
a__ : Optional[Any] = [stride_length_s, stride_length_s]
a__ : Optional[Any] = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
a__ : Tuple = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
a__ : int = datetime.datetime.now()
a__ : Optional[Any] = datetime.timedelta(seconds=A__ )
for item in chunk_bytes_iter(A__ , A__ , stride=(stride_left, stride_right) , stream=A__ ):
# Put everything back in numpy scale
a__ : str = np.frombuffer(item['raw'] , dtype=A__ )
a__ : Any = (
item['stride'][0] // size_of_sample,
item['stride'][1] // size_of_sample,
)
a__ : Dict = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def chunk_bytes_iter( iterator , chunk_len , stride , stream = False ):
    acc = B''
    stride_left , stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            F'Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}' )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc ) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc ) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {'raw': acc[:chunk_len], 'stride': stride}
                if stream:
                    item['partial'] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc ) > stride_left:
        item = {'raw': acc, 'stride': (_stride_left, 0)}
        if stream:
            item['partial'] = False
        yield item
def _ffmpeg_stream( ffmpeg_command , buflen ):
    bufsize = 2**24 # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command , stdout=subprocess.PIPE , bufsize=bufsize ) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen )
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError('ffmpeg was not found but is required to stream audio files from filename' ) from error
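# Minimal usage sketch (file name is illustrative; ffmpeg must be installed):
# with open("sample.flac", "rb") as f:
#     audio = ffmpeg_read(f.read(), sampling_rate=16000)
# -> `audio` is a mono float32 numpy array resampled to 16 kHz.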
| 225
|
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["""data_utils"""] = data_utils
sys.modules["""vocabulary"""] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch( tf_checkpoint_path , transfo_xl_config_file , pytorch_dump_folder_path , transfo_xl_dataset_file ):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file , 'rb' ) as fp:
            corpus = pickle.load(fp , encoding='latin1' )
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['pretrained_vocab_file']
        print(F'Save vocabulary to {pytorch_vocab_dump_path}' )
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict , pytorch_vocab_dump_path )
        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop('vocab' , None )
        pytorch_dataset_dump_path = pytorch_dump_folder_path + '/' + CORPUS_NAME
        print(F'Save dataset to {pytorch_dataset_dump_path}' )
        torch.save(corpus_dict_no_vocab , pytorch_dataset_dump_path )
    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file )
        tf_path = os.path.abspath(tf_checkpoint_path )
        print(F'Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.' )
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file )
        print(F'Building PyTorch model from configuration: {config}' )
        model = TransfoXLLMHeadModel(config )
        model = load_tf_weights_in_transfo_xl(model , config , tf_path )
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME )
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path , CONFIG_NAME )
        print(F'Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path )}' )
        torch.save(model.state_dict() , pytorch_weights_dump_path )
        print(F'Save configuration file to {os.path.abspath(pytorch_config_dump_path )}' )
        with open(pytorch_config_dump_path , 'w' , encoding='utf-8' ) as f:
            f.write(config.to_json_string() )
if __name__ == "__main__":
lowercase : List[Any] = argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the folder to store the PyTorch model or dataset/vocab.""",
)
parser.add_argument(
"""--tf_checkpoint_path""",
default="""""",
type=str,
help="""An optional path to a TensorFlow checkpoint path to be converted.""",
)
parser.add_argument(
"""--transfo_xl_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--transfo_xl_dataset_file""",
default="""""",
type=str,
help="""An optional dataset file to be converted in a vocabulary.""",
)
lowercase : Any = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
| 225
| 1
|
'''simple docstring'''
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
__lowerCAmelCase = parse(importlib.metadata.version("""torch"""))
def UpperCAmelCase_ (__a : Union[str, Version] , __a : str , __a : str ):
"""simple docstring"""
if operation not in STR_OPERATION_TO_FUNC.keys():
raise ValueError(f"""`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}""" )
_a : Optional[Any] = STR_OPERATION_TO_FUNC[operation]
if isinstance(__a , __a ):
_a : Dict = parse(importlib.metadata.version(__a ) )
return operation(__a , parse(__a ) )
def UpperCAmelCase_ (__a : str , __a : str ):
"""simple docstring"""
return compare_versions(__a , __a , __a )
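# Illustrative usage (results depend on the versions actually installed):
# compare_versions("numpy", ">=", "1.20")  -> True or False
# is_torch_version("<", "2.0")             -> True or False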
| 271
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
__lowerCAmelCase = TypeVar("""T""")
class Node ( Generic[T] ):
"""simple docstring"""
    def __init__( self : Tuple ,data : T ):
        '''simple docstring'''
        self.data = data
        self.next : Node[T] | None = None
def __str__( self : Dict ):
'''simple docstring'''
return F"""{self.data}"""
class LinkedStack ( Generic[T] ):
"""simple docstring"""
    def __init__( self : Optional[int] ):
        '''simple docstring'''
        self.top : Node[T] | None = None
    def __iter__( self : str ):
        '''simple docstring'''
        node = self.top
        while node:
            yield node.data
            node = node.next
def __str__( self : str ):
'''simple docstring'''
return "->".join([str(_a ) for item in self] )
def __len__( self : Optional[Any] ):
'''simple docstring'''
return len(tuple(iter(self ) ) )
    def is_empty( self : str ):
        '''simple docstring'''
        return self.top is None
    def push( self : List[Any] ,data : T ):
        '''simple docstring'''
        node = Node(data )
        if not self.is_empty():
            node.next = self.top
        self.top = node
    def pop( self : Tuple ):
        '''simple docstring'''
        if self.is_empty():
            raise IndexError('pop from empty stack' )
        assert isinstance(self.top ,Node )
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data
    def peek( self : List[str] ):
        '''simple docstring'''
        if self.is_empty():
            raise IndexError('peek from empty stack' )
        assert self.top is not None
        return self.top.data
    def clear( self : List[str] ):
        '''simple docstring'''
        self.top = None
if __name__ == "__main__":
from doctest import testmod
testmod()
| 271
| 1
|
'''simple docstring'''
from __future__ import annotations
def p_series( nth_term , power ):
    '''simple docstring'''
    if nth_term == "":
        return [""]
    nth_term = int(nth_term )
    power = int(power )
    series : list[str] = []
    for temp in range(int(nth_term ) ):
        series.append(f'1 / {pow(temp + 1 , int(power ) )}' if series else """1""" )
    return series
if __name__ == "__main__":
import doctest
doctest.testmod()
    nth_term = int(input('''Enter the last number (nth term) of the P-Series'''))
    power = int(input('''Enter the power for P-Series'''))
print('''Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p''')
print(p_series(nth_term, power))
| 135
|
'''simple docstring'''
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _lowerCAmelCase :
def __init__(self , lowercase , lowercase=13 , lowercase=30 , lowercase=2 , lowercase=3 , lowercase=True , lowercase=True , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=10 , lowercase=0.02 , lowercase=None , lowercase=2 , ):
A_ : List[str] = parent
A_ : str = batch_size
A_ : Optional[Any] = image_size
A_ : List[str] = patch_size
A_ : List[str] = num_channels
A_ : List[str] = is_training
A_ : str = use_labels
A_ : List[str] = hidden_size
A_ : List[Any] = num_hidden_layers
A_ : Any = num_attention_heads
A_ : Any = intermediate_size
A_ : Optional[Any] = hidden_act
A_ : Optional[int] = hidden_dropout_prob
A_ : str = attention_probs_dropout_prob
A_ : Optional[int] = type_sequence_label_size
A_ : Any = initializer_range
A_ : int = scope
A_ : str = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
A_ : Dict = (image_size // patch_size) ** 2
A_ : List[str] = num_patches + 1
def _a (self ):
A_ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ : Optional[Any] = None
if self.use_labels:
A_ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def _a (self ):
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _a (self , lowercase , lowercase , lowercase ):
A_ : List[str] = ViTModel(config=lowercase )
model.to(lowercase )
model.eval()
A_ : List[str] = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a (self , lowercase , lowercase , lowercase ):
A_ : List[str] = ViTForMaskedImageModeling(config=lowercase )
model.to(lowercase )
model.eval()
A_ : Tuple = model(lowercase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
A_ : Union[str, Any] = 1
A_ : Any = ViTForMaskedImageModeling(lowercase )
model.to(lowercase )
model.eval()
A_ : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A_ : Optional[int] = model(lowercase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def _a (self , lowercase , lowercase , lowercase ):
A_ : Dict = self.type_sequence_label_size
A_ : str = ViTForImageClassification(lowercase )
model.to(lowercase )
model.eval()
A_ : List[str] = model(lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
A_ : Any = 1
A_ : str = ViTForImageClassification(lowercase )
model.to(lowercase )
model.eval()
A_ : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A_ : Union[str, Any] = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def _a (self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class _lowerCAmelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Any = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
__SCREAMING_SNAKE_CASE : Union[str, Any] = (
{'feature-extraction': ViTModel, 'image-classification': ViTForImageClassification}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE : Union[str, Any] = True
__SCREAMING_SNAKE_CASE : Dict = False
__SCREAMING_SNAKE_CASE : Optional[int] = False
__SCREAMING_SNAKE_CASE : Optional[int] = False
def _a (self ):
A_ : Any = ViTModelTester(self )
A_ : str = ConfigTester(self , config_class=lowercase , has_text_modality=lowercase , hidden_size=37 )
def _a (self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViT does not use inputs_embeds""" )
def _a (self ):
pass
def _a (self ):
A_, A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Union[str, Any] = model_class(lowercase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A_ : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase , nn.Linear ) )
def _a (self ):
A_, A_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Optional[Any] = model_class(lowercase )
A_ : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ : List[str] = [*signature.parameters.keys()]
A_ : Dict = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowercase )
def _a (self ):
A_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def _a (self ):
A_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowercase )
def _a (self ):
A_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase )
@slow
def _a (self ):
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : Dict = ViTModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
def prepare_img( ):
    '''simple docstring'''
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
@cached_property
def _a (self ):
return ViTImageProcessor.from_pretrained("""google/vit-base-patch16-224""" ) if is_vision_available() else None
@slow
def _a (self ):
A_ : Optional[int] = ViTForImageClassification.from_pretrained("""google/vit-base-patch16-224""" ).to(lowercase )
A_ : List[str] = self.default_image_processor
A_ : Tuple = prepare_img()
A_ : int = image_processor(images=lowercase , return_tensors="""pt""" ).to(lowercase )
# forward pass
with torch.no_grad():
A_ : str = model(**lowercase )
# verify the logits
A_ : Dict = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowercase )
A_ : str = torch.tensor([-0.27_44, 0.82_15, -0.08_36] ).to(lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase , atol=1E-4 ) )
@slow
def _a (self ):
# ViT models have an `interpolate_pos_encoding` argument in their forward method,
# allowing to interpolate the pre-trained position embeddings in order to use
# the model on higher resolutions. The DINO model by Facebook AI leverages this
# to visualize self-attention on higher resolution images.
A_ : Optional[int] = ViTModel.from_pretrained("""facebook/dino-vits8""" ).to(lowercase )
A_ : List[Any] = ViTImageProcessor.from_pretrained("""facebook/dino-vits8""" , size=480 )
A_ : Dict = prepare_img()
A_ : str = image_processor(images=lowercase , return_tensors="""pt""" )
A_ : int = inputs.pixel_values.to(lowercase )
# forward pass
with torch.no_grad():
A_ : int = model(lowercase , interpolate_pos_encoding=lowercase )
# verify the logits
A_ : int = torch.Size((1, 3601, 384) )
self.assertEqual(outputs.last_hidden_state.shape , lowercase )
A_ : List[Any] = torch.tensor(
[[4.23_40, 4.39_06, -6.66_92], [4.54_63, 1.89_28, -6.72_57], [4.44_29, 0.84_96, -5.85_85]] ).to(lowercase )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowercase , atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def _a (self ):
A_ : List[Any] = ViTModel.from_pretrained("""facebook/dino-vits8""" , torch_dtype=torch.floataa , device_map="""auto""" )
A_ : int = self.default_image_processor
A_ : Any = prepare_img()
A_ : List[str] = image_processor(images=lowercase , return_tensors="""pt""" )
A_ : Any = inputs.pixel_values.to(lowercase )
# forward pass to make sure inference works in fp16
with torch.no_grad():
A_ : Optional[Any] = model(lowercase )
| 135
| 1
|
def binary_recursive ( decimal ):
    '''simple docstring'''
    decimal = int(decimal )
    if decimal in (0, 1): # Exit cases for the recursion
        return str(decimal )
    div , mod = divmod(decimal , 2 )
    return binary_recursive(div ) + str(mod )
def main ( number ):
    '''simple docstring'''
    number = str(number ).strip()
    if not number:
        raise ValueError('''No input value was provided''' )
    negative = '''-''' if number.startswith('''-''' ) else ''''''
    number = number.lstrip('''-''' )
    if not number.isnumeric():
        raise ValueError('''Input value is not an integer''' )
    return f'{negative}0b{binary_recursive(int(number ) )}'
if __name__ == "__main__":
from doctest import testmod
testmod()
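    # Illustrative conversions:
    print(main("10"))  # 0b1010
    print(main("-5"))  # -0b101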
| 226
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class __SCREAMING_SNAKE_CASE ( A__ ):
    frames : Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 337
| 0
|
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __lowercase :
"""simple docstring"""
def __init__( self , A , A=3 , A=32 , A=3 , A=10 , A=[10, 20, 30, 40] , A=[1, 1, 2, 1] , A=True , A=True , A="relu" , A=3 , A=None , ) -> int:
'''simple docstring'''
lowerCamelCase = parent
lowerCamelCase = batch_size
lowerCamelCase = image_size
lowerCamelCase = num_channels
lowerCamelCase = embeddings_size
lowerCamelCase = hidden_sizes
lowerCamelCase = depths
lowerCamelCase = is_training
lowerCamelCase = use_labels
lowerCamelCase = hidden_act
lowerCamelCase = num_labels
lowerCamelCase = scope
lowerCamelCase = len(A )
def __A ( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase = None
if self.use_labels:
lowerCamelCase = ids_tensor([self.batch_size] , self.num_labels )
lowerCamelCase = self.get_config()
return config, pixel_values, labels
def __A ( self ) -> Optional[int]:
'''simple docstring'''
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def __A ( self , A , A , A ) -> List[Any]:
'''simple docstring'''
lowerCamelCase = TFResNetModel(config=A )
lowerCamelCase = model(A )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __A ( self , A , A , A ) -> Any:
'''simple docstring'''
lowerCamelCase = self.num_labels
lowerCamelCase = TFResNetForImageClassification(A )
lowerCamelCase = model(A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self ) -> str:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class __lowercase ( a_ , a_ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : Tuple = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
UpperCamelCase : str = (
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
UpperCamelCase : List[Any] = False
UpperCamelCase : Union[str, Any] = False
UpperCamelCase : int = False
UpperCamelCase : Any = False
UpperCamelCase : List[str] = False
def __A ( self ) -> Any:
'''simple docstring'''
lowerCamelCase = TFResNetModelTester(self )
lowerCamelCase = ConfigTester(self , config_class=A , has_text_modality=A )
def __A ( self ) -> str:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __A ( self ) -> int:
'''simple docstring'''
return
@unittest.skip(reason="""ResNet does not use inputs_embeds""" )
def __A ( self ) -> Any:
'''simple docstring'''
pass
@unittest.skip(reason="""ResNet does not support input and output embeddings""" )
def __A ( self ) -> int:
'''simple docstring'''
pass
def __A ( self ) -> Dict:
'''simple docstring'''
lowerCamelCase , lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase = model_class(A )
lowerCamelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase = [*signature.parameters.keys()]
lowerCamelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , A )
def __A ( self ) -> int:
'''simple docstring'''
lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
def check_hidden_states_output(A , A , A ):
lowerCamelCase = model_class(A )
lowerCamelCase = model(**self._prepare_for_class(A , A ) )
lowerCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCamelCase = self.model_tester.num_stages
self.assertEqual(len(A ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowerCamelCase , lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase = ["""basic""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowerCamelCase = layer_type
lowerCamelCase = True
check_hidden_states_output(A , A , A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase = True
check_hidden_states_output(A , A , A )
def __A ( self ) -> str:
'''simple docstring'''
lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
@slow
def __A ( self ) -> str:
'''simple docstring'''
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase = TFResNetModel.from_pretrained(A )
self.assertIsNotNone(A )
def __lowerCamelCase ( ):
'''simple docstring'''
lowerCamelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __A ( self ) -> str:
'''simple docstring'''
lowerCamelCase = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
lowerCamelCase = self.default_image_processor
lowerCamelCase = prepare_img()
lowerCamelCase = image_processor(images=A , return_tensors="""tf""" )
# forward pass
lowerCamelCase = model(**A )
# verify the logits
lowerCamelCase = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape , A )
lowerCamelCase = tf.constant([-11.1069, -9.7877, -8.3777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , A , atol=1e-4 ) )
| 360
|
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)
AUTOTUNE = tf.data.AUTOTUNE
def parse_args():
    '''simple docstring'''
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument(
        "--pretrained_model_config", type=str, default="roberta-base", help="The model config to use. Note that we don't copy the model's weights, only the config!", )
    parser.add_argument(
        "--tokenizer", type=str, default="unigram-tokenizer-wikitext", help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.", )
    parser.add_argument(
        "--per_replica_batch_size", type=int, default=8, help="Batch size per TPU core.", )
    parser.add_argument(
        "--no_tpu", action="store_true", help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.", )
    parser.add_argument(
        "--tpu_name", type=str, help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.", default="local", )
    parser.add_argument(
        "--tpu_zone", type=str, help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.", )
    parser.add_argument(
        "--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes.")
    parser.add_argument(
        "--bfloat16", action="store_true", help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.", )
    parser.add_argument(
        "--train_dataset", type=str, help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.", )
    parser.add_argument(
        "--shuffle_buffer_size", type=int, default=2**18, help="Size of the shuffle buffer (in samples)", )
    parser.add_argument(
        "--eval_dataset", type=str, help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.", )
    parser.add_argument(
        "--num_epochs", type=int, default=1, help="Number of epochs to train for.", )
    parser.add_argument(
        "--learning_rate", type=float, default=1e-4, help="Learning rate to use for training.", )
    parser.add_argument(
        "--weight_decay_rate", type=float, default=1e-3, help="Weight decay rate to use for training.", )
    parser.add_argument(
        "--max_length", type=int, default=512, help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py", )
    parser.add_argument(
        "--mlm_probability", type=float, default=0.15, help="Fraction of tokens to mask during training.", )
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")
    args = parser.parse_args()
    return args
def initialize_tpu(args):
    '''simple docstring'''
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project)
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local.")
    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    return tpu
def count_samples(file_list):
    '''simple docstring'''
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(R"-\d+-(\d+)\.tfrecord", filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count
    return num_samples
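# The regex appears to assume shard names ending in "-<shard>-<count>.tfrecord",
# so the trailing number is the shard's sample count; a hypothetical example:
#   re.search(R"-\d+-(\d+)\.tfrecord", "train-00001-5000.tfrecord").group(1) -> "5000"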
def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    '''simple docstring'''
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(dataset))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTOTUNE)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTOTUNE)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(args.shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTOTUNE)
    dataset = dataset.prefetch(AUTOTUNE)
    return dataset
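# Pipeline order in prepare_dataset: shard-list shuffle -> record decode -> sample
# shuffle -> batch -> per-batch masking -> prefetch. Masking after batching lets the
# TF-compilable data collator mask whole batches at once rather than one example at
# a time.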
def main(args):
    '''simple docstring'''
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")
    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size
    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f'No .tfrecord files found in {args.train_dataset}.')
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f'No .tfrecord files found in {args.eval_dataset}.')
    num_train_samples = count_samples(training_records)
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs
    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps, num_warmup_steps=total_train_steps // 20, init_lr=args.learning_rate, weight_decay_rate=args.weight_decay_rate, )
        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])
    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)
    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf")
    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"], vocab_size=len(tokenizer), mask_token_id=tokenizer.mask_token_id, special_tokens_mask=special_tokens_mask, )
        return batch
    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync
    train_dataset = prepare_dataset(
        training_records, decode_fn=decode_fn, mask_fn=mask_with_collator, batch_size=batch_size, shuffle=True, shuffle_buffer_size=args.shuffle_buffer_size, )
    eval_dataset = prepare_dataset(
        eval_records, decode_fn=decode_fn, mask_fn=mask_with_collator, batch_size=batch_size, shuffle=False, )
    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer))
    model.fit(
        train_dataset, validation_data=eval_dataset, epochs=args.num_epochs, callbacks=callbacks, )
    model.save_pretrained(args.output_dir)
if __name__ == "__main__":
    args = parse_args()
    main(args)
| 66
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"
        ),
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-200-distilled-600M": 1_024,
}
# fmt: off
SCREAMING_SNAKE_CASE__ = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class lowercase ( _UpperCAmelCase ):
_SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
_SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
_SCREAMING_SNAKE_CASE = ['input_ids', 'attention_mask']
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = []
def __init__( self , lowercase , lowercase="<s>" , lowercase="</s>" , lowercase="</s>" , lowercase="<s>" , lowercase="<unk>" , lowercase="<pad>" , lowercase="<mask>" , lowercase=None , lowercase=None , lowercase=None , lowercase = None , lowercase=None , lowercase=False , **lowercase , ) -> List[Any]:
        # Mask token behaves like a normal word, i.e. includes the space before it
lowerCAmelCase = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase ) if isinstance(lowercase , lowercase ) else mask_token
lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
lowerCAmelCase = legacy_behaviour
super().__init__(
bos_token=lowercase , eos_token=lowercase , unk_token=lowercase , sep_token=lowercase , cls_token=lowercase , pad_token=lowercase , mask_token=lowercase , tokenizer_file=lowercase , src_lang=lowercase , tgt_lang=lowercase , additional_special_tokens=lowercase , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=lowercase , **lowercase , )
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowercase ) )
lowerCAmelCase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
lowerCAmelCase = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowerCAmelCase = 1
lowerCAmelCase = len(self.sp_model )
lowerCAmelCase = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(lowercase )
}
lowerCAmelCase = {v: k for k, v in self.lang_code_to_id.items()}
lowerCAmelCase = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
lowerCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
lowerCAmelCase = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
lowerCAmelCase = src_lang if src_lang is not None else """eng_Latn"""
lowerCAmelCase = self.lang_code_to_id[self._src_lang]
lowerCAmelCase = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ) -> Tuple:
lowerCAmelCase = self.__dict__.copy()
lowerCAmelCase = None
lowerCAmelCase = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , lowercase ) -> int:
lowerCAmelCase = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
lowerCAmelCase = {}
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def _snake_case ( self ) -> Optional[int]:
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def _snake_case ( self ) -> str:
return self._src_lang
@src_lang.setter
def _snake_case ( self , lowercase ) -> None:
lowerCAmelCase = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _snake_case ( self , lowercase , lowercase = None , lowercase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase , token_ids_a=lowercase , already_has_special_tokens=lowercase )
lowerCAmelCase = [1] * len(self.prefix_tokens )
lowerCAmelCase = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(lowercase )) + suffix_ones
return prefix_ones + ([0] * len(lowercase )) + ([0] * len(lowercase )) + suffix_ones
def _snake_case ( self , lowercase , lowercase = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _snake_case ( self , lowercase , lowercase = None ) -> List[int]:
lowerCAmelCase = [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , **lowercase ) -> Optional[Any]:
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
lowerCAmelCase = src_lang
lowerCAmelCase = self(lowercase , add_special_tokens=lowercase , return_tensors=lowercase , **lowercase )
lowerCAmelCase = self.convert_tokens_to_ids(lowercase )
lowerCAmelCase = tgt_lang_id
return inputs
def _snake_case ( self ) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _snake_case ( self , lowercase ) -> List[str]:
return self.sp_model.encode(lowercase , out_type=lowercase )
def _snake_case ( self , lowercase ) -> Optional[Any]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowerCAmelCase = self.sp_model.PieceToId(lowercase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _snake_case ( self , lowercase ) -> Tuple:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _snake_case ( self , lowercase ) -> List[str]:
lowerCAmelCase = """""".join(lowercase ).replace(lowercase , """ """ ).strip()
return out_string
def _snake_case ( self , lowercase , lowercase = None ) -> Tuple[str]:
if not os.path.isdir(lowercase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
lowerCAmelCase = os.path.join(
lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowercase )
elif not os.path.isfile(self.vocab_file ):
with open(lowercase , """wb""" ) as fi:
lowerCAmelCase = self.sp_model.serialized_model_proto()
fi.write(lowercase )
return (out_vocab_file,)
def _snake_case ( self , lowercase , lowercase = "eng_Latn" , lowercase = None , lowercase = "fra_Latn" , **lowercase , ) -> BatchEncoding:
lowerCAmelCase = src_lang
lowerCAmelCase = tgt_lang
return super().prepare_seqaseq_batch(lowercase , lowercase , **lowercase )
def _snake_case ( self ) -> Any:
return self.set_src_lang_special_tokens(self.src_lang )
def _snake_case ( self ) -> Union[str, Any]:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _snake_case ( self , lowercase ) -> None:
lowerCAmelCase = self.lang_code_to_id[src_lang]
if self.legacy_behaviour:
lowerCAmelCase = []
lowerCAmelCase = [self.eos_token_id, self.cur_lang_code]
else:
lowerCAmelCase = [self.cur_lang_code]
lowerCAmelCase = [self.eos_token_id]
def _snake_case ( self , lowercase ) -> None:
lowerCAmelCase = self.lang_code_to_id[lang]
if self.legacy_behaviour:
lowerCAmelCase = []
lowerCAmelCase = [self.eos_token_id, self.cur_lang_code]
else:
lowerCAmelCase = [self.cur_lang_code]
lowerCAmelCase = [self.eos_token_id]
| 46
|
from __future__ import annotations
def fractional_knapsack(
    value: list[int], weight: list[int], capacity: int
) -> tuple[float, list[float]]:
    """simple docstring"""
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)
    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
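    # A usage sketch with assumed example data: items are taken greedily in
    # decreasing value/weight ratio, and the first item that no longer fits is
    # taken fractionally (here 20 of the 30 units of item 2, i.e. 2/3 of it).
    max_value, fractions = fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
    assert max_value == 240.0  # 60 + 100 + (2 / 3) * 120
    assert fractions == [1, 1, 2 / 3]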
| 279
| 0
|
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{two_pointer([2, 7, 1_1, 1_5], 9) = }""")
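    # The two-pointer scan requires `nums` to be sorted in ascending order;
    # two illustrative checks:
    assert two_pointer([2, 7, 11, 15], 9) == [0, 1]
    assert two_pointer([2, 7, 11, 15], 100) == []  # no pair reaches the target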
| 351
|
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
lowercase_ = logging.get_logger(__name__)
class A_ ( __UpperCamelCase ):
'''simple docstring'''
def __init__( self: List[str] , *a: List[Any] , **a: Optional[Any] ):
warnings.warn(
'The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use ChineseCLIPImageProcessor instead.' , a , )
super().__init__(*a , **a )
| 194
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase = logging.get_logger(__name__)
__lowercase = {
"""MIT/ast-finetuned-audioset-10-10-0.4593""": (
"""https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"""
),
}
class _A ( _a ):
"""simple docstring"""
UpperCAmelCase : List[str] = """audio-spectrogram-transformer"""
def __init__( self : List[Any] , __UpperCAmelCase : str=768 , __UpperCAmelCase : Dict=12 , __UpperCAmelCase : Optional[int]=12 , __UpperCAmelCase : Any=3072 , __UpperCAmelCase : List[Any]="gelu" , __UpperCAmelCase : Tuple=0.0 , __UpperCAmelCase : Any=0.0 , __UpperCAmelCase : List[str]=0.02 , __UpperCAmelCase : int=1e-12 , __UpperCAmelCase : str=16 , __UpperCAmelCase : Optional[Any]=True , __UpperCAmelCase : Any=10 , __UpperCAmelCase : List[Any]=10 , __UpperCAmelCase : List[str]=1024 , __UpperCAmelCase : str=128 , **__UpperCAmelCase : Dict , ):
super().__init__(**__UpperCAmelCase)
a : Any = hidden_size
a : Tuple = num_hidden_layers
a : Any = num_attention_heads
a : Optional[Any] = intermediate_size
a : str = hidden_act
a : Tuple = hidden_dropout_prob
a : Optional[int] = attention_probs_dropout_prob
a : Optional[int] = initializer_range
a : Any = layer_norm_eps
a : Optional[int] = patch_size
a : Optional[Any] = qkv_bias
a : Optional[Any] = frequency_stride
a : Optional[Any] = time_stride
a : Tuple = max_length
a : Optional[Any] = num_mel_bins
| 40
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_rag""": ["""RagConfig"""],
"""retrieval_rag""": ["""RagRetriever"""],
"""tokenization_rag""": ["""RagTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rag"] = [
"""RagModel""",
"""RagPreTrainedModel""",
"""RagSequenceForGeneration""",
"""RagTokenForGeneration""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rag"] = [
"""TFRagModel""",
"""TFRagPreTrainedModel""",
"""TFRagSequenceForGeneration""",
"""TFRagTokenForGeneration""",
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 40
| 1
|
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser('Stable Diffusion script with intel optimization', add_help=False)
parser.add_argument('--dpm', action='store_true', help='Enable DPMSolver or not')
parser.add_argument('--steps', default=None, type=int, help='Num inference steps')
args = parser.parse_args()
device = 'cpu'
prompt = 'a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings'
model_id = 'path-to-your-trained-model'
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)
# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)
# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {'generator': generator}
if args.steps is not None:
    generate_kwargs['num_inference_steps'] = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save('generated.png')
| 15
|
from math import ceil
def solution(n: int = 1_001) -> int:
    """simple docstring"""
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total
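# Derivation of the loop update: for ring i of the spiral, the side length is
# odd = 2*i + 1 and the four diagonal corners are odd**2, odd**2 - even,
# odd**2 - 2*even and odd**2 - 3*even with even = 2*i, so each ring contributes
# 4 * odd**2 - 6 * even to the diagonal sum.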
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
        n = int(sys.argv[1])
        print(solution(n))
except ValueError:
print('''Invalid entry - please enter a number''')
| 15
| 1
|
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __A :
a__ : str = field(
default=UpperCamelCase__ , metadata={"""help""": """Model type selected in the list: """ + """, """.join(UpperCamelCase__ )} )
a__ : str = field(
default=UpperCamelCase__ , metadata={"""help""": """The input data dir. Should contain the .json files for the SQuAD task."""} )
a__ : int = field(
default=128 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
a__ : int = field(
default=128 , metadata={"""help""": """When splitting up a long document into chunks, how much stride to take between chunks."""} , )
a__ : int = field(
default=64 , metadata={
"""help""": (
"""The maximum number of tokens for the question. Questions longer than this will """
"""be truncated to this length."""
)
} , )
a__ : int = field(
default=30 , metadata={
"""help""": (
"""The maximum length of an answer that can be generated. This is needed because the start """
"""and end predictions are not conditioned on one another."""
)
} , )
a__ : bool = field(
default=UpperCamelCase__ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
a__ : bool = field(
default=UpperCamelCase__ , metadata={"""help""": """If true, the SQuAD examples contain some that do not have an answer."""} )
a__ : float = field(
default=0.0 , metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""} )
a__ : int = field(
default=20 , metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""} )
a__ : int = field(
default=0 , metadata={
"""help""": (
"""language id of input for language-specific xlm models (see"""
""" tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"""
)
} , )
a__ : int = field(default=1 , metadata={"""help""": """multiple threads for converting example to features"""} )
class __A ( UpperCamelCase__ ):
a__ : str = """train"""
a__ : List[Any] = """dev"""
class __A ( UpperCamelCase__ ):
a__ : SquadDataTrainingArguments
a__ : List[SquadFeatures]
a__ : Split
a__ : bool
def __init__(self : Optional[int] , __a : SquadDataTrainingArguments , __a : PreTrainedTokenizer , __a : Optional[int] = None , __a : Union[str, Split] = Split.train , __a : Optional[bool] = False , __a : Optional[str] = None , __a : Optional[str] = "pt" , ):
UpperCAmelCase_ = args
UpperCAmelCase_ = is_language_sensitive
        UpperCAmelCase_ = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
if isinstance(__a , __a ):
try:
UpperCAmelCase_ = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
UpperCAmelCase_ = mode
# Load data features from cache or dataset file
UpperCAmelCase_ = "v2" if args.version_2_with_negative else "v1"
UpperCAmelCase_ = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}""" , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
UpperCAmelCase_ = cached_features_file + ".lock"
with FileLock(__a ):
if os.path.exists(__a ) and not args.overwrite_cache:
UpperCAmelCase_ = time.time()
UpperCAmelCase_ = torch.load(__a )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
UpperCAmelCase_ = self.old_features["features"]
UpperCAmelCase_ = self.old_features.get("dataset" , __a )
UpperCAmelCase_ = self.old_features.get("examples" , __a )
logger.info(
f"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f"""Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"""
" future run" )
else:
if mode == Split.dev:
UpperCAmelCase_ = self.processor.get_dev_examples(args.data_dir )
else:
UpperCAmelCase_ = self.processor.get_train_examples(args.data_dir )
UpperCAmelCase_ , UpperCAmelCase_ = squad_convert_examples_to_features(
examples=self.examples , tokenizer=__a , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=__a , )
UpperCAmelCase_ = time.time()
torch.save(
{"features": self.features, "dataset": self.dataset, "examples": self.examples} , __a , )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""" )
def __len__(self : List[Any] ):
return len(self.features )
def __getitem__(self : Dict , __a : Optional[int] ):
# Convert to Tensors and build dataset
UpperCAmelCase_ = self.features[i]
UpperCAmelCase_ = torch.tensor(feature.input_ids , dtype=torch.long )
UpperCAmelCase_ = torch.tensor(feature.attention_mask , dtype=torch.long )
UpperCAmelCase_ = torch.tensor(feature.token_type_ids , dtype=torch.long )
UpperCAmelCase_ = torch.tensor(feature.cls_index , dtype=torch.long )
UpperCAmelCase_ = torch.tensor(feature.p_mask , dtype=torch.float )
UpperCAmelCase_ = torch.tensor(feature.is_impossible , dtype=torch.float )
UpperCAmelCase_ = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"is_impossible": is_impossible} )
if self.is_language_sensitive:
inputs.update({"langs": (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
UpperCAmelCase_ = torch.tensor(feature.start_position , dtype=torch.long )
UpperCAmelCase_ = torch.tensor(feature.end_position , dtype=torch.long )
inputs.update({"start_positions": start_positions, "end_positions": end_positions} )
return inputs
| 1
|
'''simple docstring'''
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class __A ( unittest.TestCase ):
def __init__(self : str , __a : Optional[Any] , __a : Optional[Any]=13 , __a : int=30 , __a : Union[str, Any]=2 , __a : Dict=3 , __a : List[Any]=True , __a : Optional[Any]=True , __a : List[Any]=32 , __a : Any=5 , __a : str=4 , __a : Optional[int]=37 , __a : Optional[int]="gelu" , __a : List[str]=0.1 , __a : Tuple=0.1 , __a : List[str]=10 , __a : Optional[int]=0.02 , ):
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = image_size
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = type_sequence_label_size
UpperCAmelCase_ = initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase_ = (image_size // patch_size) ** 2
UpperCAmelCase_ = num_patches + 1
def _lowercase (self : Any ):
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ = ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , )
return config, pixel_values
def _lowercase (self : Dict , __a : Any , __a : List[Any] ):
UpperCAmelCase_ = FlaxViTModel(config=__a )
UpperCAmelCase_ = model(__a )
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase_ = (self.image_size, self.image_size)
UpperCAmelCase_ = (self.patch_size, self.patch_size)
UpperCAmelCase_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )
def _lowercase (self : Tuple , __a : str , __a : Any ):
UpperCAmelCase_ = self.type_sequence_label_size
UpperCAmelCase_ = FlaxViTForImageClassification(config=__a )
UpperCAmelCase_ = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase_ = 1
UpperCAmelCase_ = FlaxViTForImageClassification(__a )
UpperCAmelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ = model(__a )
def _lowercase (self : Optional[Any] ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_flax
class __A ( UpperCamelCase__ , unittest.TestCase ):
a__ : Tuple = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def _lowercase (self : Any ):
UpperCAmelCase_ = FlaxViTModelTester(self )
UpperCAmelCase_ = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 )
def _lowercase (self : Tuple ):
self.config_tester.run_common_tests()
def _lowercase (self : str ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def _lowercase (self : str ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
def _lowercase (self : Tuple ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(__a )
UpperCAmelCase_ = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __a )
def _lowercase (self : Optional[Any] ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase_ = self._prepare_for_class(__a , __a )
UpperCAmelCase_ = model_class(__a )
@jax.jit
def model_jitted(__a : Tuple , **__a : List[Any] ):
return model(pixel_values=__a , **__a )
with self.subTest("JIT Enabled" ):
UpperCAmelCase_ = model_jitted(**__a ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
UpperCAmelCase_ = model_jitted(**__a ).to_tuple()
self.assertEqual(len(__a ) , len(__a ) )
for jitted_output, output in zip(__a , __a ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def _lowercase (self : Tuple ):
for model_class_name in self.all_model_classes:
UpperCAmelCase_ = model_class_name.from_pretrained("google/vit-base-patch16-224" )
UpperCAmelCase_ = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(__a )
| 1
| 1
|
"""simple docstring"""
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None
    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state
    if visualization:
        from matplotlib import pyplot as plt
        plt.plot(range(iterations), scores)
        plt.xlabel('Iterations')
        plt.ylabel('Function values')
        plt.show()
    return best_state
if __name__ == "__main__":
    def test_f1(x, y):
        return (x**2) + (y**2)
    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_min.score()}"
    )
    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_max.score()}"
    )
    def test_f2(x, y):
        return (3 * x**2) - (6 * y)
    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_min.score()}"
    )
    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_max.score()}"
    )
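    # The acceptance rule above is the Metropolis criterion: a worsening move with
    # score change delta (< 0 after the find_max sign flip) is accepted with
    # probability e ** (delta / T); a quick numeric sketch with assumed values:
    for temperature in (100, 10, 1):
        delta = -5
        print(f"T={temperature:>3}: acceptance probability = {math.e ** (delta / temperature):.3f}")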
| 182
|
"""simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
| 182
| 1
|
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
A : int = logging.get_logger(__name__)
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self : Optional[int] , *__magic_name__ : str , **__magic_name__ : Tuple ) -> None:
warnings.warn(
"The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use YolosImageProcessor instead." , __magic_name__ , )
super().__init__(*__magic_name__ , **__magic_name__ )
| 118
|
"""simple docstring"""
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a__ ( SCREAMING_SNAKE_CASE__, unittest.TestCase ):
_lowerCamelCase = CLIPTokenizer
_lowerCamelCase = CLIPTokenizerFast
_lowerCamelCase = True
_lowerCamelCase = {}
_lowerCamelCase = False
def lowercase ( self : Tuple ) -> int:
super().setUp()
# fmt: off
lowercase : Dict = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
lowercase : List[Any] = dict(zip(lowerCAmelCase, range(len(lowerCAmelCase ) ) ) )
lowercase : List[str] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>']
lowercase : Union[str, Any] = {'unk_token': '<unk>'}
lowercase : Union[str, Any] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'] )
lowercase : Tuple = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file, 'w', encoding='utf-8' ) as fp:
fp.write(json.dumps(lowerCAmelCase ) + '\n' )
with open(self.merges_file, 'w', encoding='utf-8' ) as fp:
fp.write('\n'.join(lowerCAmelCase ) )
def lowercase ( self : Dict, **lowerCAmelCase : Optional[Any] ) -> List[Any]:
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname, **lowerCAmelCase )
def lowercase ( self : Optional[Any], **lowerCAmelCase : Tuple ) -> str:
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **lowerCAmelCase )
def lowercase ( self : Optional[Any], lowerCAmelCase : List[Any] ) -> Optional[Any]:
lowercase : int = 'lower newer'
lowercase : str = 'lower newer'
return input_text, output_text
def lowercase ( self : Optional[Any] ) -> Optional[Any]:
lowercase : str = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map )
lowercase : Union[str, Any] = 'lower newer'
lowercase : List[str] = ['lo', 'w', 'er</w>', 'n', 'e', 'w', 'er</w>']
lowercase : List[str] = tokenizer.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase, lowerCAmelCase )
lowercase : int = tokens + [tokenizer.unk_token]
lowercase : Optional[int] = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase ), lowerCAmelCase )
    @require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = "xa\u0303y" + " " + "x\xe3y"
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    "\u0009",  # (horizontal tab, "\t")
                    "\u000B",  # (vertical tab)
                    "\u000C",  # (form feed)
                    "\u0020",  # (space, " ")
                    "\u200E",  # (left-to-right mark)
                    "\u200F",  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    "\u000A",  # (line feed, "\n")
                    "\r\n",  # (carriage return and line feed, "\r\n")
                    "\u000D",  # (carriage return, "\r")
                    "\r",  # (carriage return, "\r")
                    "\u000D",  # (carriage return, "\r")
                    "\u2028",  # (line separator)
                    "\u2029",  # (paragraph separator)
                    # "\u0085", # (next line)
                ]
                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)
    def test_offsets_mapping_with_different_add_prefix_space_argument(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`

                text = f"{text_of_1_token} {text_of_1_token}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, use_fast=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, use_fast=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
    def test_log_warning(self):
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error is raised when the user tries to load a previous version of the tokenizer.
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer")
        self.assertTrue(
            context.exception.args[0].startswith("The `backend_tokenizer` provided does not match the expected format.")
        )
    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()
    def test_added_tokens_do_lower_case(self):
        # CLIP always lower cases letters
        pass
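# Illustrative companion to the note above (an added sketch, not part of the
# test suite): CLIP's BPE lower-cases its input, so both spellings should
# tokenize identically for any tokenizer built from the fixture vocabulary.
def _demo_clip_lowercasing(tokenizer) -> bool:
    return tokenizer.tokenize("LOWER NEWER") == tokenizer.tokenize("lower newer")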
def odd_even_sort(input_list: list) -> list:
    """Sort ``input_list`` in place using odd-even transposition (brick) sort."""
    is_sorted = False
    while is_sorted is False:  # Keep looping until a full pass makes no swaps
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
    return input_list
if __name__ == "__main__":
    print("Enter list to be sorted")
    input_list = [int(x) for x in input().split()]
    # reading the elements of the list from one line
    sorted_list = odd_even_sort(input_list)
    print("The sorted list is")
    print(sorted_list)
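# A minimal non-interactive check of the sort above (an illustrative addition,
# not part of the original script):
def _demo_odd_even_sort() -> None:
    assert odd_even_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]
    assert odd_even_sort([]) == []  # empty and already-sorted inputs terminate after one pass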
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UNet1DOutput(BaseOutput):
    sample: torch.FloatTensor


class UNet1DModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        sample_size: int = 65536,
        sample_rate: Optional[int] = None,
        in_channels: int = 2,
        out_channels: int = 2,
        extra_in_channels: int = 0,
        time_embedding_type: str = "fourier",
        flip_sin_to_cos: bool = True,
        use_timestep_embedding: bool = False,
        freq_shift: float = 0.0,
        down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
        up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        mid_block_type: str = "UNetMidBlock1D",
        out_block_type: str = None,
        block_out_channels: Tuple[int] = (32, 32, 64),
        act_fn: str = None,
        norm_num_groups: int = 8,
        layers_per_block: int = 1,
        downsample_each_block: bool = False,
    ):
        super().__init__()
        self.sample_size = sample_size

        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
            )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift
            )
            timestep_input_dim = block_out_channels[0]

        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim,
                time_embed_dim=time_embed_dim,
                act_fn=act_fn,
                out_dim=block_out_channels[0],
            )

        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None

        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            if i == 0:
                input_channel += extra_in_channels
            is_final_block = i == len(block_out_channels) - 1
            down_block = get_down_block(
                down_block_type,
                num_layers=layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_downsample=not is_final_block or downsample_each_block,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = get_mid_block(
            mid_block_type,
            in_channels=block_out_channels[-1],
            mid_channels=block_out_channels[-1],
            out_channels=block_out_channels[-1],
            embed_dim=block_out_channels[0],
            num_layers=layers_per_block,
            add_downsample=downsample_each_block,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]

        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )
            is_final_block = i == len(block_out_channels) - 1
            up_block = get_up_block(
                up_block_type,
                num_layers=layers_per_block,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_upsample=not is_final_block,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type,
            num_groups_out=num_groups_out,
            embed_dim=block_out_channels[0],
            out_channels=out_channels,
            act_fn=act_fn,
            fc_dim=block_out_channels[-1] // 4,
        )
    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        return_dict: bool = True,
    ) -> Union[UNet1DOutput, Tuple]:
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)

        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)

        if not return_dict:
            return (sample,)

        return UNet1DOutput(sample=sample)
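# Shape sketch for the model above (an illustrative addition; the sizes and the
# (batch, channels, length) layout are assumptions based on the 1D convolutions):
#
#     model = UNet1DModel()                                  # audio-scale defaults
#     sample = torch.randn(1, 2, model.config.sample_size)   # (batch, channels, length)
#     out = model(sample, timestep=10).sample                # same layout as the input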
from datetime import datetime

import requests
from bs4 import BeautifulSoup

if __name__ == "__main__":
    url = input("Enter image url: ").strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    # (a defensive variant of this lookup follows below)
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, "wb") as fp:
        fp.write(image_data)
    print(f"Done. Image saved to disk as {file_name}.")
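# Defensive variant of the og:image lookup above (an illustrative sketch, not in
# the original script; real pages may omit the tag or leave its content empty):
def find_og_image(soup: BeautifulSoup) -> str:
    meta = soup.find("meta", {"property": "og:image"})
    if meta is None or not meta.get("content"):
        raise ValueError("No og:image meta tag found on the page.")
    return meta["content"]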
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( ) -> str:
SCREAMING_SNAKE_CASE_ = 10
SCREAMING_SNAKE_CASE_ = datasets.Features(
{
'tokens': datasets.Sequence(datasets.Value('string' ) ),
'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'] ) ),
'answers': datasets.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
'id': datasets.Value('int64' ),
} )
SCREAMING_SNAKE_CASE_ = datasets.Dataset.from_dict(
{
'tokens': [['foo'] * 5] * n,
'labels': [[1] * 5] * n,
'answers': [{'answer_start': [97], 'text': ['1976']}] * 10,
'id': list(range(__UpperCAmelCase ) ),
} , features=__UpperCAmelCase , )
return dataset
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Tuple , __UpperCAmelCase : str ) -> int:
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'file.arrow' )
dataset.map(cache_file_name=__UpperCAmelCase )
return filename
# FILE_CONTENT + files
lowerCamelCase__ : List[Any] = '\\n Text data.\n Second line of data.'
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Any ) -> List[str]:
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'file.txt'
SCREAMING_SNAKE_CASE_ = FILE_CONTENT
with open(__UpperCAmelCase , 'w' ) as f:
f.write(__UpperCAmelCase )
return filename
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Dict ) -> List[str]:
    import bz2
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'file.txt.bz2'
SCREAMING_SNAKE_CASE_ = bytes(__UpperCAmelCase , 'utf-8' )
    with bz2.open(__UpperCAmelCase , 'wb' ) as f:
f.write(__UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : List[Any] ) -> Any:
import gzip
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'file.txt.gz' )
SCREAMING_SNAKE_CASE_ = bytes(__UpperCAmelCase , 'utf-8' )
with gzip.open(__UpperCAmelCase , 'wb' ) as f:
f.write(__UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : List[str] ) -> int:
if datasets.config.LZ4_AVAILABLE:
        import lz4.frame
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'file.txt.lz4'
SCREAMING_SNAKE_CASE_ = bytes(__UpperCAmelCase , 'utf-8' )
    with lz4.frame.open(__UpperCAmelCase , 'wb' ) as f:
f.write(__UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : List[Any] , __UpperCAmelCase : Union[str, Any] ) -> Any:
if datasets.config.PY7ZR_AVAILABLE:
        import py7zr
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'file.txt.7z'
    with py7zr.SevenZipFile(__UpperCAmelCase , 'w' ) as archive:
archive.write(__UpperCAmelCase , arcname=os.path.basename(__UpperCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Dict , __UpperCAmelCase : str ) -> str:
import tarfile
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'file.txt.tar'
with tarfile.TarFile(__UpperCAmelCase , 'w' ) as f:
f.add(__UpperCAmelCase , arcname=os.path.basename(__UpperCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : str ) -> List[Any]:
import lzma
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'file.txt.xz'
SCREAMING_SNAKE_CASE_ = bytes(__UpperCAmelCase , 'utf-8' )
with lzma.open(__UpperCAmelCase , 'wb' ) as f:
f.write(__UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Dict , __UpperCAmelCase : Dict ) -> str:
import zipfile
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'file.txt.zip'
with zipfile.ZipFile(__UpperCAmelCase , 'w' ) as f:
f.write(__UpperCAmelCase , arcname=os.path.basename(__UpperCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Dict ) -> Any:
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'file.txt.zst'
SCREAMING_SNAKE_CASE_ = bytes(__UpperCAmelCase , 'utf-8' )
with zstd.open(__UpperCAmelCase , 'wb' ) as f:
f.write(__UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : int ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'file.xml'
SCREAMING_SNAKE_CASE_ = textwrap.dedent(
'\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>' )
with open(__UpperCAmelCase , 'w' ) as f:
f.write(__UpperCAmelCase )
return filename
lowerCamelCase__ : Optional[Any] = [
{'col_1': '0', 'col_2': 0, 'col_3': 0.0},
{'col_1': '1', 'col_2': 1, 'col_3': 1.0},
{'col_1': '2', 'col_2': 2, 'col_3': 2.0},
{'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
lowerCamelCase__ : Dict = [
{'col_1': '4', 'col_2': 4, 'col_3': 4.0},
{'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
lowerCamelCase__ : Optional[int] = {
'col_1': ['0', '1', '2', '3'],
'col_2': [0, 1, 2, 3],
'col_3': [0.0, 1.0, 2.0, 3.0],
}
lowerCamelCase__ : List[Any] = [
{'col_3': 0.0, 'col_1': '0', 'col_2': 0},
{'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
lowerCamelCase__ : List[Any] = [
{'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
{'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
{'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
{'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( ) -> Tuple:
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = datasets.Dataset.from_dict(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' )
dataset.map(cache_file_name=__UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : List[str] ) -> List[str]:
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset.sqlite' )
    with contextlib.closing(sqlite3.connect(__UpperCAmelCase ) ) as con:
SCREAMING_SNAKE_CASE_ = con.cursor()
cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)' )
for item in DATA:
cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)' , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Optional[int] ) -> str:
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' )
with open(__UpperCAmelCase , 'w' , newline='' ) as f:
SCREAMING_SNAKE_CASE_ = csv.DictWriter(__UpperCAmelCase , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(__UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Tuple ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' )
with open(__UpperCAmelCase , 'w' , newline='' ) as f:
SCREAMING_SNAKE_CASE_ = csv.DictWriter(__UpperCAmelCase , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(__UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : str , __UpperCAmelCase : Tuple ) -> str:
    import bz2
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'dataset.csv.bz2'
with open(__UpperCAmelCase , 'rb' ) as f:
SCREAMING_SNAKE_CASE_ = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(__UpperCAmelCase , 'wb' ) as f:
f.write(__UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Dict , __UpperCAmelCase : List[str] , __UpperCAmelCase : str ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(__UpperCAmelCase , 'w' ) as f:
f.write(__UpperCAmelCase , arcname=os.path.basename(__UpperCAmelCase ) )
f.write(__UpperCAmelCase , arcname=os.path.basename(__UpperCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : List[Any] , __UpperCAmelCase : int , __UpperCAmelCase : str ) -> Any:
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(__UpperCAmelCase , 'w' ) as f:
f.write(__UpperCAmelCase , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV' ) ) )
f.write(__UpperCAmelCase , arcname=os.path.basename(csva_path.replace('.csv' , '.CSV' ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : int , __UpperCAmelCase : Tuple , __UpperCAmelCase : List[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.csv.zip'
with zipfile.ZipFile(__UpperCAmelCase , 'w' ) as f:
f.write(__UpperCAmelCase , arcname=os.path.join('main_dir' , os.path.basename(__UpperCAmelCase ) ) )
f.write(__UpperCAmelCase , arcname=os.path.join('main_dir' , os.path.basename(__UpperCAmelCase ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : List[str] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset.parquet' )
SCREAMING_SNAKE_CASE_ = pa.schema(
{
'col_1': pa.string(),
            'col_2': pa.int64(),
            'col_3': pa.float64(),
} )
with open(__UpperCAmelCase , 'wb' ) as f:
SCREAMING_SNAKE_CASE_ = pq.ParquetWriter(__UpperCAmelCase , schema=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(__UpperCAmelCase ) )] for k in DATA[0]} , schema=__UpperCAmelCase )
writer.write_table(__UpperCAmelCase )
writer.close()
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Tuple ) -> Any:
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
SCREAMING_SNAKE_CASE_ = {'data': DATA}
with open(__UpperCAmelCase , 'w' ) as f:
json.dump(__UpperCAmelCase , __UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Union[str, Any] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
SCREAMING_SNAKE_CASE_ = {'data': DATA_DICT_OF_LISTS}
with open(__UpperCAmelCase , 'w' ) as f:
json.dump(__UpperCAmelCase , __UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Dict ) -> List[str]:
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl' )
with open(__UpperCAmelCase , 'w' ) as f:
for item in DATA:
f.write(json.dumps(__UpperCAmelCase ) + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Optional[Any] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' )
with open(__UpperCAmelCase , 'w' ) as f:
for item in DATA:
f.write(json.dumps(__UpperCAmelCase ) + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : List[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' )
with open(__UpperCAmelCase , 'w' ) as f:
for item in DATA_312:
f.write(json.dumps(__UpperCAmelCase ) + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Optional[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' )
with open(__UpperCAmelCase , 'w' ) as f:
for item in DATA_STR:
f.write(json.dumps(__UpperCAmelCase ) + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[str] ) -> Union[str, Any]:
import gzip
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' )
with open(__UpperCAmelCase , 'rb' ) as orig_file:
with gzip.open(__UpperCAmelCase , 'wb' ) as zipped_file:
zipped_file.writelines(__UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : str , __UpperCAmelCase : Optional[Any] ) -> List[str]:
import gzip
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.gz' )
with open(__UpperCAmelCase , 'rb' ) as orig_file:
with gzip.open(__UpperCAmelCase , 'wb' ) as zipped_file:
zipped_file.writelines(__UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Dict ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.zip'
with zipfile.ZipFile(__UpperCAmelCase , 'w' ) as f:
f.write(__UpperCAmelCase , arcname=os.path.basename(__UpperCAmelCase ) )
f.write(__UpperCAmelCase , arcname=os.path.basename(__UpperCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Any , __UpperCAmelCase : List[str] , __UpperCAmelCase : List[str] ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.zip'
with zipfile.ZipFile(__UpperCAmelCase , 'w' ) as f:
f.write(__UpperCAmelCase , arcname=os.path.join('nested' , os.path.basename(__UpperCAmelCase ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : str ) -> Any:
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.jsonl.zip'
with zipfile.ZipFile(__UpperCAmelCase , 'w' ) as f:
f.write(__UpperCAmelCase , arcname=os.path.join('main_dir' , os.path.basename(__UpperCAmelCase ) ) )
f.write(__UpperCAmelCase , arcname=os.path.join('main_dir' , os.path.basename(__UpperCAmelCase ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : List[Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Tuple ) -> Dict:
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.tar'
with tarfile.TarFile(__UpperCAmelCase , 'w' ) as f:
f.add(__UpperCAmelCase , arcname=os.path.basename(__UpperCAmelCase ) )
f.add(__UpperCAmelCase , arcname=os.path.basename(__UpperCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[Any] ) -> List[str]:
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.tar'
with tarfile.TarFile(__UpperCAmelCase , 'w' ) as f:
f.add(__UpperCAmelCase , arcname=os.path.join('nested' , os.path.basename(__UpperCAmelCase ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Optional[Any] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = ['0', '1', '2', '3']
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' )
with open(__UpperCAmelCase , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Dict ) -> Any:
SCREAMING_SNAKE_CASE_ = ['0', '1', '2', '3']
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' )
with open(__UpperCAmelCase , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : List[Any] ) -> int:
SCREAMING_SNAKE_CASE_ = ['0', '1', '2', '3']
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'dataset.abc'
with open(__UpperCAmelCase , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Any , __UpperCAmelCase : Dict , __UpperCAmelCase : Dict ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'dataset.text.zip'
with zipfile.ZipFile(__UpperCAmelCase , 'w' ) as f:
f.write(__UpperCAmelCase , arcname=os.path.basename(__UpperCAmelCase ) )
f.write(__UpperCAmelCase , arcname=os.path.basename(__UpperCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Tuple , __UpperCAmelCase : int , __UpperCAmelCase : Optional[int] ) -> Any:
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.text.zip'
with zipfile.ZipFile(__UpperCAmelCase , 'w' ) as f:
f.write(__UpperCAmelCase , arcname=os.path.join('main_dir' , os.path.basename(__UpperCAmelCase ) ) )
f.write(__UpperCAmelCase , arcname=os.path.join('main_dir' , os.path.basename(__UpperCAmelCase ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Any , __UpperCAmelCase : Union[str, Any] ) -> Dict:
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'dataset.ext.zip'
with zipfile.ZipFile(__UpperCAmelCase , 'w' ) as f:
f.write(__UpperCAmelCase , arcname=os.path.basename('unsupported.ext' ) )
f.write(__UpperCAmelCase , arcname=os.path.basename('unsupported_2.ext' ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Union[str, Any] ) -> Dict:
SCREAMING_SNAKE_CASE_ = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'] )
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset_with_unicode_new_lines.txt' )
with open(__UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(__UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( ) -> List[Any]:
return os.path.join('tests' , 'features' , 'data' , 'test_image_rgb.jpg' )
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( ) -> Tuple:
return os.path.join('tests' , 'features' , 'data' , 'test_audio_44100.wav' )
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Dict , __UpperCAmelCase : Dict ) -> int:
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'dataset.img.zip'
with zipfile.ZipFile(__UpperCAmelCase , 'w' ) as f:
f.write(__UpperCAmelCase , arcname=os.path.basename(__UpperCAmelCase ) )
f.write(__UpperCAmelCase , arcname=os.path.basename(__UpperCAmelCase ).replace('.jpg' , '2.jpg' ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : str ) -> Dict:
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data_dir' )
(data_dir / "subdir").mkdir()
with open(data_dir / 'subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / 'subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden file
with open(data_dir / 'subdir' / '.test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '.subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / '.subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
return data_dir
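# Example of how these session-scoped fixtures are typically consumed in a test
# module (an illustrative sketch; `csv_path` is an assumed descriptive name for
# the CSV fixture defined above, and the underscore prefix keeps pytest from
# collecting the sketch itself):
def _sketch_test_csv_has_four_rows(csv_path):
    dataset = datasets.load_dataset("csv", data_files=csv_path, split="train")
    assert dataset.num_rows == 4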
def dodecahedron_surface_area(edge: float) -> float:
    """Surface area of a regular dodecahedron: 3 * sqrt(25 + 10*sqrt(5)) * edge**2."""
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive number.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def dodecahedron_volume(edge: float) -> float:
    """Volume of a regular dodecahedron: (15 + 7*sqrt(5)) / 4 * edge**3."""
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive number.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
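# Worked example for edge length 1 (an illustrative addition): the formulas give
# 3 * sqrt(25 + 10*sqrt(5)) ≈ 20.645729 and (15 + 7*sqrt(5)) / 4 ≈ 7.663119.
def _demo_dodecahedron() -> None:
    import math

    assert math.isclose(dodecahedron_surface_area(1), 20.645729, rel_tol=1e-6)
    assert math.isclose(dodecahedron_volume(1), 7.663119, rel_tol=1e-6)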
if __name__ == "__main__":
import doctest
doctest.testmod()
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextVaModelTester:
def __init__(self : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[int]=13 , _UpperCAmelCase : List[str]=32 , _UpperCAmelCase : Dict=3 , _UpperCAmelCase : Dict=4 , _UpperCAmelCase : Union[str, Any]=[10, 20, 30, 40] , _UpperCAmelCase : Optional[int]=[2, 2, 3, 2] , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : str=True , _UpperCAmelCase : Tuple=37 , _UpperCAmelCase : str="gelu" , _UpperCAmelCase : Dict=10 , _UpperCAmelCase : Dict=0.02 , _UpperCAmelCase : List[Any]=["stage2", "stage3", "stage4"] , _UpperCAmelCase : List[Any]=[2, 3, 4] , _UpperCAmelCase : Optional[Any]=None , ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = image_size
lowercase__ = num_channels
lowercase__ = num_stages
lowercase__ = hidden_sizes
lowercase__ = depths
lowercase__ = is_training
lowercase__ = use_labels
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = num_labels
lowercase__ = initializer_range
lowercase__ = out_features
lowercase__ = out_indices
lowercase__ = scope
def lowerCamelCase__ (self : Any ) -> Dict:
"""simple docstring"""
lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.num_labels )
lowercase__ = self.get_config()
return config, pixel_values, labels
def lowerCamelCase__ (self : int ) -> Dict:
"""simple docstring"""
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def lowerCamelCase__ (self : Union[str, Any] , _UpperCAmelCase : int , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple ) -> Any:
"""simple docstring"""
lowercase__ = ConvNextVaModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = model(_UpperCAmelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowerCamelCase__ (self : int , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[Any] ) -> Any:
"""simple docstring"""
lowercase__ = ConvNextVaForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase__ (self : List[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Dict ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = ConvNextVaBackbone(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = model(_UpperCAmelCase )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowercase__ = None
lowercase__ = ConvNextVaBackbone(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__ = model(_UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def lowerCamelCase__ (self : List[Any] ) -> int:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ = config_and_inputs
lowercase__ = {"""pixel_values""": pixel_values}
return config, inputs_dict
def lowerCamelCase__ (self : Optional[Any] ) -> Tuple:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ = config_and_inputs
lowercase__ = {"""pixel_values""": pixel_values, """labels""": labels}
return config, inputs_dict
@require_torch
class ConvNextVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextVaModel,
            ConvNextVaForImageClassification,
            ConvNextVaBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = ConvNextVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextVaConfig, has_text_modality=False, hidden_size=37)
def lowerCamelCase__ (self : int ) -> Tuple:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase__ (self : Tuple ) -> int:
"""simple docstring"""
return
@unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" )
def lowerCamelCase__ (self : List[str] ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" )
def lowerCamelCase__ (self : Tuple ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" )
def lowerCamelCase__ (self : int ) -> str:
"""simple docstring"""
pass
    def test_training(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True

            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_training_gradient_checkpointing(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True

            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
def lowerCamelCase__ (self : List[str] ) -> Optional[int]:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(_UpperCAmelCase )
lowercase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def lowerCamelCase__ (self : int ) -> Tuple:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def lowerCamelCase__ (self : Optional[int] ) -> str:
"""simple docstring"""
def check_hidden_states_output(_UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str ):
lowercase__ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
lowercase__ = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
lowercase__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase__ = self.model_tester.num_stages
self.assertEqual(len(_UpperCAmelCase ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def lowerCamelCase__ (self : List[Any] ) -> str:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
@slow
def lowerCamelCase__ (self : int ) -> Any:
"""simple docstring"""
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = ConvNextVaModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def UpperCamelCase ( ) -> int:
"""simple docstring"""
lowercase__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class ConvNextVaModelIntegrationTest(unittest.TestCase):
@cached_property
def lowerCamelCase__ (self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ) if is_vision_available() else None
@slow
def lowerCamelCase__ (self : Any ) -> Any:
"""simple docstring"""
lowercase__ = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(_UpperCAmelCase )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = preprocessor(images=_UpperCAmelCase , return_tensors="""pt""" ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
lowercase__ = model(**_UpperCAmelCase )
# verify the logits
lowercase__ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
lowercase__ = torch.tensor([0.9_996, 0.1_966, -0.4_386] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1E-4 ) )
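# Equivalent high-level inference for the checkpoint exercised above (an
# illustrative sketch using the generic pipeline API; not part of the test suite):
def _demo_pipeline_inference():
    from transformers import pipeline

    classifier = pipeline("image-classification", model="facebook/convnextv2-tiny-1k-224")
    return classifier(prepare_img())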
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline(DiffusionPipeline):
    def __init__(
        self,
        transformer: Transformer2DModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ) -> None:
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create an imagenet label -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip().rstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."
                )
        return [self.labels[l] for l in label]

    @torch.no_grad()
    def __call__(
        self,
        class_labels: List[int],
        guidance_scale: float = 4.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=generator,
            device=self.device,
            dtype=self.transformer.dtype,
        )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])
            # predict noise model_output
            noise_pred = self.transformer(latent_model_input, timestep=timesteps, class_labels=class_labels_input).sample

            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)
                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)
                noise_pred = torch.cat([eps, rest], dim=1)

            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred

            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample

        samples = (samples / 2 + 0.5).clamp(0, 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            samples = self.numpy_to_pil(samples)

        if not return_dict:
            return (samples,)

        return ImagePipelineOutput(images=samples)
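# Typical end-to-end usage of the pipeline above (an illustrative sketch; the
# checkpoint name refers to the public DiT release and is an assumption here,
# not something this module defines):
def _example_generation():
    pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
    ids = pipe.get_label_ids(["golden retriever"])
    return pipe(class_labels=ids, num_inference_steps=25).images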
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
_lowerCAmelCase :Optional[int] = logging.get_logger(__name__)
class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class TimmBackboneConfig(PretrainedConfig):
    r"""Configuration for a timm backbone wrapped as a Transformers backbone."""

    model_type = "timm_backbone"

    def __init__(
        self,
        backbone=None,
        num_channels=3,
        features_only=True,
        use_pretrained_backbone=True,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
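# A minimal instantiation sketch (an illustrative addition, not part of the
# original module):
def _example_config() -> TimmBackboneConfig:
    """Illustrative only; assumes the timm "resnet50" architecture is installed."""
    return TimmBackboneConfig(backbone="resnet50", out_indices=(1, 2, 3, 4))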
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = [
'word_embeddings_layernorm.weight',
'word_embeddings_layernorm.bias',
'input_layernorm.weight',
'input_layernorm.bias',
'post_attention_layernorm.weight',
'post_attention_layernorm.bias',
'self_attention.dense.bias',
'mlp.dense_4h_to_h.bias',
'ln_f.weight',
'ln_f.bias',
]
_SCREAMING_SNAKE_CASE = [
'mlp.dense_4h_to_h.weight',
'self_attention.dense.weight',
]
def layer_name_mapping(key: str, file: str) -> str:
    """Convert a Megatron-DeepSpeed weight name to the transformers naming scheme."""
    # Handle first and last layers
    layer_rename_map = {
        "word_embeddings.weight": "word_embeddings.weight",
        "word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
        "word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
        "weight": "ln_f.weight",
        "bias": "ln_f.bias",
    }
    if key in layer_rename_map:
        return layer_rename_map[key]
    # Handle transformer blocks: the layer index is encoded in the file name,
    # and Megatron numbering starts at 3 for transformer blocks.
    layer_number = int(re.match(r".*layer_(\d*).*", file)[1])
    layer_number -= 3
    return f"h.{layer_number}." + key
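# For example, the key "self_attention.dense.weight" read from a checkpoint file
# named "layer_04-model_00-model_states.pt" (a hypothetical file name following
# the Megatron layout) maps to "h.1.self_attention.dense.weight".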
def get_dtype_size(dtype: torch.dtype) -> float:
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8
def convert_bloom_checkpoint_to_pytorch(
    bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp
):
# Construct model
if bloom_config_file == "":
_A = BloomConfig()
else:
_A = BloomConfig.from_json_file(snake_case__)
if shard_model:
_A = os.listdir(snake_case__)
_A = sorted(filter(lambda snake_case__: s.startswith("""layer""") and "model_00" in s , snake_case__))
_A = {"""weight_map""": {}, """metadata""": {}}
_A = 0
_A = None
_A = BloomConfig()
for j, file in enumerate(snake_case__):
print("""Processing file: {}""".format(snake_case__))
_A = None
for i in range(snake_case__):
# load all TP files
_A = file.replace("""model_00""" , F'''model_0{i}''')
_A = torch.load(os.path.join(snake_case__ , snake_case__) , map_location="""cpu""")
# Rename keys in the transformers names
_A = list(temp.keys())
for key in keys:
_A = temp.pop(snake_case__)
if tensors is None:
_A = temp
else:
for key in tensors.keys():
if any(key.endswith(snake_case__) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
_A = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights across TP ranks
_A = torch.cat([tensors[key], temp[key]] , dim=snake_case__)
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(snake_case__) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
_A = tensors[key] / pretraining_tp
torch.save(
snake_case__ , os.path.join(
snake_case__ , """pytorch_model_{}-of-{}.bin""".format(str(j + 1).zfill(5) , str(len(snake_case__)).zfill(5)) , ) , )
for key in tensors.keys():
_A = tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype)
if key not in index_dict["weight_map"]:
_A = """pytorch_model_{}-of-{}.bin""".format(
str(j + 1).zfill(5) , str(len(snake_case__)).zfill(5))
_A = BloomConfig()
_A = pytorch_dump_folder_path + """/""" + CONFIG_NAME
_A = total_size
with open(snake_case__ , """w""" , encoding="""utf-8""") as f:
f.write(config.to_json_string())
with open(os.path.join(snake_case__ , WEIGHTS_NAME + """.index.json""") , """w""" , encoding="""utf-8""") as f:
_A = json.dumps(snake_case__ , indent=2 , sort_keys=snake_case__) + """\n"""
f.write(snake_case__)
else:
_A = BloomModel(snake_case__)
_A = os.listdir(snake_case__)
_A = sorted(filter(lambda snake_case__: s.startswith("""layer""") and "model_00" in s , snake_case__))
_A = None
for i, file in enumerate(snake_case__):
_A = None
for i in range(snake_case__):
# load all TP files
_A = file.replace("""model_00""" , F'''model_0{i}''')
_A = torch.load(os.path.join(snake_case__ , snake_case__) , map_location="""cpu""")
# Rename keys in the transformers names
_A = list(temp.keys())
for key in keys:
_A = temp.pop(snake_case__)
if tensors is None:
_A = temp
else:
for key in tensors.keys():
                        # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
if any(key.endswith(snake_case__) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
_A = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights across TP ranks
_A = torch.cat([tensors[key], temp[key]] , dim=snake_case__)
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(snake_case__) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
_A = tensors[key] / pretraining_tp
_A = model.load_state_dict(snake_case__ , strict=snake_case__)
assert not other_keys.unexpected_keys, F'''The keys {other_keys.unexpected_keys} are unexpected'''
if missing_keys is None:
_A = set(other_keys.missing_keys)
else:
_A = missing_keys.intersection(set(other_keys.missing_keys))
assert not missing_keys, F'''The keys {missing_keys} are missing'''
# Save pytorch-model
os.makedirs(snake_case__ , exist_ok=snake_case__)
_A = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
_A = pytorch_dump_folder_path + """/""" + CONFIG_NAME
print(F'''Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}''')
if config.torch_dtype is not None:
_A = model.to(config.torch_dtype)
torch.save(model.state_dict() , snake_case__)
print(F'''Save configuration file to {pytorch_config_dump_path}''')
with open(snake_case__ , """w""" , encoding="""utf-8""") as f:
f.write(config.to_json_string())
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--bloom_checkpoint_path',
default=None,
type=str,
required=True,
help='Path to the Megatron-LM checkpoint path.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--bloom_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--shard_model',
action='store_true',
help='An optional setting to shard the output model \nThis enables sharding the converted checkpoint',
)
parser.add_argument(
'--pretraining_tp',
default=4,
type=int,
help='Pretraining TP rank that has been used when training the model in Megatron-LM \n',
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
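# Example invocation (an illustrative sketch; all paths are placeholders, and
# the script name is assumed from its contents):
#
#   python convert_bloom_original_checkpoint_to_pytorch.py \
#       --bloom_checkpoint_path /path/to/megatron_checkpoint \
#       --pytorch_dump_folder_path ./bloom-pytorch \
#       --shard_model \
#       --pretraining_tp 4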
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True,
                 use_labels=True, vocab_size=99, hidden_size=32, projection_dim=32, num_hidden_layers=2,
                 num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1,
                 max_position_embeddings=512, initializer_range=0.02, bos_token_id=0, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()

        return config, input_ids, tf.convert_to_tensor(input_mask)
    def get_config(self):
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFBlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    fx_compatible = False
    test_pruning = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
| 154
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_focalnet': ['FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FocalNetConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_focalnet'] = [
'FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FocalNetForImageClassification',
'FocalNetForMaskedImageModeling',
'FocalNetBackbone',
'FocalNetModel',
'FocalNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
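
# Note: with the lazy module in place, `from transformers.models.focalnet import FocalNetModel`
# only triggers the torch-dependent submodule import on first attribute access.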
| 154
| 1
|
from __future__ import annotations
def encode(plain: str) -> list[int]:
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))
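
# Illustrative example (assumes lowercase a-z input, since the mapping is ord(char) - 96):
#   encode("hello") -> [8, 5, 12, 12, 15]
#   decode([8, 5, 12, 12, 15]) -> "hello"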
if __name__ == "__main__":
main()
| 358
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
| 103
| 0
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model
    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_karras_ve_pipeline(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 177
|
"""simple docstring"""
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    return fibonacci_digits_index(n)
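
# Example: solution(3) == 12, since F(12) = 144 is the first Fibonacci number with 3 digits.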
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 177
| 1
|
"""simple docstring"""
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''0.12.2'''):
raise Exception('''requires fairseq >= 0.12.2''')
if version.parse(fairseq.__version__) > version.parse('''2'''):
raise Exception('''requires fairseq < v2''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello, World!"
SAMPLE_LANGUAGE = "en_XX"
def convert_xmod_checkpoint_to_pytorch(
    xmod_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    data_dir = Path("data_bin")
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent),
        checkpoint_file=Path(xmod_checkpoint_path).name,
        _name="xmod_base",
        arch="xmod_base",
        task="multilingual_masked_lm",
        data_name_or_path=str(data_dir),
        bpe="sentencepiece",
        sentencepiece_model=str(Path(xmod_checkpoint_path).parent / "sentencepiece.bpe.model"),
        src_dict=str(data_dir / "dict.txt"),
    )
    xmod.eval()  # disable dropout
    print(xmod)

    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=xmod.cfg.model.encoder_embed_dim,
        num_hidden_layers=xmod.cfg.model.encoder_layers,
        num_attention_heads=xmod.cfg.model.encoder_attention_heads,
        intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
        pre_norm=xmod.cfg.model.encoder_normalize_before,
        adapter_reduction_factor=getattr(xmod.cfg.model, "bottleneck", 2),
        adapter_layer_norm=xmod.cfg.model.adapter_layer_norm,
        adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm,
        ln_before_adapter=xmod.cfg.model.ln_before_adapter,
        languages=xmod.cfg.model.languages,
    )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our X-MOD config:", config)

    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c xmod doesn't use them.
    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]

        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        ):
            raise AssertionError("Dimensions of self-attention weights do not match.")

        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias

        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError("Dimensions of self-attention output weights do not match.")
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias

        # intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match.")
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias

        # output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match.")
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias

        if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
            raise AssertionError("Lists of language adapters do not match.")
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            # NOTE: dense1/dense2 are the assumed HF-side names of the adapter down/up projections.
            to_adapter.dense1.weight = from_adapter.fc1.weight
            to_adapter.dense1.bias = from_adapter.fc1.bias
            to_adapter.dense2.weight = from_adapter.fc2.weight
            to_adapter.dense2.bias = from_adapter.fc2.bias
        # end of layer

    if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias

    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xmod_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
    args = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 356
|
"""simple docstring"""
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8

KEYMAP = {
'''tab''': ord('''\t'''),
'''newline''': ord('''\r'''),
'''esc''': 27,
'''up''': 65 + ARROW_KEY_FLAG,
'''down''': 66 + ARROW_KEY_FLAG,
'''right''': 67 + ARROW_KEY_FLAG,
'''left''': 68 + ARROW_KEY_FLAG,
'''mod_int''': 91,
'''undefined''': sys.maxsize,
'''interrupt''': 3,
'''insert''': 50,
'''delete''': 51,
'''pg_up''': 53,
'''pg_down''': 54,
}
KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]

if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
B'''\xe0H''': KEYMAP['''up'''] - ARROW_KEY_FLAG,
B'''\x00H''': KEYMAP['''up'''] - ARROW_KEY_FLAG,
B'''\xe0P''': KEYMAP['''down'''] - ARROW_KEY_FLAG,
B'''\x00P''': KEYMAP['''down'''] - ARROW_KEY_FLAG,
B'''\xe0M''': KEYMAP['''right'''] - ARROW_KEY_FLAG,
B'''\x00M''': KEYMAP['''right'''] - ARROW_KEY_FLAG,
B'''\xe0K''': KEYMAP['''left'''] - ARROW_KEY_FLAG,
B'''\x00K''': KEYMAP['''left'''] - ARROW_KEY_FLAG,
}
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    """Gets raw characters from inputs"""
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def get_character():
    """Gets a character from the keyboard and returns the key code"""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char

    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()

    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
| 188
| 0
|
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save('generated.png')
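
# Note: ipex.optimize with dtype=torch.bfloat16 prepares the modules for bf16 inference on Intel
# CPUs; the torch.cpu.amp.autocast context above must request the same dtype so the kernels match.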
| 15
|
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
    is_bs4_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
    is_torch_bf16_cpu_available,
    is_torch_bf16_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
WEIGHTS_NAME = "pytorch_model.bin"
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
ADAPTER_CONFIG_NAME = "adapter_config.json"
ADAPTER_WEIGHTS_NAME = "adapter_model.bin"
ADAPTER_SAFE_WEIGHTS_NAME = "adapter_model.safetensors"
TF2_WEIGHTS_NAME = "tf_model.h5"
TF2_WEIGHTS_INDEX_NAME = "tf_model.h5.index.json"
TF_WEIGHTS_NAME = "model.ckpt"
FLAX_WEIGHTS_NAME = "flax_model.msgpack"
FLAX_WEIGHTS_INDEX_NAME = "flax_model.msgpack.index.json"
SAFE_WEIGHTS_NAME = "model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json"
CONFIG_NAME = "config.json"
FEATURE_EXTRACTOR_NAME = "preprocessor_config.json"
IMAGE_PROCESSOR_NAME = FEATURE_EXTRACTOR_NAME
GENERATION_CONFIG_NAME = "generation_config.json"
MODEL_CARD_NAME = "modelcard.json"

SENTENCEPIECE_UNDERLINE = "▁"
SPIECE_UNDERLINE = SENTENCEPIECE_UNDERLINE  # Kept for backward compatibility

MULTIPLE_CHOICE_DUMMY_INPUTS = [
    [[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2  # Needs to have 0s and 1s only since XLM uses it for langs too.
DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def check_min_version(min_version):
    if version.parse(__version__) < version.parse(min_version):
        if "dev" in min_version:
            error_message = (
                "This example requires a source install from HuggingFace Transformers (see "
                "`https://huggingface.co/docs/transformers/installation#install-from-source`),"
            )
        else:
            error_message = f"This example requires a minimum version of {min_version},"
        error_message += f" but the version found is {__version__}.\n"
        raise ImportError(
            error_message
            + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
            "versions of HuggingFace Transformers."
        )
| 15
| 1
|
from __future__ import annotations
def merge(input_list: list, low: int, mid: int, high: int) -> list:
    """Merge the two sorted runs input_list[low:mid] and input_list[mid:high + 1] in place."""
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list
def iter_merge_sort(input_list: list) -> list:
    """Iterative (bottom-up) merge sort; returns the sorted list."""
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)

    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2
    return input_list
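
# Example: iter_merge_sort([5, 9, 8, 7, 1, 2, 7]) -> [1, 2, 5, 7, 7, 8, 9]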
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(iter_merge_sort(unsorted))
| 362
|
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
__lowerCamelCase = logging.get_logger(__name__)
class PerceiverFeatureExtractor(PerceiverImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 10
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_luke''': ['''LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LukeConfig'''],
'''tokenization_luke''': ['''LukeTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_luke"] = [
'''LUKE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LukeForEntityClassification''',
'''LukeForEntityPairClassification''',
'''LukeForEntitySpanClassification''',
'''LukeForMultipleChoice''',
'''LukeForQuestionAnswering''',
'''LukeForSequenceClassification''',
'''LukeForTokenClassification''',
'''LukeForMaskedLM''',
'''LukeModel''',
'''LukePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 128
|
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    """Create train/eval DataLoaders for the GLUE MRPC dataset."""
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
def evaluation_loop(accelerator, model, eval_dataloader, metric):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions,
            references=references,
        )

    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def training_function(config, args):
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs

    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch

    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizers's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, f"state_{starting_epoch-1}.json"), "r") as f:
            resumed_state = json.load(f)
            assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
            assert (
                resumed_state["lr"] == lr_scheduler.get_lr()[0]
            ), "Scheduler learning rate mismatch, loading from checkpoint failed"
            assert (
                resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
            ), "Optimizer learning rate mismatch, loading from checkpoint failed"
            assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
            return

    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            overall_step += 1

        output_dir = f"epoch_{epoch}"
        output_dir = os.path.join(args.output_dir, output_dir)
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["overall_step"] = overall_step
        accelerator.print(f"epoch {epoch}:", state)

        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
                json.dump(state, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--partial_train_epoch",
        type=int,
        default=None,
        help="If passed, the training will stop after this number of epochs.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=2,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
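
# Example invocation (script name and DeepSpeed config file are hypothetical):
#   accelerate launch --config_file ds_zero2.yaml this_script.py \
#       --model_name_or_path bert-base-cased --num_epochs 2 --output_dir ./checkpoints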
| 219
| 0
|
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=33,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = EsmForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = ()
    pipeline_model_mapping = (
        {
            "feature-extraction": EsmModel,
            "fill-mask": EsmForMaskedLM,
            "text-classification": EsmForSequenceClassification,
            "token-classification": EsmForTokenClassification,
            "zero-shot": EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_sequence_classification_problem_types = True

    def setUp(self):
        self.model_tester = EsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_create_position_ids_respects_padding_index(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config)

        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ]
        )
        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))
    def test_create_position_ids_from_inputs_embeds(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config)

        inputs_embeds = torch.empty(2, 4, 30)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))
    @unittest.skip(reason="Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip(reason="Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip(reason="Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_masked_lm(self):
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
            output = model(input_ids)[0]

            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size))
            self.assertEqual(output.shape, expected_shape)

            expected_slice = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        with torch.no_grad():
            model = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
            output = model(input_ids)[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 351
|
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if not scores:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )
def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34_423]
    height = math.log(len(scores), 2)
    print(f"Optimal value : {minimax(0, 0, True, scores, height)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
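
# Hand-check of the example tree above (maximizer at the root, using the
# corrected minimax): the eight scores form the leaves of a depth-3 tree, so
#   max(min(max(90, 23), max(6, 33)), min(max(21, 65), max(123, 34423)))
# = max(min(90, 33), min(65, 34423)) = max(33, 65) = 65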
| 299
| 0
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/mobilenet_v2_1.4_224': 'https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json',
'google/mobilenet_v2_1.0_224': 'https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json',
'google/mobilenet_v2_0.75_160': 'https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json',
'google/mobilenet_v2_0.35_96': 'https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class MobileNetV2Config(PretrainedConfig):
    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class MobileNetV2OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
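
# Illustrative usage sketch (hypothetical, assuming the classes defined above):
#   config = MobileNetV2Config(depth_multiplier=1.4)
#   onnx_config = MobileNetV2OnnxConfig(config)
#   onnx_config.inputs    # OrderedDict([('pixel_values', {0: 'batch'})])
#   onnx_config.outputs   # depends on onnx_config.task, see the property above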
| 6
|
'''simple docstring'''
INSTALL_CONTENT = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'

notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]
black_avoid_patterns = {
    '{processor_class}': 'FakeProcessorClass',
    '{model_class}': 'FakeModelClass',
    '{object_class}': 'FakeObjectClass',
}
| 134
| 0
|
'''simple docstring'''
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Euclidean distance computed with NumPy."""
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Euclidean distance computed with pure Python."""
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)


if __name__ == "__main__":

    def benchmark() -> None:
        """Time both implementations."""
        from timeit import timeit

        print("Without Numpy")
        print(
            timeit(
                "euclidean_distance_no_np([1, 2, 3], [4, 5, 6])",
                number=10_000,
                globals=globals(),
            )
        )
        print("With Numpy")
        print(
            timeit(
                "euclidean_distance([1, 2, 3], [4, 5, 6])",
                number=10_000,
                globals=globals(),
            )
        )

    benchmark()
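
# Sanity check (illustrative, assuming the corrected names above): both
# implementations agree on the same inputs, e.g.
#   euclidean_distance([1, 2, 3], [4, 5, 6]) == euclidean_distance_no_np([1, 2, 3], [4, 5, 6])
# and both equal sqrt(27) ~= 5.196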
| 222
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_lxmert': ['LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LxmertConfig'],
'tokenization_lxmert': ['LxmertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_lxmert_fast'] = ['LxmertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_lxmert'] = [
'LxmertEncoder',
'LxmertForPreTraining',
'LxmertForQuestionAnswering',
'LxmertModel',
'LxmertPreTrainedModel',
'LxmertVisualFeatureEncoder',
'LxmertXLayer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_lxmert'] = [
'TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLxmertForPreTraining',
'TFLxmertMainLayer',
'TFLxmertModel',
'TFLxmertPreTrainedModel',
'TFLxmertVisualFeatureEncoder',
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
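
# Illustrative sketch of the lazy-import idea (a simplified stand-in, not the
# real transformers._LazyModule implementation):
#
# import importlib
# import types
#
# class _LazyModuleSketch(types.ModuleType):
#     def __init__(self, name: str, import_structure: dict):
#         super().__init__(name)
#         # map every exported attribute to the submodule that defines it
#         self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}
#
#     def __getattr__(self, attr: str):
#         module_name = self._attr_to_module.get(attr)
#         if module_name is None:
#             raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
#         # the submodule is only imported on first attribute access
#         module = importlib.import_module("." + module_name, self.__name__)
#         return getattr(module, attr)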
| 222
| 1
|
"""simple docstring"""
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    """A callback that registers the events that go through it."""

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")
@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)
        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset, callbacks=callbacks)

    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))
        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)

    def get_expected_events(self, trainer):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events
    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()
        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(cb)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
        trainer.add_callback(cb1)
        expected_callbacks.insert(0, cb1)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))
        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))
        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback], logging_steps=3, save_steps=10, eval_steps=5, evaluation_strategy="steps"
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))
        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(callbacks=[MyTestTrainerCallback, MyTestTrainerCallback])
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
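
# Illustrative custom callback (hypothetical name, not part of the test suite):
# hooks receive (args, state, control) plus keyword extras such as `logs`.
class LossPrinterCallback(TrainerCallback):
    def on_log(self, args, state, control, logs=None, **kwargs):
        if logs and "loss" in logs:
            print(f"step {state.global_step}: loss={logs['loss']:.4f}")

# usage sketch: Trainer(model, training_args, callbacks=[LossPrinterCallback()])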
| 46
|
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path: str, config_file: str, pytorch_dump_path: str):
    """Convert a TensorFlow LXMERT checkpoint into a PyTorch model file."""
    config = LxmertConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
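
# Example invocation (all paths are placeholders):
#   python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/model.ckpt \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin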
| 146
| 0
|
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
logger = logging.get_logger(__name__)
def load_pytorch_checkpoint_in_flax_state_dict(
    flax_model, pytorch_checkpoint_path, is_sharded, allow_missing_keys=False
):
    """Load pytorch checkpoints in a flax model."""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            'Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see'
            ' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
            ' instructions.'
        )
        raise
    if not is_sharded:
        pt_path = os.path.abspath(pytorch_checkpoint_path)
        logger.info(f'Loading PyTorch weights from {pt_path}')
        pt_state_dict = torch.load(pt_path, map_location='cpu')
        logger.info(f'PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values()):,} parameters.')
        flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path, flax_model)
    return flax_state_dict
def rename_key_and_reshape_tensor(
    pt_tuple_key: Tuple[str],
    pt_tensor: np.ndarray,
    random_flax_state_dict: Dict[str, jnp.ndarray],
    model_prefix: str,
) -> (Tuple[str], np.ndarray):
    """Rename PT weight names to corresponding Flax weight names and reshape tensor if necessary."""

    def is_key_or_prefix_key_in_dict(key) -> bool:
        return len(set(random_flax_state_dict) & {key, (model_prefix,) + key}) > 0

    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('scale',)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor
    # batch norm layer mean
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('mean',)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor
    # batch norm layer var
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('var',)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('embedding',)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor
    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('kernel',)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor
    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('kernel',)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('weight',)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('bias',)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    name = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        name = pt_tuple_key[-2] + '_g'
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        name = pt_tuple_key[-2] + '_v'
    if name is not None:
        renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor
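
# Illustrative note on the transposes above (framework-agnostic sketch):
# PyTorch stores Linear weights as (out, in) and Conv2d as (out, in, kH, kW),
# while Flax kernels are (in, out) and (kH, kW, in, out) respectively.
#
#   pt_linear = np.zeros((8, 4))               # (out_features, in_features)
#   flax_linear = pt_linear.T                  # (in_features, out_features)
#   pt_conv = np.zeros((16, 3, 5, 5))          # (out, in, kH, kW)
#   flax_conv = pt_conv.transpose(2, 3, 1, 0)  # (kH, kW, in, out) == (5, 5, 3, 16)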
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model):
lowercase__: int = {k: v.numpy() for k, v in pt_state_dict.items()}
lowercase__: List[Any] = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
lowercase__: Optional[Any] = flax_model.params['params']
else:
lowercase__: List[str] = flax_model.params
lowercase__: Optional[Any] = flatten_dict(SCREAMING_SNAKE_CASE_ )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
lowercase__: Tuple = flatten_dict(flax_model.params['batch_stats'] )
random_flax_state_dict.update(SCREAMING_SNAKE_CASE_ )
lowercase__: Dict = {}
lowercase__: int = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
lowercase__: Tuple = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
lowercase__: Tuple = tuple(pt_key.split('.' ) )
# remove base model prefix if necessary
lowercase__: Dict = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
lowercase__: Union[str, Any] = pt_tuple_key[1:]
# Correctly rename weight parameters
lowercase__ , lowercase__: List[str] = rename_key_and_reshape_tensor(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# add model prefix if necessary
lowercase__: List[Any] = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
lowercase__: Union[str, Any] = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
f'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
lowercase__: Optional[Any] = jnp.asarray(SCREAMING_SNAKE_CASE_ )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
continue
# also add unexpected weight so that warning is thrown
lowercase__: Optional[Any] = jnp.asarray(SCREAMING_SNAKE_CASE_ )
else:
# also add unexpected weight so that warning is thrown
lowercase__: Any = jnp.asarray(SCREAMING_SNAKE_CASE_ )
return unflatten_dict(SCREAMING_SNAKE_CASE_ )
def convert_pytorch_sharded_state_dict_to_flax(shard_filenames, flax_model):
import torch
# Load the index
lowercase__: List[str] = {}
for shard_file in shard_filenames:
# load using msgpack utils
lowercase__: List[str] = torch.load(SCREAMING_SNAKE_CASE_ )
lowercase__: Union[str, Any] = {k: v.numpy() for k, v in pt_state_dict.items()}
lowercase__: Dict = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
lowercase__: int = flax_model.params['params']
lowercase__: Tuple = flatten_dict(SCREAMING_SNAKE_CASE_ )
random_flax_state_dict.update(flatten_dict(flax_model.params['batch_stats'] ) )
else:
lowercase__: List[str] = flax_model.params
lowercase__: List[Any] = flatten_dict(SCREAMING_SNAKE_CASE_ )
lowercase__: Optional[int] = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
lowercase__: str = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
lowercase__: List[str] = tuple(pt_key.split('.' ) )
# remove base model prefix if necessary
lowercase__: Optional[Any] = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
lowercase__: List[str] = pt_tuple_key[1:]
# Correctly rename weight parameters
lowercase__ , lowercase__: Any = rename_key_and_reshape_tensor(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# add model prefix if necessary
lowercase__: Optional[Any] = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
lowercase__: List[Any] = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
f'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
lowercase__: List[Any] = jnp.asarray(SCREAMING_SNAKE_CASE_ )
continue
if "var" in flax_key[-1]:
lowercase__: str = jnp.asarray(SCREAMING_SNAKE_CASE_ )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
continue
# also add unexpected weight so that warning is thrown
lowercase__: Any = jnp.asarray(SCREAMING_SNAKE_CASE_ )
else:
# also add unexpected weight so that warning is thrown
lowercase__: Dict = jnp.asarray(SCREAMING_SNAKE_CASE_ )
return unflatten_dict(SCREAMING_SNAKE_CASE_ )
def load_flax_checkpoint_in_pytorch_model(model, flax_checkpoint_path):
    """Load flax checkpoints in a PyTorch model."""
    flax_checkpoint_path = os.path.abspath(flax_checkpoint_path)
    logger.info(f'Loading Flax weights from {flax_checkpoint_path}')
    # import correct flax class
    flax_cls = getattr(transformers, 'Flax' + model.__class__.__name__)
    # load flax weight dict
    with open(flax_checkpoint_path, 'rb') as state_f:
        try:
            flax_state_dict = from_bytes(flax_cls, state_f.read())
        except UnpicklingError:
            raise EnvironmentError(f'Unable to convert {flax_checkpoint_path} to Flax deserializable object. ')
    return load_flax_weights_in_pytorch_model(model, flax_state_dict)
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
try:
import torch # noqa: F401
except ImportError:
logger.error(
'Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see'
' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
' instructions.' )
raise
# check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
# convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
'Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '
'before loading those in PyTorch model.' )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )
    flax_state_dict = flatten_dict(flax_state)
lowercase__: Tuple = pt_model.state_dict()
lowercase__: Dict = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split('.' )[0] for k in pt_model_dict.keys()}
)
lowercase__: Dict = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split('.' )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
lowercase__: Union[str, Any] = []
lowercase__: Optional[int] = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
lowercase__: List[Any] = flax_key_tuple[0] == pt_model.base_model_prefix
lowercase__: List[Any] = '.'.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
lowercase__: Optional[Any] = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
lowercase__: Optional[Any] = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(SCREAMING_SNAKE_CASE_ ) not in pt_model_dict:
# conv layer
lowercase__: int = flax_key_tuple[:-1] + ('weight',)
lowercase__: Dict = jnp.transpose(SCREAMING_SNAKE_CASE_ , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(SCREAMING_SNAKE_CASE_ ) not in pt_model_dict:
# linear layer
lowercase__: List[str] = flax_key_tuple[:-1] + ('weight',)
lowercase__: int = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
lowercase__: Tuple = flax_key_tuple[:-1] + ('weight',)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
lowercase__: Any = flax_key_tuple[:-1] + ('running_mean',)
elif "var" in flax_key_tuple[-1]:
lowercase__: Dict = flax_key_tuple[:-1] + ('running_var',)
if "batch_stats" in flax_state:
lowercase__: Union[str, Any] = '.'.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
lowercase__: Any = '.'.join(SCREAMING_SNAKE_CASE_ )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
lowercase__: List[str] = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
lowercase__: Any = key.split('.' )
lowercase__: Dict = None
if key_components[-3::2] == ["parametrizations", "original0"]:
lowercase__: Optional[int] = key_components[-2] + '_g'
elif key_components[-3::2] == ["parametrizations", "original1"]:
lowercase__: Optional[Any] = key_components[-2] + '_v'
if name is not None:
lowercase__: List[str] = key_components[:-3] + [name]
lowercase__: Optional[int] = '.'.join(SCREAMING_SNAKE_CASE_ )
lowercase__: List[Any] = key
if flax_key in special_pt_names:
lowercase__: List[Any] = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
f'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
f'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
else:
# add weight to pytorch dict
lowercase__: Any = np.asarray(SCREAMING_SNAKE_CASE_ ) if not isinstance(SCREAMING_SNAKE_CASE_ , np.ndarray ) else flax_tensor
lowercase__: List[Any] = torch.from_numpy(SCREAMING_SNAKE_CASE_ )
# remove from missing keys
missing_keys.remove(SCREAMING_SNAKE_CASE_ )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(SCREAMING_SNAKE_CASE_ )
pt_model.load_state_dict(SCREAMING_SNAKE_CASE_ )
# re-transform missing_keys to list
lowercase__: Any = list(SCREAMING_SNAKE_CASE_ )
if len(SCREAMING_SNAKE_CASE_ ) > 0:
logger.warning(
'Some weights of the Flax model were not used when initializing the PyTorch model'
f' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
f' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'
f' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'
' FlaxBertForSequenceClassification model).' )
else:
logger.warning(f'All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n' )
if len(SCREAMING_SNAKE_CASE_ ) > 0:
logger.warning(
f'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
f' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
' use it for predictions and inference.' )
else:
logger.warning(
f'All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'
'If your task is similar to the task the model of the checkpoint was trained on, '
f'you can already use {pt_model.__class__.__name__} for predictions without further training.' )
return pt_model
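
# Usage sketch: in practice these helpers are driven by from_pretrained, e.g.
# (the model name is only an example):
#   from transformers import FlaxBertModel
#   flax_model = FlaxBertModel.from_pretrained("bert-base-uncased", from_pt=True)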
| 366
|
from collections import deque
from math import floor
from random import random
from time import time
class DirectedGraph:
    def __init__(self):
        # adjacency map: vertex -> list of [weight, neighbour] pairs
        self.graph = {}

    # adding vertices and edges; the weight is optional; handles repetition
    def add_pair(self, u, v, w=1):
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v):
            self.graph[v] = []

    def all_nodes(self):
        return list(self.graph)

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for edge in self.graph[u]:
                if edge[1] == v:
                    self.graph[u].remove(edge)
    # if no destination is meant the default value is -1
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    # c is the count of nodes you want; if c = -1 a random size is used
    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10_000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def in_degree(self, u):
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count

    def out_degree(self, u):
        return len(self.graph[u])
    def topological_sort(self, s=-2):
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return sorted_nodes

    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
class Graph:
    def __init__(self):
        # adjacency map: vertex -> list of [weight, neighbour] pairs
        self.graph = {}

    def add_pair(self, u, v, w=1):
        # check if the u exists
        if self.graph.get(u):
            # if there already is a edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            # if there already is a edge
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            # if v does not exist
            self.graph[v] = [[w, u]]

    def remove_pair(self, u, v):
        if self.graph.get(u):
            for edge in self.graph[u]:
                if edge[1] == v:
                    self.graph[u].remove(edge)
        # the other way round
        if self.graph.get(v):
            for edge in self.graph[v]:
                if edge[1] == u:
                    self.graph[v].remove(edge)
    # if no destination is meant the default value is -1
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    # c is the count of nodes you want; if c = -1 a random size is used
    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10_000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def degree(self, u):
        return len(self.graph[u])
    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def all_nodes(self):
        return list(self.graph)

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
| 288
| 0
|
"""simple docstring"""
def excel_title_to_column(column_title: str) -> int:
    """
    Convert an Excel-style column title (A..Z, AA..) to its column number.

    >>> excel_title_to_column("A")
    1
    >>> excel_title_to_column("AB")
    28
    >>> excel_title_to_column("ZY")
    701
    """
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
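
# Worked check for the conversion above (base 26 with A=1 ... Z=26, most
# significant letter first):
#   "AB" -> 1 * 26**1 + 2 * 26**0 = 28
#   "ZY" -> 26 * 26**1 + 25 * 26**0 = 701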
| 286
|
"""simple docstring"""
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast(BertTokenizerFast):
    slow_tokenizer_class = CustomTokenizer
    pass
| 286
| 1
|
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
    """Builds tiny configs and random inputs for the MobileBert model tests."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MobileBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_mobilebert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mobilebert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_mobilebert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mobilebert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mobilebert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_mobilebert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MobileBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": MobileBertModel,
"""fill-mask""": MobileBertForMaskedLM,
"""question-answering""": MobileBertForQuestionAnswering,
"""text-classification""": MobileBertForSequenceClassification,
"""token-classification""": MobileBertForTokenClassification,
"""zero-shot""": MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = MobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
@slow
    def test_inference_no_head(self):
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.4736526e07, 8.2691656e04, 1.6521838e05],
                    [-5.7541704e-01, 3.9056022e00, 4.4011507e00],
                    [2.6047359e00, 1.5677652e00, -1.7324188e-01],
                ]
            ],
            device=torch_device,
        )
# MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
# ~1 difference, it's therefore not a good idea to measure using addition.
# Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
# result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)
self.assertTrue(lower_bound and upper_bound )
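# A minimal, standalone sketch of the ratio-based check used above (tensor values
# are made up): for outputs spanning many orders of magnitude, bounding
# expected / actual around 1 is more robust than a single absolute tolerance.
def _ratio_close(expected, actual, tolerance=1e-3):
    ratio = expected / actual
    return bool(torch.all(ratio >= 1 - tolerance) and torch.all(ratio <= 1 + tolerance))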
| 371
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
logger = logging.get_logger(__name__)

LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/layoutlmv3-base': 'https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json',
}
class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"
    def __init__(
        self,
        vocab_size=50_265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_2d_position_embeddings=1_024,
        coordinate_size=128,
        shape_size=128,
        has_relative_attention_bias=True,
        rel_pos_bins=32,
        max_rel_pos=128,
        rel_2d_pos_bins=64,
        max_rel_2d_pos=256,
        has_spatial_attention_bias=True,
        text_embed=True,
        visual_embed=True,
        input_size=224,
        num_channels=3,
        patch_size=16,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
# The order of inputs is different for question answering and sequence classification
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
else:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels'''}),
] )
@property
    def atol_for_validation(self) -> float:
return 1e-5
@property
    def default_onnx_opset(self) -> int:
return 12
    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        setattr(processor.image_processor, "apply_ocr", False)

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size

        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)

        inputs = dict(
            processor(
                dummy_image,
                text=dummy_text,
                boxes=dummy_bboxes,
                return_tensors=framework,
            )
        )

        return inputs
return inputs
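# A small sketch (assumed semantics, not the library's exact implementation) of
# how a dynamic axis (-1) is resolved to a fixed size for ONNX tracing, mirroring
# the compute_effective_axis_dimension calls above.
def _resolve_axis(dimension, fixed_dimension, num_token_to_add=0):
    if dimension <= 0:  # dynamic axis requested: trace with a small fixed size instead
        dimension = fixed_dimension
    return dimension - num_token_to_add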
| 235
| 0
|
def solution(n: int = 600_851_475_143) -> int:
    """Returns the largest prime factor of n (Project Euler problem 3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        # advance i to the next divisor of n
        while n % i != 0:
            i += 1
        ans = i
        # strip out every power of that divisor
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)
if __name__ == "__main__":
print(f'''{solution() = }''')
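# Quick sanity checks (worked example): 13195 = 5 * 7 * 13 * 29, so its largest
# prime factor is 29, and a prime number is its own largest prime factor.
assert solution(13195) == 29
assert solution(17) == 17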
| 92
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)

DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
class DebertaV2Config(PretrainedConfig):
    model_type = "deberta-v2"
    def __init__(
        self,
        vocab_size=128_100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
if self._config.type_vocab_size > 0:
return OrderedDict(
[("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis)] )
else:
return OrderedDict([("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis)] )
    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
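# Minimal usage sketch (constructor signature assumed from the OnnxConfig base
# class): with type_vocab_size == 0, DeBERTa-v2 has no token type embeddings, so
# the ONNX input spec omits "token_type_ids" entirely.
config = DebertaV2Config(type_vocab_size=0)
onnx_config = DebertaV2OnnxConfig(config)
assert "token_type_ids" not in onnx_config.inputs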
| 155
| 0
|
def topological_sort(graph):
    """Kahn's algorithm: repeatedly dequeue vertices whose indegree has dropped to zero."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)
# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
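# A quick contrast case (made-up graph): with a cycle 0 -> 1 -> 2 -> 0, no vertex
# ever reaches indegree 0, so cnt != len(graph) and "Cycle exists" is printed
# instead of an ordering.
cyclic_graph = {0: [1], 1: [2], 2: [0]}
topological_sort(cyclic_graph)  # prints: Cycle exists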
| 363
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlnet"] = [
'''XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLNetForMultipleChoice''',
'''XLNetForQuestionAnswering''',
'''XLNetForQuestionAnsweringSimple''',
'''XLNetForSequenceClassification''',
'''XLNetForTokenClassification''',
'''XLNetLMHeadModel''',
'''XLNetModel''',
'''XLNetPreTrainedModel''',
'''load_tf_weights_in_xlnet''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlnet"] = [
'''TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLNetForMultipleChoice''',
'''TFXLNetForQuestionAnsweringSimple''',
'''TFXLNetForSequenceClassification''',
'''TFXLNetForTokenClassification''',
'''TFXLNetLMHeadModel''',
'''TFXLNetMainLayer''',
'''TFXLNetModel''',
'''TFXLNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 42
| 0
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

device = torch.device("cpu")
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name):
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1_703e00, 2.1_107e00, -2.0_811e00, 8.8_685e-01, 2.4_360e-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9_636e-01, 2.3_478e-01, -1.6_963e00, -1.7_381e00, -8.6_337e-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2_768e-01, -4.7_429e-01, -1.0_897e00, -1.0_248e00, 3.5_523e-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5_330e-01, 2.4_211e-01, -6.0_185e-01, -8.2_789e-01, -6.0_446e-02] )
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
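# A tiny demonstration of the pop-and-reinsert rename pattern above, on a
# hypothetical one-entry state dict:
#   "network.0.0.pwconv.weight" -> "swiftformer.encoder.network.0.blocks.0.point_wise_conv.weight"
def _demo_rename():
    sd = {"network.0.0.pwconv.weight": "w"}
    for old, new in create_rename_keys(sd):
        rename_key(sd, old, new)
    return sd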
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint

    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swiftformer_name",
default="swiftformer_xs",
choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
type=str,
help="Name of the SwiftFormer model you\'d like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="./converted_outputs/",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")
    args = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 46
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_detr_config(model_name):
    # initialize config based on the backbone
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50")
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101")
    else:
        raise ValueError("Model name should include either resnet50 or resnet101")

    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)

    # set label attributes
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config, is_panoptic
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.conv1.weight''', '''backbone.conv_encoder.model.embedder.embedder.convolution.weight''') )
rename_keys.append(('''backbone.0.body.bn1.weight''', '''backbone.conv_encoder.model.embedder.embedder.normalization.weight''') )
rename_keys.append(('''backbone.0.body.bn1.bias''', '''backbone.conv_encoder.model.embedder.embedder.normalization.bias''') )
rename_keys.append(('''backbone.0.body.bn1.running_mean''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_mean''') )
rename_keys.append(('''backbone.0.body.bn1.running_var''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_var''') )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var""",
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var""",
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""",
F"""encoder.layers.{i}.self_attn.out_proj.weight""",
) )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias""") )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias""") )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""",
F"""decoder.layers.{i}.self_attn.out_proj.weight""",
) )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
) )
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
) )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias""") )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
] )
return rename_keys
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
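# Why the slicing above works (a general PyTorch fact, not DETR-specific):
# nn.MultiheadAttention stores the q, k and v projections stacked row-wise in a
# single (3 * hidden, hidden) in_proj matrix, so rows [0:h], [h:2h] and [2h:3h]
# hold the q, k and v weights respectively.
def _split_in_proj(in_proj_weight, hidden=256):
    q_w = in_proj_weight[:hidden, :]
    k_w = in_proj_weight[hidden : 2 * hidden, :]
    v_w = in_proj_weight[-hidden:, :]
    return q_w, k_w, v_w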
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    config, is_panoptic = get_detr_config(model_name)

    # load original model from torch hub
    model_name_to_original_name = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load("facebookresearch/detr", model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()

    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = "detr." + src
        rename_key(state_dict, src, dest)

    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)

    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val

    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion on an image
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    processor = DetrImageProcessor(format=format)
    encoding = processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)

    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info("Uploading PyTorch model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''detr-resnet-50''',
type=str,
choices=['''detr-resnet-50''', '''detr-resnet-101'''],
help='''Name of the DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the model to the hub or not.''')
    args = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 188
| 0
|
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "[CLS]",
            "[SEP]",
            "want",
            "unwanted",
            "wa",
            "un",
            "running",
            ",",
            "low",
            "l",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)

        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])

    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["hello", "!", "how", "are", "you", "?"]
        )

    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )
    def test_full_tokenizer_moses_numbers(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
            "Hello",
            "(",
            "bracket",
            ")",
            "and",
            "side",
            "@-@",
            "scrolled",
            "[",
            "and",
            "]",
            "Henry",
            "'s",
            "$",
            "5",
            "@,@",
            "000",
            "with",
            "3",
            "@.@",
            "34",
            "m",
            ".",
            "What",
            "'s",
            "up",
            "!",
            "?",
        ]

        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)

        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)
    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)

        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)

        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
| 368
|
class TrieNode:
    def __init__(self) -> None:
        self.nodes = {}  # Mapping from char to TrieNode
        self.is_leaf = False
    def insert_many(self, words: list) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf
    def delete(self, word: str) -> None:
        def _delete(curr: "TrieNode", word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)
def print_words(node: TrieNode, word: str) -> None:
    if node.is_leaf:
        print(word, end=" ")

    for key, value in node.nodes.items():
        print_words(value, word + key)
def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True
def print_results(msg: str, passes: bool) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")


def pytests() -> None:
    assert test_trie()


def main() -> None:
    print_results("Testing trie functionality", test_trie())
if __name__ == "__main__":
main()
| 66
| 0
|
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size
    def rescale(
        self, image: np.ndarray, scale: float, data_format: Optional[ChannelDimension] = None, **kwargs
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[ChannelDimension] = None) -> np.ndarray:
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width

        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)
    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
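# A quick check of the padding arithmetic in pad() above (sizes made up): height 13
# with pad_size 8 gains (13 // 8 + 1) * 8 - 13 = 3 rows, giving 16. Note the formula
# always adds at least one row, so an exact multiple (e.g. 16) would still grow to 24.
old_height, size = 13, 8
pad_height = (old_height // size + 1) * size - old_height
assert pad_height == 3 and old_height + pad_height == 16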
| 326
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_distilbert": [
"DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"DistilBertConfig",
"DistilBertOnnxConfig",
],
"tokenization_distilbert": ["DistilBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_distilbert"] = [
"DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DistilBertForMaskedLM",
"DistilBertForMultipleChoice",
"DistilBertForQuestionAnswering",
"DistilBertForSequenceClassification",
"DistilBertForTokenClassification",
"DistilBertModel",
"DistilBertPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_distilbert"] = [
"TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDistilBertForMaskedLM",
"TFDistilBertForMultipleChoice",
"TFDistilBertForQuestionAnswering",
"TFDistilBertForSequenceClassification",
"TFDistilBertForTokenClassification",
"TFDistilBertMainLayer",
"TFDistilBertModel",
"TFDistilBertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_distilbert"] = [
"FlaxDistilBertForMaskedLM",
"FlaxDistilBertForMultipleChoice",
"FlaxDistilBertForQuestionAnswering",
"FlaxDistilBertForSequenceClassification",
"FlaxDistilBertForTokenClassification",
"FlaxDistilBertModel",
"FlaxDistilBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 10
| 0
|
"""simple docstring"""
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/data2vec-base-960h''': '''https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json''',
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class Data2VecAudioConfig(PretrainedConfig):
    model_type = "data2vec-audio"
    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embedding_groups=16,
        conv_pos_kernel_size=19,
        num_conv_pos_embeddings=5,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1_500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim
    @property
    def inputs_to_logits_ratio(self):
        return math.prod(self.conv_stride)
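# Quick arithmetic for the property above: with conv strides (5, 2, 2, 2, 2, 2, 2)
# every output frame of the feature encoder covers 5 * 2**6 = 320 raw samples,
# i.e. one frame per 20 ms of 16 kHz audio.
assert math.prod((5, 2, 2, 2, 2, 2, 2)) == 320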
| 355
|
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class DebertaVaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaVaTokenizer
    rust_tokenizer_class = DebertaVaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, unk_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "[PAD]")
        self.assertEqual(len(vocab_keys), 30_001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30_000)
    def test_do_lower_case(self):
        # fmt: off
        sequence = " \tHeLLo!how \n Are yoU? "
        tokens_target = ["▁hello", "!", "how", "▁are", "▁you", "?"]
        # fmt: on

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, do_lower_case=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
@unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" )
def _A ( self : Dict ):
pass
@unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" )
def _A ( self : Optional[Any] ):
pass
def _A ( self : Optional[int] ):
# fmt: off
UpperCamelCase :Union[str, Any] = """I was born in 92000, and this is falsé."""
UpperCamelCase :int = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
UpperCamelCase :int = DebertaVaTokenizer(__lowerCamelCase , split_by_punct=__lowerCamelCase )
UpperCamelCase :int = tokenizer.convert_ids_to_tokens(tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :Optional[int] = DebertaVaTokenizerFast(__lowerCamelCase , split_by_punct=__lowerCamelCase )
UpperCamelCase :Any = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def _A ( self : int ):
# fmt: off
UpperCamelCase :Union[str, Any] = """I was born in 92000, and this is falsé."""
UpperCamelCase :Any = ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
UpperCamelCase :Tuple = DebertaVaTokenizer(__lowerCamelCase , do_lower_case=__lowerCamelCase , split_by_punct=__lowerCamelCase )
UpperCamelCase :Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :Union[str, Any] = DebertaVaTokenizerFast(__lowerCamelCase , do_lower_case=__lowerCamelCase , split_by_punct=__lowerCamelCase )
UpperCamelCase :int = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def _A ( self : Any ):
# fmt: off
UpperCamelCase :Union[str, Any] = """I was born in 92000, and this is falsé."""
UpperCamelCase :List[Any] = ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """.""", ]
# fmt: on
UpperCamelCase :Tuple = DebertaVaTokenizer(__lowerCamelCase , do_lower_case=__lowerCamelCase , split_by_punct=__lowerCamelCase )
UpperCamelCase :Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :Union[str, Any] = DebertaVaTokenizerFast(__lowerCamelCase , do_lower_case=__lowerCamelCase , split_by_punct=__lowerCamelCase )
UpperCamelCase :Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def _A ( self : str ):
# fmt: off
UpperCamelCase :List[str] = """I was born in 92000, and this is falsé."""
UpperCamelCase :int = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
UpperCamelCase :List[str] = DebertaVaTokenizer(__lowerCamelCase , do_lower_case=__lowerCamelCase , split_by_punct=__lowerCamelCase )
UpperCamelCase :Tuple = tokenizer.convert_ids_to_tokens(tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :List[str] = DebertaVaTokenizerFast(__lowerCamelCase , do_lower_case=__lowerCamelCase , split_by_punct=__lowerCamelCase )
UpperCamelCase :Optional[int] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def _A ( self : Optional[Any] ):
# fmt: off
UpperCamelCase :Optional[Any] = """ \tHeLLo!how \n Are yoU? """
UpperCamelCase :Dict = ["""▁""", """<unk>""", """e""", """<unk>""", """o""", """!""", """how""", """▁""", """<unk>""", """re""", """▁yo""", """<unk>""", """?"""]
# fmt: on
UpperCamelCase :int = DebertaVaTokenizer(__lowerCamelCase , do_lower_case=__lowerCamelCase , split_by_punct=__lowerCamelCase )
UpperCamelCase :Optional[Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :Any = DebertaVaTokenizerFast(__lowerCamelCase , do_lower_case=__lowerCamelCase , split_by_punct=__lowerCamelCase )
UpperCamelCase :Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def _A ( self : int ):
UpperCamelCase :int = self.get_tokenizer()
UpperCamelCase :str = self.get_rust_tokenizer()
UpperCamelCase :Dict = """I was born in 92000, and this is falsé."""
UpperCamelCase :List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
UpperCamelCase :Optional[int] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :List[str] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
UpperCamelCase :Optional[int] = rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :int = self.get_rust_tokenizer()
UpperCamelCase :Tuple = tokenizer.encode(__lowerCamelCase )
UpperCamelCase :Dict = rust_tokenizer.encode(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def _A ( self : Dict ):
UpperCamelCase :Optional[int] = """This is a test"""
UpperCamelCase :str = [13, 1, 4_398, 25, 21, 1_289]
UpperCamelCase :int = ["""▁""", """T""", """his""", """▁is""", """▁a""", """▁test"""]
UpperCamelCase :Any = ["""▁""", """<unk>""", """his""", """▁is""", """▁a""", """▁test"""]
UpperCamelCase :str = DebertaVaTokenizer(__lowerCamelCase , keep_accents=__lowerCamelCase )
UpperCamelCase :Union[str, Any] = DebertaVaTokenizerFast(__lowerCamelCase , keep_accents=__lowerCamelCase )
UpperCamelCase :Optional[Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :Union[str, Any] = tokenizer.tokenize(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :Any = tokenizer.convert_ids_to_tokens(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :List[Any] = rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :Optional[Any] = rust_tokenizer.tokenize(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :int = rust_tokenizer.convert_ids_to_tokens(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
# fmt: off
UpperCamelCase :Optional[Any] = """I was born in 92000, and this is falsé."""
UpperCamelCase :Any = [13, 1, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9]
UpperCamelCase :Union[str, Any] = ["""▁""", """I""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """.""", ]
UpperCamelCase :Optional[Any] = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """.""", ]
# fmt: on
UpperCamelCase :str = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :Any = tokenizer.tokenize(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :Union[str, Any] = tokenizer.convert_ids_to_tokens(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :List[Any] = rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :int = rust_tokenizer.tokenize(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :Dict = rust_tokenizer.convert_ids_to_tokens(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def _A ( self : Optional[int] ):
UpperCamelCase :str = DebertaVaTokenizer(__lowerCamelCase )
UpperCamelCase :Union[str, Any] = tokenizer.encode("""sequence builders""" )
UpperCamelCase :Any = tokenizer.encode("""multi-sequence build""" )
UpperCamelCase :Optional[int] = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase )
UpperCamelCase :str = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase , __lowerCamelCase )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , __lowerCamelCase )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , __lowerCamelCase , )
@slow
def _A ( self : List[Any] ):
# fmt: off
UpperCamelCase :Union[str, Any] = {"""input_ids""": [[1, 39_867, 36, 19_390, 486, 27, 35_052, 81_436, 18, 60_685, 1_225, 7, 35_052, 81_436, 18, 9_367, 16_899, 18, 15_937, 53, 594, 773, 18, 16_287, 30_465, 36, 15_937, 6, 41_139, 38, 36_979, 60_763, 191, 6, 34_132, 99, 6, 50_538, 390, 43_230, 6, 34_132, 2_779, 20_850, 14, 699, 1_072, 1_194, 36, 382, 10_901, 53, 7, 699, 1_072, 2_084, 36, 20_422, 630, 53, 19, 105, 3_049, 1_896, 1_053, 16_899, 1_506, 11, 37_978, 4_243, 7, 1_237, 31_869, 200, 16_566, 654, 6, 35_052, 81_436, 7, 55_630, 13_593, 4, 2], [1, 26, 15_011, 13, 667, 8, 1_053, 18, 23_611, 1_237, 72_356, 12_820, 34, 104_134, 1_209, 35, 13_313, 6_627, 21, 202, 347, 7, 164, 2_399, 11, 46, 4_485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_232, 2_864, 15_785, 14_951, 105, 5, 8_581, 1_250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowerCamelCase , model_name="""microsoft/deberta-v2-xlarge""" , revision="""ad6e42c1532ddf3a15c39246b63f5559d558b670""" , )
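
# Illustrative sketch (not part of the test file) of the slow/fast parity pattern
# used throughout the tests above: encode the same text with both implementations
# and compare the resulting ids. Class names follow the public transformers API;
# "spm_model_path" is a placeholder for any SentencePiece model file.
#
#   from transformers import DebertaV2Tokenizer, DebertaV2TokenizerFast
#
#   def check_parity(spm_model_path: str, text: str) -> None:
#       slow = DebertaV2Tokenizer(spm_model_path)
#       fast = DebertaV2TokenizerFast(spm_model_path)
#       assert slow.encode(text) == fast.encode(text), "slow/fast tokenizers disagree"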
| 62
| 0
|
"""simple docstring"""
class RadixNode:
    def __init__(self, prefix: str = "", is_leaf: bool = False):
        # Mapping from the first character of an edge to the child node
        self.nodes: dict[str, RadixNode] = {}
        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix

    def match(self, word: str) -> tuple[str, str, str]:
        """Compute the common substring of the node prefix and a word.

        Returns (common substring, remaining node prefix, remaining word).
        """
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]
    def insert_many(self, words: list[str]) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        # Case 1: The word equals the node prefix
        # Solution: We mark the current node as a leaf
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)
        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # Case 3: The node prefix is equal to the matching
            # Solution: We insert the remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)
            # Case 4: The word and the node prefix only partially match
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix
                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node
                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)
    def find(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is a remaining prefix, the word can't be in the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining, so we check the next node
            else:
                return incoming_node.find(remaining_word)
    def delete(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is a remaining prefix, the word can't be in the tree
            if remaining_prefix != "":
                return False
            # We have word remaining, so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, there is nothing to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the node if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes
                    return True

    def print_tree(self, height: int = 0) -> None:
        if self.prefix != "":
            print("-" * height, self.prefix, " (leaf)" if self.is_leaf else "")
        for value in self.nodes.values():
            value.print_tree(height + 1)
def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)
    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def pytests() -> None:
    assert test_trie()


def main() -> None:
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)
    print("Words:", words)
    print("Tree:")
    root.print_tree()
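
# Worked example (illustrative addition, not in the original module): inserting
# "banana" and then "band" splits the shared prefix "ban" into an internal node
# with two leaf children, which is exactly what insert()'s Case 4 does.
def demo_prefix_split() -> None:
    demo = RadixNode()
    demo.insert_many(["banana", "band"])
    demo.print_tree()
    # Prints approximately:
    # - ban
    # -- ana  (leaf)
    # -- d  (leaf)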
if __name__ == "__main__":
main()
| 136
|
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor of the given shape, as nested lists."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=1, padding_value=0.0, sampling_rate=16000, return_attention_mask=True, do_normalize=True):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)

    def test_call(self):
        # Tests that all calls wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))
        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
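
# Minimal usage sketch (illustrative, not part of the test file): the AST
# feature extractor maps a raw 16 kHz waveform to a fixed (1024, 128) log-mel
# spectrogram, padding or truncating along the time axis.
#
#   import numpy as np
#   from transformers import ASTFeatureExtractor
#
#   extractor = ASTFeatureExtractor()
#   waveform = np.zeros(16_000, dtype=np.float32)  # one second of silence
#   features = extractor(waveform, sampling_rate=16_000, return_tensors="np")
#   print(features.input_values.shape)  # (1, 1024, 128)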
| 261
| 0
|
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    """An edge with a destination vertex and a weight of 0 or 1."""

    destination_vertex: int
    weight: int
class AdjacencyList:
    """Graph adjacency list for 0-1 BFS."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        """Get all the vertices adjacent to the given one."""
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError('Edge weight must be either 0 or 1.')
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError('Vertex indexes must be in [0; size).')
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int:
        """Deque-based 0-1 BFS shortest path."""
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0
        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue
            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                # 0-weight edges go to the front of the deque, 1-weight to the back
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)
        if distances[finish_vertex] is None:
            raise ValueError('No path from start_vertex to finish_vertex.')
        return distances[finish_vertex]
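
def _demo() -> None:
    # Illustrative addition (not in the original file): 0-1 BFS runs in O(V + E)
    # because the deque keeps vertices ordered by distance. Here the 0-weight
    # edge makes the two-hop route exactly as cheap as the direct one.
    g = AdjacencyList(3)
    g.add_edge(0, 1, 0)
    g.add_edge(1, 2, 1)
    g.add_edge(0, 2, 1)
    assert g.get_shortest_path(0, 2) == 1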
if __name__ == "__main__":
import doctest
doctest.testmod()
| 149
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self , lowerCAmelCase , lowerCAmelCase=13 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=99 , lowerCAmelCase=32 , lowerCAmelCase=2 , lowerCAmelCase=4 , lowerCAmelCase=37 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_12 , lowerCAmelCase=16 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=False , lowerCAmelCase=True , lowerCAmelCase="None" , lowerCAmelCase=3 , lowerCAmelCase=4 , lowerCAmelCase=None , ):
"""simple docstring"""
snake_case = parent
snake_case = batch_size
snake_case = seq_length
snake_case = is_training
snake_case = use_input_mask
snake_case = use_token_type_ids
snake_case = use_labels
snake_case = vocab_size
snake_case = hidden_size
snake_case = num_hidden_layers
snake_case = num_attention_heads
snake_case = intermediate_size
snake_case = hidden_act
snake_case = hidden_dropout_prob
snake_case = attention_probs_dropout_prob
snake_case = max_position_embeddings
snake_case = type_vocab_size
snake_case = type_sequence_label_size
snake_case = initializer_range
snake_case = num_labels
snake_case = num_choices
snake_case = relative_attention
snake_case = position_biased_input
snake_case = pos_att_type
snake_case = scope
def snake_case ( self ):
"""simple docstring"""
snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case = None
if self.use_input_mask:
snake_case = random_attention_mask([self.batch_size, self.seq_length] )
snake_case = None
if self.use_token_type_ids:
snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case = None
snake_case = None
snake_case = None
if self.use_labels:
snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case = DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=lowerCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
snake_case = TFDebertaVaModel(config=lowerCAmelCase )
snake_case = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
snake_case = [input_ids, input_mask]
snake_case = model(lowerCAmelCase )
snake_case = model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
snake_case = TFDebertaVaForMaskedLM(config=lowerCAmelCase )
snake_case = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
snake_case = model(lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
snake_case = self.num_labels
snake_case = TFDebertaVaForSequenceClassification(config=lowerCAmelCase )
snake_case = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
snake_case = model(lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
snake_case = self.num_labels
snake_case = TFDebertaVaForTokenClassification(config=lowerCAmelCase )
snake_case = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
snake_case = model(lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
snake_case = TFDebertaVaForQuestionAnswering(config=lowerCAmelCase )
snake_case = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
snake_case = model(lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case ( self ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase : Optional[int] = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
_lowerCAmelCase : Any = (
{
"""feature-extraction""": TFDebertaVaModel,
"""fill-mask""": TFDebertaVaForMaskedLM,
"""question-answering""": TFDebertaVaForQuestionAnswering,
"""text-classification""": TFDebertaVaForSequenceClassification,
"""token-classification""": TFDebertaVaForTokenClassification,
"""zero-shot""": TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
_lowerCAmelCase : Optional[Any] = False
_lowerCAmelCase : List[Any] = False
def snake_case ( self ):
"""simple docstring"""
snake_case = TFDebertaVaModelTester(self )
snake_case = ConfigTester(self , config_class=lowerCAmelCase , hidden_size=37 )
def snake_case ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def snake_case ( self ):
"""simple docstring"""
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase )
@slow
def snake_case ( self ):
"""simple docstring"""
snake_case = TFDebertaVaModel.from_pretrained('kamalkraj/deberta-v2-xlarge' )
self.assertIsNotNone(lowerCAmelCase )
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@unittest.skip(reason='Model not available yet' )
def snake_case ( self ):
"""simple docstring"""
pass
@slow
def snake_case ( self ):
"""simple docstring"""
snake_case = TFDebertaVaModel.from_pretrained('kamalkraj/deberta-v2-xlarge' )
snake_case = tf.constant([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] )
snake_case = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase )[0]
snake_case = tf.constant(
[[[0.23_56, 0.19_48, 0.03_69], [-0.10_63, 0.35_86, -0.51_52], [-0.63_99, -0.02_59, -0.25_25]]] )
tf.debugging.assert_near(output[:, 1:4, 1:4] , lowerCAmelCase , atol=1E-4 )
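
# Illustrative sketch (not part of the test file) of the slice-comparison
# pattern used in the integration test above: run the model on fixed ids and
# compare a small logits window against frozen reference values.
#
#   output = model(input_ids, attention_mask=attention_mask)[0]
#   tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
#
# Comparing a 3x3 window instead of the full tensor keeps the frozen reference
# small while still catching most numerical regressions.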
| 149
| 1
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"xlm-roberta-base": 512,
"xlm-roberta-large": 512,
"xlm-roberta-large-finetuned-conll02-dutch": 512,
"xlm-roberta-large-finetuned-conll02-spanish": 512,
"xlm-roberta-large-finetuned-conll03-english": 512,
"xlm-roberta-large-finetuned-conll03-german": 512,
}
class XLMRobertaTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.fairseq_tokens_to_ids['<mask>'] = len(self.sp_model) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = self.__dict__.copy()
UpperCamelCase = None
UpperCamelCase = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , A_ ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
UpperCamelCase = {}
UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def __UpperCamelCase ( self , A_ , A_ = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
UpperCamelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __UpperCamelCase ( self , A_ , A_ = None , A_ = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ )
if token_ids_a is None:
return [1] + ([0] * len(A_ )) + [1]
return [1] + ([0] * len(A_ )) + [1, 1] + ([0] * len(A_ )) + [1]
def __UpperCamelCase ( self , A_ , A_ = None ) -> List[int]:
"""simple docstring"""
UpperCamelCase = [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
UpperCamelCase = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __UpperCamelCase ( self , A_ ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(A_ , out_type=A_ )
def __UpperCamelCase ( self , A_ ) -> Dict:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCamelCase = self.sp_model.PieceToId(A_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __UpperCamelCase ( self , A_ ) -> Optional[int]:
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __UpperCamelCase ( self , A_ ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = ''.join(A_ ).replace(A_ , ' ' ).strip()
return out_string
def __UpperCamelCase ( self , A_ , A_ = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(A_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCamelCase = os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A_ )
elif not os.path.isfile(self.vocab_file ):
with open(A_ , 'wb' ) as fi:
UpperCamelCase = self.sp_model.serialized_model_proto()
fi.write(A_ )
return (out_vocab_file,)
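
# Illustrative sketch (not part of the file) of the fairseq/spm alignment above:
# fairseq reserves ids 0-3 for <s>/<pad>/</s>/<unk>, so every SentencePiece id
# is shifted by fairseq_offset = 1 and <mask> is appended at the very end of
# the enlarged vocabulary.
#
#   def spm_id_to_fairseq_id(spm_id: int, fairseq_offset: int = 1, unk_id: int = 3) -> int:
#       # spm id 0 is <unk>, which fairseq keeps at id 3
#       return spm_id + fairseq_offset if spm_id else unk_id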
| 222
|
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    """
    Plain recursive 0/1 knapsack: for each item either skip it, or (if it still
    fits) take it and recurse with the reduced capacity; return the better value.
    """
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)
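

# Illustrative addition (not in the original file): the recursion above revisits
# the same (capacity, index) states exponentially often; caching them brings the
# cost down to O(number_of_items * max_weight).
from functools import lru_cache


def knapsack_memoized(weights: list, values: list, max_weight: int) -> int:
    @lru_cache(maxsize=None)
    def best(remaining: int, index: int) -> int:
        if index == len(values):
            return 0
        skip = best(remaining, index + 1)
        take = 0
        if weights[index] <= remaining:
            take = values[index] + best(remaining - weights[index], index + 1)
        return max(skip, take)

    return best(max_weight, 0)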
if __name__ == "__main__":
import doctest
doctest.testmod()
| 222
| 1
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def snake_case_ (UpperCamelCase : Any ):
'''simple docstring'''
_a = SwinvaConfig()
_a = swinva_name.split('''_''' )
_a = name_split[1]
if "to" in name_split[3]:
_a = int(name_split[3][-3:] )
else:
_a = int(name_split[3] )
if "to" in name_split[2]:
_a = int(name_split[2][-2:] )
else:
_a = int(name_split[2][6:] )
if model_size == "tiny":
_a = 96
_a = (2, 2, 6, 2)
_a = (3, 6, 12, 24)
elif model_size == "small":
_a = 96
_a = (2, 2, 18, 2)
_a = (3, 6, 12, 24)
elif model_size == "base":
_a = 128
_a = (2, 2, 18, 2)
_a = (4, 8, 16, 32)
else:
_a = 192
_a = (2, 2, 18, 2)
_a = (6, 12, 24, 48)
if "to" in swinva_name:
_a = (12, 12, 12, 6)
if ("22k" in swinva_name) and ("to" not in swinva_name):
_a = 2_1841
_a = '''huggingface/label-files'''
_a = '''imagenet-22k-id2label.json'''
_a = json.load(open(hf_hub_download(UpperCamelCase , UpperCamelCase , repo_type='''dataset''' ) , '''r''' ) )
_a = {int(UpperCamelCase ): v for k, v in idalabel.items()}
_a = idalabel
_a = {v: k for k, v in idalabel.items()}
else:
_a = 1000
_a = '''huggingface/label-files'''
_a = '''imagenet-1k-id2label.json'''
_a = json.load(open(hf_hub_download(UpperCamelCase , UpperCamelCase , repo_type='''dataset''' ) , '''r''' ) )
_a = {int(UpperCamelCase ): v for k, v in idalabel.items()}
_a = idalabel
_a = {v: k for k, v in idalabel.items()}
_a = img_size
_a = num_classes
_a = embed_dim
_a = depths
_a = num_heads
_a = window_size
return config
def snake_case_ (UpperCamelCase : int ):
'''simple docstring'''
if "patch_embed.proj" in name:
_a = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
_a = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
if "layers" in name:
_a = '''encoder.''' + name
if "attn.proj" in name:
_a = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
_a = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
_a = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
_a = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
_a = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
_a = name.replace('''mlp.fc2''' , '''output.dense''' )
if "q_bias" in name:
_a = name.replace('''q_bias''' , '''query.bias''' )
if "k_bias" in name:
_a = name.replace('''k_bias''' , '''key.bias''' )
if "v_bias" in name:
_a = name.replace('''v_bias''' , '''value.bias''' )
if "cpb_mlp" in name:
_a = name.replace('''cpb_mlp''' , '''continuous_position_bias_mlp''' )
if name == "norm.weight":
_a = '''layernorm.weight'''
if name == "norm.bias":
_a = '''layernorm.bias'''
if "head" in name:
_a = name.replace('''head''' , '''classifier''' )
else:
_a = '''swinv2.''' + name
return name
def snake_case_ (UpperCamelCase : Optional[Any] , UpperCamelCase : Optional[int] ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
_a = orig_state_dict.pop(UpperCamelCase )
if "mask" in key:
continue
elif "qkv" in key:
_a = key.split('''.''' )
_a = int(key_split[1] )
_a = int(key_split[3] )
_a = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
_a = val[:dim, :]
_a = val[dim : dim * 2, :]
_a = val[-dim:, :]
else:
_a = val[:dim]
_a = val[
dim : dim * 2
]
_a = val[-dim:]
else:
_a = val
return orig_state_dict
def snake_case_ (UpperCamelCase : Optional[int] , UpperCamelCase : Optional[int] ):
'''simple docstring'''
_a = timm.create_model(UpperCamelCase , pretrained=UpperCamelCase )
timm_model.eval()
_a = get_swinva_config(UpperCamelCase )
_a = SwinvaForImageClassification(UpperCamelCase )
model.eval()
_a = convert_state_dict(timm_model.state_dict() , UpperCamelCase )
model.load_state_dict(UpperCamelCase )
_a = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
_a = AutoImageProcessor.from_pretrained('''microsoft/{}'''.format(swinva_name.replace('''_''' , '''-''' ) ) )
_a = Image.open(requests.get(UpperCamelCase , stream=UpperCamelCase ).raw )
_a = image_processor(images=UpperCamelCase , return_tensors='''pt''' )
_a = timm_model(inputs['''pixel_values'''] )
_a = model(**UpperCamelCase ).logits
assert torch.allclose(UpperCamelCase , UpperCamelCase , atol=1e-3 )
print(f'Saving model {swinva_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(UpperCamelCase )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(UpperCamelCase )
model.push_to_hub(
repo_path_or_name=Path(UpperCamelCase , UpperCamelCase ) , organization='''nandwalritik''' , commit_message='''Add model''' , )
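
# Illustrative sketch (not part of the script) of the qkv split performed in
# convert_state_dict above: timm fuses query/key/value into a single
# (3*dim, dim) matrix, while the HF checkpoint stores them separately.
# `dim` is the attention hidden size of the block.
def split_qkv_weight(qkv_weight, dim):
    return {
        "query.weight": qkv_weight[:dim, :],         # rows 0 .. dim
        "key.weight": qkv_weight[dim : dim * 2, :],  # rows dim .. 2*dim
        "value.weight": qkv_weight[-dim:, :],        # rows 2*dim .. 3*dim
    }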
if __name__ == "__main__":
_snake_case : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swinv2_name',
default='swinv2_tiny_patch4_window8_256',
type=str,
help='Name of the Swinv2 timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
_snake_case : Dict = parser.parse_args()
convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
| 367
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_albert': ['ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'AlbertConfig', 'AlbertOnnxConfig'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_albert'] = ['AlbertTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_albert_fast'] = ['AlbertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_albert'] = [
'ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'AlbertForMaskedLM',
'AlbertForMultipleChoice',
'AlbertForPreTraining',
'AlbertForQuestionAnswering',
'AlbertForSequenceClassification',
'AlbertForTokenClassification',
'AlbertModel',
'AlbertPreTrainedModel',
'load_tf_weights_in_albert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_albert'] = [
'TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFAlbertForMaskedLM',
'TFAlbertForMultipleChoice',
'TFAlbertForPreTraining',
'TFAlbertForQuestionAnswering',
'TFAlbertForSequenceClassification',
'TFAlbertForTokenClassification',
'TFAlbertMainLayer',
'TFAlbertModel',
'TFAlbertPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_albert'] = [
'FlaxAlbertForMaskedLM',
'FlaxAlbertForMultipleChoice',
'FlaxAlbertForPreTraining',
'FlaxAlbertForQuestionAnswering',
'FlaxAlbertForSequenceClassification',
'FlaxAlbertForTokenClassification',
'FlaxAlbertModel',
'FlaxAlbertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
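
# Illustrative note (not part of the file): after the line above, importing this
# package is cheap -- sys.modules[__name__] is a _LazyModule proxy, and a heavy
# branch such as modeling_tf_albert is only imported when one of its names is
# first accessed:
#
#   from transformers.models.albert import AlbertConfig   # loads configuration only
#   from transformers.models.albert import AlbertModel    # now pulls in torch modeling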
| 179
| 0
|
from __future__ import annotations
graph = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        """Graph is implemented as a dictionary of adjacency lists."""
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breadth_first_search(self) -> None:
        """Run BFS from the source vertex, recording each node's parent."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Return the BFS path from the source to target_vertex as 'v1->v2->...'."""
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = (
                f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            )
            raise ValueError(msg)
        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"
if __name__ == "__main__":
    g = Graph(graph, 'G')
    g.breadth_first_search()
print(g.shortest_path('D'))
print(g.shortest_path('G'))
print(g.shortest_path('Foo'))
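
# Illustrative note (not in the original file): with the adjacency map above and
# source 'G', the parent pointers form the chain G -> C -> A -> B -> D, so the
# first call prints 'G->C->A->B->D', the second prints 'G', and the third raises
# ValueError because 'Foo' is never reached by the search.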
| 305
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # The main process pads with one extra element so the others must be padded to match
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # For now, this test only runs on exactly two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    # For now, this test only runs on exactly two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


def main():
    state = PartialState()
    state.print(f"State: {state}")
    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)
if __name__ == "__main__":
main()
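
# Launch note (illustrative, not in the file): these checks are meant to run
# under the accelerate launcher with at least two processes, e.g.
#
#   accelerate launch --num_processes 2 path/to/this_file.py
#
# The reduce tests return early on any other world size.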
| 305
| 1
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "bert_for_seq_generation": (
            "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"bert_for_seq_generation": 512}


class BertGenerationTokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for BertGeneration checkpoints."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", unk_token="<unk>", pad_token="<pad>", sep_token="<::::>", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string into sentencepiece sub-word pieces."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) to a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
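# Usage sketch (hedged; the checkpoint id comes from PRETRAINED_VOCAB_FILES_MAP above):
# tok = BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
# pieces = tok._tokenize("Hello world")           # sentencepiece sub-word pieces
# text = tok.convert_tokens_to_string(pieces)     # round-trips back to "Hello world"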
'''simple docstring'''
def solution(min_total: int = 10**12) -> int:
    """
    Project Euler 100: return the number of blue discs in the first arrangement
    with more than ``min_total`` discs in total for which the probability of
    drawing two blue discs at random is exactly 1/2.
    """
    prev_numerator = 1
    prev_denominator = 0

    numerator = 1
    denominator = 1

    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator

        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator

    return (denominator + 1) // 2
if __name__ == "__main__":
print(f'''{solution() = }''')
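# Quick sanity check: solution(100) == 85, i.e. the first valid arrangement with more
# than 100 discs in total is 85 blue out of 120, where (85/120) * (84/119) == 1/2.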
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list) -> int:
    """Evaluate a postfix (reverse Polish) expression, with integer division truncating toward zero."""
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack: list[Any] = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # integer division truncating toward zero
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
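# Usage: evaluate_postfix(["2", "1", "+", "3", "*"]) evaluates (2 + 1) * 3 and returns 9;
# evaluate_postfix(["15", "7", "1", "1", "+", "-", "/"]) returns 3, since the integer
# division truncates toward zero: 15 / (7 - (1 + 1)) = 3.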
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)


@require_torch
class FSMTBleuTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
"""simple docstring"""
def heaps(arr: list) -> list:
    """
    Pure python implementation of the iterative Heap's algorithm,
    returning all permutations of a list.
    """
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(n: int, arr: list):
        c = [0] * n
        res.append(tuple(arr))

        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
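# Traced by hand from the algorithm above, heaps([1, 2, 3]) returns all six
# permutations in this order:
# [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]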
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)


class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "maskformer-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
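# Usage sketch (hedged): MaskFormerSwinConfig() reproduces the Swin-tiny-style defaults
# above, while e.g. MaskFormerSwinConfig(out_features=["stage1", "stage4"]) would expose
# only those stages to a downstream MaskFormer backbone.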
from math import factorial
class Dual:
    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual) + 'E' + str(n + 1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x


def differentiate(func, position, order):
    """Differentiate ``func`` at ``position`` to the given ``order`` using dual numbers."""
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)
if __name__ == "__main__":
import doctest
doctest.testmod()
def f(y):
    return y**2 * y**4


print(differentiate(f, 9, 2))
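# For f(y) = y**6 the second derivative is 30 * y**4, so the call above prints
# 30 * 9**4 = 196830.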
def hex_to_bin(hex_num: str) -> int:
    """Convert a hexadecimal string to its binary equivalent, returned as an int of binary digits."""
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("No value was passed to the function")

    is_negative = hex_num[0] == "-"
    if is_negative:
        hex_num = hex_num[1:]

    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError("Invalid value was passed to the function")

    bin_str = ""
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1

    return int(("-" + bin_str) if is_negative else bin_str)
if __name__ == "__main__":
import doctest
doctest.testmod()
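# Examples: hex_to_bin("AC") returns 10101100 (0xAC == 172 == 0b10101100), and
# hex_to_bin("-fc") returns -11111100.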
'''simple docstring'''
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def get_resize_output_image_size(input_image, output_size, keep_aspect_ratio, multiple):
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple

        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
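# Worked example (hypothetical numbers): for a 480x640 input with output_size=(384, 384),
# keep_aspect_ratio=True and multiple=32, the height scale (0.8) is closer to 1 than the
# width scale (0.6), so both dimensions use 0.8 and the function returns (384, 512).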
class DPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, keep_aspect_ratio: bool = False, ensure_multiple_of: int = 1, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], keep_aspect_ratio: bool = False, ensure_multiple_of: int = 1, resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image,
            output_size=(size["height"], size["width"]),
            keep_aspect_ratio=keep_aspect_ratio,
            multiple=ensure_multiple_of,
        )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: bool = None, size: int = None, keep_aspect_ratio: bool = None, ensure_multiple_of: int = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes=None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
'''simple docstring'''
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int):
    """Measure a single qubit on the aer simulator and return the histogram of results."""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(F'Total count for various states are: {single_qubit_measure(1, 1)}')
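# With no gates applied the qubit stays in |0>, so all 1000 shots read '0' and the
# printed counts are {'0': 1000}.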
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}


class RoCBertConfig(PretrainedConfig):
    model_type = "roc_bert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_cache=True, pad_token_id=0, position_embedding_type="absolute", classifier_dropout=None, enable_pronunciation=True, enable_shape=True, pronunciation_embed_dim=768, pronunciation_vocab_size=910, shape_embed_dim=512, shape_vocab_size=24858, concat_input=True, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
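# Usage sketch (hedged): RoCBertConfig() reproduces the weiweishi/roc-bert-base-zh-style
# defaults above; e.g. RoCBertConfig(enable_shape=False) would switch off the glyph-shape
# embedding channel that the flags above control.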
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass


@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class LoggingLevelTest(unittest.TestCase):
    def test_set_level(self):
        logger = logging.get_logger()

        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()

        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        # restore to the original level
        logging.set_verbosity(level_origin)

    def test_integration(self):
        level_origin = logging.get_verbosity()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger) as cl:
                logger.warning(msg)
            self.assertEqual(cl.out, msg + "\n")

        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()

        # should not be able to log warnings
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, "")

        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, msg + "\n")

        # restore to the original level
        logging.set_verbosity(level_origin)

    @mockenv(TRANSFORMERS_VERBOSITY="error")
    def test_env_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        # this action activates the env var
        logger = logging.get_logger("transformers.models.bart.tokenization_bart")

        env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
        env_level = logging.log_levels[env_level_str]

        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level,
            current_level,
            f"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}",
        )

        # restore to the original level
        os.environ["TRANSFORMERS_VERBOSITY"] = ""
        transformers.utils.logging._reset_library_root_logger()

    @mockenv(TRANSFORMERS_VERBOSITY="super-error")
    def test_env_invalid_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.logging.getLogger()
        with CaptureLogger(logger) as cl:
            # this action activates the env var
            logging.get_logger("transformers.models.bart.tokenization_bart")
        self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error", cl.out)

        # no need to restore as nothing was changed

    def test_advisory_warnings(self):
        # testing `logger.warning_advice()`
        transformers.utils.logging._reset_library_root_logger()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1"):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, "")

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=""):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, msg + "\n")


def test_set_progress_bar_enabled():
    disable_progress_bar()
    assert are_progress_bars_disabled()

    enable_progress_bar()
    assert not are_progress_bars_disabled()
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    def __init__(self, generator: Callable, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, gen_kwargs: Optional[dict] = None, num_proc: Optional[int] = None, **kwargs):
        super().__init__(
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.builder = Generator(
            cache_dir=cache_dir,
            features=features,
            generator=generator,
            gen_kwargs=gen_kwargs,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="train")
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
"""simple docstring"""
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def list_truncated_nums(n: int) -> list[int]:
    """Return ``n`` together with every left- and right-truncation of its digits."""
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums


def validate(n: int) -> bool:
    """Cheap pre-filter: for numbers longer than 3 digits, the leading and trailing 3 digits must be prime."""
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True


def compute_truncated_primes(count: int = 11) -> list[int]:
    """Collect the first ``count`` primes that remain prime when truncated from either direction."""
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes


def solution() -> int:
    """Project Euler 37: sum of the only eleven doubly-truncatable primes."""
    return sum(compute_truncated_primes(11))


if __name__ == "__main__":
    print(f"{sum(compute_truncated_primes(11)) = }")
'''simple docstring'''
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float("nan")
class Tee:
    """A helper class to tee print's output into a file. Usage: `sys.stdout = Tee(filename)`."""

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))
def get_original_command(max_width=80, full_python_path=False):
    """Return the original command line string, wrapped for `max_width` characters."""
    cmd = []

    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")

    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)

    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))

    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)
def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)

    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"

    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"

    return [sys.executable] + shlex.split(args.base_cmd)
def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    # enable to debug everything but the run itself, to do it fast and see the progress
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(id, cmd, variation_key, variation, longest_variation_len, target_metric_key, report_metric_keys, repeat_times, output_dir, verbose):
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(results) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}
def get_versions():
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
    return f"""
Datetime    : {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}

Software:
transformers: {transformers.__version__}
torch       : {torch.__version__}
cuda        : {torch.version.cuda}
python      : {platform.python_version()}

Hardware:
{torch.cuda.device_count()} GPUs      : {properties.name}, {properties.total_memory/2**30:0.2f}GB
"""
def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--base-cmd", default=None, type=str, required=True, help="Base cmd")
    parser.add_argument("--variations", default=None, type=str, nargs="+", required=True, help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'")
    parser.add_argument("--base-variation", default=None, type=str, help="Baseline variation to compare to. if None the minimal target value will be used to compare against")
    parser.add_argument("--target-metric-key", default=None, type=str, required=True, help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second")
    parser.add_argument("--report-metric-keys", default="", type=str, help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples")
    parser.add_argument("--repeat-times", default=1, type=int, help="How many times to re-run each variation - an average will be reported")
    parser.add_argument("--output_dir", default="output_benchmark", type=str, help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked")
    parser.add_argument("--verbose", default=False, action="store_true", help="Whether to show the outputs of each run or just the benchmark progress")
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")

    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1,
                cmd,
                variation_key,
                variation,
                longest_variation_len,
                args.target_metric_key,
                report_metric_keys,
                args.repeat_times,
                output_dir,
                args.verbose,
            )
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)
if __name__ == "__main__":
main()
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "andreasmadsen/efficient_mlm_m0.40": (
        "https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"
    ),
}


class RobertaPreLayerNormConfig(PretrainedConfig):
    model_type = "roberta-prelayernorm"

    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
logger = logging.get_logger(__name__)


if is_torch_available():
    import torch
    from torch import nn
    from torch.utils.data import Dataset

    from transformers import Trainer


class DummyDataset(Dataset):
    def __init__(self, length: int = 101):
        self.length = length

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return i


class DummyDataCollator:
    def __call__(self, features):
        return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}


class DummyModel(nn.Module):
    def __init__(self):
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(120, 80)

    def forward(self, input_ids, labels=None):
        if labels is not None:
            return torch.tensor(0.0, device=input_ids.device), input_ids
        else:
            return input_ids


class TestTrainerDistributedNeuronCore(TestCasePlus):
    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f"--nproc_per_node=2 --master_port={get_torch_dist_unique_port()} {self.test_file_dir}/test_trainer_distributed.py".split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


class TestTrainerDistributed(TestCasePlus):
    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f"--nproc_per_node={torch.cuda.device_count()} --master_port={get_torch_dist_unique_port()} {self.test_file_dir}/test_trainer_distributed.py".split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


if __name__ == "__main__":
    # The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
    #
    # PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py

    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]

    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
    )

    # Essentially, what we want to verify in the distributed case is that we get all samples back,
    # in the right order. (this is crucial for prediction for instance)
    for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)

        def compute_metrics(p: EvalPrediction) -> Dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}"
                )
            return {"success": success}

        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = 2

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = None
"""simple docstring"""
def solution(n: int = 100) -> int:
    """
    Project Euler 6: difference between the square of the sum of the first ``n``
    natural numbers and the sum of their squares, via closed-form identities.
    """
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(f"""{solution() = }""")
"""simple docstring"""
from math import factorial
class lowerCamelCase :
'''simple docstring'''
def __init__( self: Optional[int] , snake_case: Dict , snake_case: int ) -> Tuple:
snake_case_ :List[Any] = real
if isinstance(snake_case , snake_case ):
snake_case_ :Tuple = [1] * rank
else:
snake_case_ :Optional[Any] = rank
def __repr__( self: List[str] ) -> Tuple:
return (
f"""{self.real}+"""
f"""{'+'.join(str(snake_case )+'E'+str(n+1 )for n,dual in enumerate(self.duals ) )}"""
)
def lowerCAmelCase_ ( self: Optional[int] ) -> Optional[int]:
snake_case_ :Any = self.duals.copy()
while cur[-1] == 0:
cur.pop(-1 )
return Dual(self.real , snake_case )
def __add__( self: Optional[int] , snake_case: Dict ) -> List[str]:
if not isinstance(snake_case , snake_case ):
return Dual(self.real + other , self.duals )
snake_case_ :List[Any] = self.duals.copy()
snake_case_ :Tuple = other.duals.copy()
if len(snake_case ) > len(snake_case ):
o_dual.extend([1] * (len(snake_case ) - len(snake_case )) )
elif len(snake_case ) < len(snake_case ):
s_dual.extend([1] * (len(snake_case ) - len(snake_case )) )
snake_case_ :Dict = []
for i in range(len(snake_case ) ):
new_duals.append(s_dual[i] + o_dual[i] )
return Dual(self.real + other.real , snake_case )
_A : str = __add__
def __sub__( self: Tuple , snake_case: Union[str, Any] ) -> Tuple:
return self + other * -1
def __mul__( self: str , snake_case: Tuple ) -> Optional[Any]:
if not isinstance(snake_case , snake_case ):
snake_case_ :Dict = []
for i in self.duals:
new_duals.append(i * other )
return Dual(self.real * other , snake_case )
snake_case_ :int = [0] * (len(self.duals ) + len(other.duals ) + 1)
for i, item in enumerate(self.duals ):
for j, jtem in enumerate(other.duals ):
new_duals[i + j + 1] += item * jtem
for k in range(len(self.duals ) ):
new_duals[k] += self.duals[k] * other.real
for index in range(len(other.duals ) ):
new_duals[index] += other.duals[index] * self.real
return Dual(self.real * other.real , snake_case )
_A : int = __mul__
def __truediv__( self: List[str] , snake_case: List[str] ) -> List[str]:
if not isinstance(snake_case , snake_case ):
snake_case_ :Optional[Any] = []
for i in self.duals:
new_duals.append(i / other )
return Dual(self.real / other , snake_case )
raise ValueError
def __floordiv__( self: int , snake_case: List[Any] ) -> Any:
if not isinstance(snake_case , snake_case ):
snake_case_ :Optional[int] = []
for i in self.duals:
new_duals.append(i // other )
return Dual(self.real // other , snake_case )
raise ValueError
def __pow__( self: Optional[Any] , snake_case: Optional[int] ) -> List[Any]:
if n < 0 or isinstance(snake_case , snake_case ):
raise ValueError("""power must be a positive integer""" )
if n == 0:
return 1
if n == 1:
return self
snake_case_ :str = self
for _ in range(n - 1 ):
x *= self
return x
def differentiate(func, position, order):
    """simple docstring"""
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)
if __name__ == "__main__":
import doctest
doctest.testmod()
    def f(y):
        return y**2 * y**4

    print(differentiate(f, 9, 2))
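    # Illustrative extra check (not in the original): the second derivative of x**3
    # is 6*x, so at x = 2 the helper should return 12.
    def g(x):
        return x**3

    print(differentiate(g, 2, 2))  # 12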
| 66
| 0
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}
class BartphoTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        monolingual_vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs=None,
        **kwargs,
    ):
        """simple docstring"""
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        """simple docstring"""
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        """simple docstring"""
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        """simple docstring"""
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        """simple docstring"""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """simple docstring"""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        """simple docstring"""
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        """simple docstring"""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """simple docstring"""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{str(token)} \n")

        return out_vocab_file, out_monolingual_vocab_file
| 355
|
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
__snake_case : Optional[int] = """\
@inproceedings{snover-etal-2006-study,
title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",
author = \"Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John\",
booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",
month = aug # \" 8-12\",
year = \"2006\",
address = \"Cambridge, Massachusetts, USA\",
publisher = \"Association for Machine Translation in the Americas\",
url = \"https://aclanthology.org/2006.amta-papers.25\",
pages = \"223--231\",
}
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
__snake_case : List[str] = """\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
"""
__snake_case : Dict = """
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
    ignore_punct (boolean): If `True`, removes punctuation from the sentences before scoring. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
'score' (float): TER score (num_edits / sum_ref_lengths * 100)
'num_edits' (int): The cumulative number of edits
'ref_length' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\",
... \"What did the TER metric user say to the developer?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],
... [\"Your jokes are...\", \"...TERrible\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}
Example 2:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}
Example 3:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}
Example 4:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}
Example 5:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\",
... \"What did the TER metric user say to the developer?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],
... [\"Your jokes are...\", \"...TERrible\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class Ter(datasets.Metric):
    def _info(self):
        """simple docstring"""
if version.parse(scb.__version__ ) < version.parse('1.4.12' ):
raise ImportWarning(
'To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'
'You can install it with `pip install "sacrebleu>=1.4.12"`.' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='http://www.cs.umd.edu/~snover/tercom/' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Sequence(datasets.Value('string' , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/mjpost/sacreBLEU#ter'] , reference_urls=[
'https://github.com/jhclark/tercom',
] , )
    def _compute(
        self,
        predictions,
        references,
        normalized=False,
        ignore_punct=False,
        support_zh_ja_chars=False,
        case_sensitive=False,
    ):
        """simple docstring"""
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)

        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 122
| 0
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
_lowerCAmelCase = logging.get_logger(__name__)
class PerceiverFeatureExtractor(PerceiverImageProcessor):
    '''simple docstring'''

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 37
|
"""simple docstring"""
import requests
a_ = """""" # <-- Put your OpenWeatherMap appid here!
a_ = """https://api.openweathermap.org/data/2.5/"""
def __lowercase ( snake_case_ : str = "Chicago" ,snake_case_ : str = APPID ) ->dict:
'''simple docstring'''
return requests.get(URL_BASE + '''weather''' ,params=locals() ).json()
def __lowercase ( snake_case_ : str = "Kolkata, India" ,snake_case_ : str = APPID ) ->dict:
'''simple docstring'''
return requests.get(URL_BASE + '''forecast''' ,params=locals() ).json()
def __lowercase ( snake_case_ : float = 55.68 ,snake_case_ : float = 12.57 ,snake_case_ : str = APPID ) ->dict:
'''simple docstring'''
return requests.get(URL_BASE + '''onecall''' ,params=locals() ).json()
if __name__ == "__main__":
from pprint import pprint
while True:
        location = input("Enter a location:").strip()
if location:
pprint(current_weather(location))
else:
break
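    # The other helpers above can be exercised the same way (illustrative; they need a
    # valid APPID set at the top of the file):
    # pprint(weather_forecast("Kolkata, India"))
    # pprint(weather_onecall(55.68, 12.57))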
| 179
| 0
|
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = """Hello, World!"""
SCREAMING_SNAKE_CASE_ = """en_XX"""
def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = Path("""data_bin""" )
SCREAMING_SNAKE_CASE = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(_SCREAMING_SNAKE_CASE ).parent ) , checkpoint_file=Path(_SCREAMING_SNAKE_CASE ).name , _name="""xmod_base""" , arch="""xmod_base""" , task="""multilingual_masked_lm""" , data_name_or_path=str(_SCREAMING_SNAKE_CASE ) , bpe="""sentencepiece""" , sentencepiece_model=str(Path(_SCREAMING_SNAKE_CASE ).parent / """sentencepiece.bpe.model""" ) , src_dict=str(data_dir / """dict.txt""" ) , )
xmod.eval() # disable dropout
print(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE = xmod.model.encoder.sentence_encoder
SCREAMING_SNAKE_CASE = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_14 , type_vocab_size=1 , layer_norm_eps=1E-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , """bottleneck""" , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
SCREAMING_SNAKE_CASE = xmod.model.classification_heads["""mnli"""].out_proj.weight.shape[0]
print("""Our X-MOD config:""" , _SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE = XmodForSequenceClassification(_SCREAMING_SNAKE_CASE ) if classification_head else XmodForMaskedLM(_SCREAMING_SNAKE_CASE )
model.eval()
# Now let's copy all the weights.
# Embeddings
SCREAMING_SNAKE_CASE = xmod_sent_encoder.embed_tokens.weight
SCREAMING_SNAKE_CASE = xmod_sent_encoder.embed_positions.weight
SCREAMING_SNAKE_CASE = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
SCREAMING_SNAKE_CASE = xmod_sent_encoder.layernorm_embedding.weight
SCREAMING_SNAKE_CASE = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
SCREAMING_SNAKE_CASE = model.roberta.encoder.layer[i]
SCREAMING_SNAKE_CASE = xmod_sent_encoder.layers[i]
# self attention
SCREAMING_SNAKE_CASE = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError("""Dimensions of self-attention weights do not match.""" )
SCREAMING_SNAKE_CASE = xmod_layer.self_attn.q_proj.weight
SCREAMING_SNAKE_CASE = xmod_layer.self_attn.q_proj.bias
SCREAMING_SNAKE_CASE = xmod_layer.self_attn.k_proj.weight
SCREAMING_SNAKE_CASE = xmod_layer.self_attn.k_proj.bias
SCREAMING_SNAKE_CASE = xmod_layer.self_attn.v_proj.weight
SCREAMING_SNAKE_CASE = xmod_layer.self_attn.v_proj.bias
# self-attention output
SCREAMING_SNAKE_CASE = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError("""Dimensions of self-attention output weights do not match.""" )
SCREAMING_SNAKE_CASE = xmod_layer.self_attn.out_proj.weight
SCREAMING_SNAKE_CASE = xmod_layer.self_attn.out_proj.bias
SCREAMING_SNAKE_CASE = xmod_layer.self_attn_layer_norm.weight
SCREAMING_SNAKE_CASE = xmod_layer.self_attn_layer_norm.bias
# intermediate
SCREAMING_SNAKE_CASE = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("""Dimensions of intermediate weights do not match.""" )
SCREAMING_SNAKE_CASE = xmod_layer.fca.weight
SCREAMING_SNAKE_CASE = xmod_layer.fca.bias
# output
SCREAMING_SNAKE_CASE = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("""Dimensions of feed-forward weights do not match.""" )
SCREAMING_SNAKE_CASE = xmod_layer.fca.weight
SCREAMING_SNAKE_CASE = xmod_layer.fca.bias
SCREAMING_SNAKE_CASE = xmod_layer.final_layer_norm.weight
SCREAMING_SNAKE_CASE = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
SCREAMING_SNAKE_CASE = xmod_layer.adapter_layer_norm.weight
SCREAMING_SNAKE_CASE = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError("""Lists of language adapters do not match.""" )
for lang_code, adapter in xmod_layer.adapter_modules.items():
SCREAMING_SNAKE_CASE = bert_output.adapter_modules[lang_code]
SCREAMING_SNAKE_CASE = xmod_layer.adapter_modules[lang_code]
SCREAMING_SNAKE_CASE = from_adapter.fca.weight
SCREAMING_SNAKE_CASE = from_adapter.fca.bias
SCREAMING_SNAKE_CASE = from_adapter.fca.weight
SCREAMING_SNAKE_CASE = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
SCREAMING_SNAKE_CASE = xmod_sent_encoder.layer_norm.weight
SCREAMING_SNAKE_CASE = xmod_sent_encoder.layer_norm.bias
if classification_head:
SCREAMING_SNAKE_CASE = xmod.model.classification_heads["""mnli"""].dense.weight
SCREAMING_SNAKE_CASE = xmod.model.classification_heads["""mnli"""].dense.bias
SCREAMING_SNAKE_CASE = xmod.model.classification_heads["""mnli"""].out_proj.weight
SCREAMING_SNAKE_CASE = xmod.model.classification_heads["""mnli"""].out_proj.bias
else:
# LM Head
SCREAMING_SNAKE_CASE = xmod.model.encoder.lm_head.dense.weight
SCREAMING_SNAKE_CASE = xmod.model.encoder.lm_head.dense.bias
SCREAMING_SNAKE_CASE = xmod.model.encoder.lm_head.layer_norm.weight
SCREAMING_SNAKE_CASE = xmod.model.encoder.lm_head.layer_norm.bias
SCREAMING_SNAKE_CASE = xmod.model.encoder.lm_head.weight
SCREAMING_SNAKE_CASE = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
SCREAMING_SNAKE_CASE = xmod.encode(_SCREAMING_SNAKE_CASE ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE = model(_SCREAMING_SNAKE_CASE )[0]
if classification_head:
SCREAMING_SNAKE_CASE = xmod.model.classification_heads["""mnli"""](xmod.extract_features(_SCREAMING_SNAKE_CASE ) )
else:
SCREAMING_SNAKE_CASE = xmod.model(_SCREAMING_SNAKE_CASE , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
SCREAMING_SNAKE_CASE = torch.max(torch.abs(our_output - their_output ) ).item()
print(F"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
SCREAMING_SNAKE_CASE = torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 )
print("""Do both models output the same tensors?""" , """🔥""" if success else """💩""" )
if not success:
raise Exception("""Something went wRoNg""" )
Path(_SCREAMING_SNAKE_CASE ).mkdir(parents=_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
SCREAMING_SNAKE_CASE_ = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 193
|
def solution(numerator: int = 1, digit: int = 1000) -> int:
    '''simple docstring'''
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)

            now_divide = now_divide * 10 % divide_by_number
    return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
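    # Illustrative call (not in the original): among d < 10, 1/7 has the longest
    # recurring decimal cycle, so this should print 7.
    print(solution(1, 10))  # 7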
| 193
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlnet"] = [
'''XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLNetForMultipleChoice''',
'''XLNetForQuestionAnswering''',
'''XLNetForQuestionAnsweringSimple''',
'''XLNetForSequenceClassification''',
'''XLNetForTokenClassification''',
'''XLNetLMHeadModel''',
'''XLNetModel''',
'''XLNetPreTrainedModel''',
'''load_tf_weights_in_xlnet''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlnet"] = [
'''TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLNetForMultipleChoice''',
'''TFXLNetForQuestionAnsweringSimple''',
'''TFXLNetForSequenceClassification''',
'''TFXLNetForTokenClassification''',
'''TFXLNetLMHeadModel''',
'''TFXLNetMainLayer''',
'''TFXLNetModel''',
'''TFXLNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 72
|
# fmt: off
MORSE_CODE_DICT = {
'''A''': '''.-''', '''B''': '''-...''', '''C''': '''-.-.''', '''D''': '''-..''', '''E''': '''.''', '''F''': '''..-.''', '''G''': '''--.''',
'''H''': '''....''', '''I''': '''..''', '''J''': '''.---''', '''K''': '''-.-''', '''L''': '''.-..''', '''M''': '''--''', '''N''': '''-.''',
'''O''': '''---''', '''P''': '''.--.''', '''Q''': '''--.-''', '''R''': '''.-.''', '''S''': '''...''', '''T''': '''-''', '''U''': '''..-''',
'''V''': '''...-''', '''W''': '''.--''', '''X''': '''-..-''', '''Y''': '''-.--''', '''Z''': '''--..''', '''1''': '''.----''',
'''2''': '''..---''', '''3''': '''...--''', '''4''': '''....-''', '''5''': '''.....''', '''6''': '''-....''', '''7''': '''--...''',
'''8''': '''---..''', '''9''': '''----.''', '''0''': '''-----''', '''&''': '''.-...''', '''@''': '''.--.-.''',
''':''': '''---...''', ''',''': '''--..--''', '''.''': '''.-.-.-''', '''\'''': '''.----.''', '''"''': '''.-..-.''',
'''?''': '''..--..''', '''/''': '''-..-.''', '''=''': '''-...-''', '''+''': '''.-.-.''', '''-''': '''-....-''',
'''(''': '''-.--.''', ''')''': '''-.--.-''', '''!''': '''-.-.--''', ''' ''': '''/'''
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt(message: str) -> str:
    """simple docstring"""
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    """simple docstring"""
    return "".join(REVERSE_DICT[char] for char in message.split())


def main() -> None:
    """simple docstring"""
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)


if __name__ == "__main__":
    main()
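    # Round-trip sanity check (illustrative addition, not in the original script):
    assert decrypt(encrypt("SOS")) == "SOS"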
| 5
| 0
|
def z_function(input_str: str) -> list[int]:
    '''simple docstring'''
    z_result = [0 for i in range(len(input_str))]

    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0

    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge

        while go_next(i, z_result, input_str):
            z_result[i] += 1

        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1

    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    '''simple docstring'''
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    '''simple docstring'''
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)

    for val in z_result:
        # if value is greater than the length of the pattern string,
        # this index is a starting position of a substring
        # equal to the pattern string
        if val >= len(pattern):
            answer += 1

    return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
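    # Illustrative example (not in the original): "abr" occurs twice in "abracadabra".
    assert find_pattern("abr", "abracadabra") == 2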
| 295
|
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UpperCamelCase_ :
@property
def lowerCAmelCase ( self ) -> int:
return self.get_dummy_input()
@property
def lowerCAmelCase ( self ) -> Optional[Any]:
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(F'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' )
def lowerCAmelCase ( self , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=False , ) -> List[str]:
_snake_case = 4
_snake_case = 32
_snake_case = (32, 32)
_snake_case = torch.manual_seed(0 )
_snake_case = torch.device(lowerCAmelCase_ )
_snake_case = (batch_size, num_channels) + sizes
_snake_case = randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=lowerCAmelCase_ )
_snake_case = {'hidden_states': hidden_states}
if include_temb:
_snake_case = 128
_snake_case = randn_tensor((batch_size, temb_channels) , generator=lowerCAmelCase_ , device=lowerCAmelCase_ )
if include_res_hidden_states_tuple:
_snake_case = torch.manual_seed(1 )
_snake_case = (randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=lowerCAmelCase_ ),)
if include_encoder_hidden_states:
_snake_case = floats_tensor((batch_size, 32, 32) ).to(lowerCAmelCase_ )
if include_skip_sample:
_snake_case = randn_tensor(((batch_size, 3) + sizes) , generator=lowerCAmelCase_ , device=lowerCAmelCase_ )
return dummy_input
def lowerCAmelCase ( self ) -> Tuple:
_snake_case = {
'in_channels': 32,
'out_channels': 32,
'temb_channels': 128,
}
if self.block_type == "up":
_snake_case = 32
if self.block_type == "mid":
init_dict.pop('out_channels' )
_snake_case = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Optional[int]:
_snake_case , _snake_case = self.prepare_init_args_and_inputs_for_common()
_snake_case = self.block_class(**lowerCAmelCase_ )
unet_block.to(lowerCAmelCase_ )
unet_block.eval()
with torch.no_grad():
_snake_case = unet_block(**lowerCAmelCase_ )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case = output[0]
self.assertEqual(output.shape , self.output_shape )
_snake_case = output[0, -1, -3:, -3:]
_snake_case = torch.tensor(lowerCAmelCase_ ).to(lowerCAmelCase_ )
assert torch_all_close(output_slice.flatten() , lowerCAmelCase_ , atol=5E-3 )
@unittest.skipIf(torch_device == 'mps' , 'Training is not supported in mps' )
def lowerCAmelCase ( self ) -> Tuple:
_snake_case , _snake_case = self.prepare_init_args_and_inputs_for_common()
_snake_case = self.block_class(**lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.train()
_snake_case = model(**lowerCAmelCase_ )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case = output[0]
_snake_case = torch.device(lowerCAmelCase_ )
_snake_case = randn_tensor(output.shape , device=lowerCAmelCase_ )
_snake_case = torch.nn.functional.mse_loss(lowerCAmelCase_ , lowerCAmelCase_ )
loss.backward()
| 295
| 1
|
"""simple docstring"""
MOD_ADLER = 65_521


def adler32(plain_text: str) -> int:
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
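# Sanity check against zlib's reference implementation (illustrative addition,
# not part of the original snippet):
if __name__ == "__main__":
    import zlib

    assert adler32("Wikipedia") == zlib.adler32(b"Wikipedia")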
| 86
|
"""simple docstring"""
UNIT_SYMBOL = {
    "meter": "m",
    "kilometer": "km",
    "megametre": "Mm",
    "gigametre": "Gm",
    "terametre": "Tm",
    "petametre": "Pm",
    "exametre": "Em",
    "zettametre": "Zm",
    "yottametre": "Ym",
}

# Exponent of the factor(meter)
METRIC_CONVERSION = {
    "m": 0,
    "km": 3,
    "Mm": 6,
    "Gm": 9,
    "Tm": 12,
    "Pm": 15,
    "Em": 18,
    "Zm": 21,
    "Ym": 24,
}


def length_conversion(value: float, from_type: str, to_type: str) -> float:
    """simple docstring"""
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")

    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)

    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)

    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1

    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)

    return value * pow(10, exponent)
if __name__ == "__main__":
from doctest import testmod
testmod()
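    # Illustrative conversions (not part of the original): 4 km is 0.004 Mm,
    # and 1 Mm is 1_000_000 m.
    print(length_conversion(4, "kilometer", "megametre"))  # 0.004
    print(length_conversion(1, "megametre", "meter"))  # 1000000.0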
| 289
| 0
|
'''simple docstring'''
import csv
import tweepy
# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name: str) -> None:
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)

    # save most recent tweets
    alltweets.extend(new_tweets)

    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")

        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)

        # save most recent tweets
        alltweets.extend(new_tweets)

        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1

        print(f"...{len(alltweets)} tweets downloaded so far")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('FirePing32')
| 96
|
'''simple docstring'''
import math
class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 1_0)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 1_0)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
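    # show_min returns the distance rather than printing it; wrap it in print() to
    # see the result (illustrative addition): the shortest 1 -> 4 path is
    # 1 -> 3 -> 4 with weight 5 + 6 = 11, and 0 -> 3 goes 0 -> 2 -> 3 for 9 + 7 = 16.
    print(graph.show_min(1, 4))  # 11
    print(graph.show_min(0, 3))  # 16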
| 96
| 1
|
"""simple docstring"""
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1_024,
}
def load_vocab(vocab_file):
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class WordpieceTokenizer(object):
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            # greedily take the longest vocab entry starting at `start`
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end

        return sub_tokens
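# Toy illustration of the greedy longest-match-first loop above (hypothetical vocab
# values, not part of the tokenizer):
if __name__ == "__main__":
    _toy = WordpieceTokenizer(vocab={"un", "believ", "able"}, unk_token="<unk>")
    print(_toy.tokenize("unbelievable"))  # ['un', 'believ', 'able']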
class CpmAntTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False
def __init__( self , a__ , a__="<d>" , a__="</d>" , a__="<s>" , a__="</s>" , a__="<pad>" , a__="<unk>" , a__="</n>" , a__="</_>" , a__="left" , **a__ , ):
requires_backends(self , ["""jieba"""] )
super().__init__(
bod_token=a__ , eod_token=a__ , bos_token=a__ , eos_token=a__ , pad_token=a__ , unk_token=a__ , line_token=a__ , space_token=a__ , padding_side=a__ , **a__ , )
_lowerCAmelCase : Union[str, Any] = bod_token
_lowerCAmelCase : List[Any] = eod_token
_lowerCAmelCase : List[str] = load_vocab(a__ )
_lowerCAmelCase : Tuple = self.encoder[space_token]
_lowerCAmelCase : Optional[Any] = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
_lowerCAmelCase : Any = collections.OrderedDict(sorted(self.encoder.items() , key=lambda a__ : x[1] ) )
_lowerCAmelCase : int = {v: k for k, v in self.encoder.items()}
_lowerCAmelCase : Optional[int] = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def __A ( self ):
return self.encoder[self.bod_token]
@property
def __A ( self ):
return self.encoder[self.eod_token]
@property
def __A ( self ):
return self.encoder["\n"]
@property
def __A ( self ):
return len(self.encoder )
def __A ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def __A ( self , a__ ):
_lowerCAmelCase : Dict = []
for x in jieba.cut(a__ , cut_all=a__ ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(a__ ) )
return output_tokens
def __A ( self , a__ , **a__ ):
_lowerCAmelCase : Any = [i for i in token_ids if i >= 0]
_lowerCAmelCase : str = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(a__ , **a__ )
def __A ( self , a__ ):
return token in self.encoder
def __A ( self , a__ ):
return "".join(a__ )
def __A ( self , a__ ):
return self.encoder.get(a__ , self.encoder.get(self.unk_token ) )
def __A ( self , a__ ):
return self.decoder.get(a__ , self.unk_token )
def __A ( self , a__ , a__ = None ):
if os.path.isdir(a__ ):
_lowerCAmelCase : int = os.path.join(
a__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
else:
_lowerCAmelCase : List[Any] = (filename_prefix + """-""" if filename_prefix else """""") + save_directory
_lowerCAmelCase : Any = 0
if " " in self.encoder:
_lowerCAmelCase : int = self.encoder[""" """]
del self.encoder[" "]
if "\n" in self.encoder:
_lowerCAmelCase : int = self.encoder["""\n"""]
del self.encoder["\n"]
_lowerCAmelCase : List[Any] = collections.OrderedDict(sorted(self.encoder.items() , key=lambda a__ : x[1] ) )
with open(a__ , """w""" , encoding="""utf-8""" ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
F"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
""" Please check that the vocabulary is not corrupted!""" )
_lowerCAmelCase : List[str] = token_index
writer.write(token + """\n""" )
index += 1
return (vocab_file,)
def __A ( self , a__ , a__ = None ):
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def __A ( self , a__ , a__ = None , a__ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a__ , token_ids_a=a__ , already_has_special_tokens=a__ )
if token_ids_a is not None:
return [1] + ([0] * len(a__ )) + [1] + ([0] * len(a__ ))
return [1] + ([0] * len(a__ ))
| 44
|
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
a__ = logging.get_logger(__name__)
class SegformerFeatureExtractor(SegformerImageProcessor):
    '''simple docstring'''

    def __init__(self, *args, **kwargs) -> None:
        """simple docstring"""
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 317
| 0
|
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class TestActivations(unittest.TestCase):
    '''simple docstring'''

    def test_gelu_versions(self) -> None:
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self) -> None:
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self) -> None:
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self) -> None:
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
| 272
|
"""simple docstring"""
def nand_gate(input_1: int, input_2: int) -> int:
    """simple docstring"""
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    """simple docstring"""
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
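    # NAND is NOT(AND); a quick cross-check against Python's boolean operators
    # (illustrative addition, not in the original):
    for a in (0, 1):
        for b in (0, 1):
            assert nand_gate(a, b) == int(not (a and b))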
| 272
| 1
|
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse("""3.8"""):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    """simple docstring"""
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)
# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)
def require_faiss(test_case):
    """simple docstring"""
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    """simple docstring"""
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    """simple docstring"""
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    """simple docstring"""
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case


def require_torch(test_case):
    """simple docstring"""
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    """simple docstring"""
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    """simple docstring"""
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    """simple docstring"""
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case


def require_transformers(test_case):
    """simple docstring"""
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    """simple docstring"""
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    """simple docstring"""
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    """simple docstring"""

    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    """simple docstring"""
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    """simple docstring"""
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case


def slow(test_case):
    """simple docstring"""
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    """simple docstring"""
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    """simple docstring"""
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    """simple docstring"""
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case


def for_all_test_methods(*decorators):
    """simple docstring"""

    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate
class RequestWouldHangIndefinitelyError(Exception):
    """simple docstring"""

    pass


class OfflineSimulationMode(Enum):
    """simple docstring"""

    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    """simple docstring"""
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    """simple docstring"""
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)


@contextmanager
def assert_arrow_memory_increases():
    """simple docstring"""
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    """simple docstring"""
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def is_rng_equal(rng1, rng2):
    """simple docstring"""
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()
def xfail_if_500_502_http_error( func ):
    """simple docstring"""
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func , *args , **kwargs ):
        try:
            return func(*args , **kwargs )
        except HTTPError as err:
            # treat flaky server-side 500/502 errors as expected failures
            if str(err ).startswith('''500''' ) or str(err ).startswith('''502''' ):
                pytest.xfail(str(err ) )
            raise err

    return decorator.decorator(_wrapper , func )
class _RunOutput:
    """simple docstring"""

    def __init__( self , returncode , stdout , stderr ) -> Union[str, Any]:
        '''simple docstring'''
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream( stream , callback ):
    """simple docstring"""
    while True:
        line = await stream.readline()
        if line:
            callback(line )
        else:
            break


async def _stream_subprocess( cmd , env=None , stdin=None , timeout=None , quiet=False , echo=False ):
    """simple docstring"""
    if echo:
        print('''\nRunning: ''' , ''' '''.join(cmd ) )
    p = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=stdin , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=env , )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []

    def tee(line , sink , pipe , label="" ):
        line = line.decode('''utf-8''' ).rstrip()
        sink.append(line )
        if not quiet:
            print(label , line , file=pipe )

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout , lambda line : tee(line , out , sys.stdout , label='''stdout:''' ) ),
            _read_stream(p.stderr , lambda line : tee(line , err , sys.stderr , label='''stderr:''' ) ),
        ] , timeout=timeout , )
    return _RunOutput(await p.wait() , out , err )
def execute_subprocess_async( cmd , env=None , stdin=None , timeout=180 , quiet=False , echo=True ):
    """simple docstring"""
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd , env=env , stdin=stdin , timeout=timeout , quiet=quiet , echo=echo ) )
    cmd_str = ''' '''.join(cmd )
    if result.returncode > 0:
        stderr = '''\n'''.join(result.stderr )
        raise RuntimeError(
            F"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            F"The combined stderr from workers follows:\n{stderr}" )
    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(F"'{cmd_str}' produced no output." )
    return result
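# Illustrative usage sketch (hypothetical, not part of the original module):
# run a Python one-liner and inspect the captured, line-split stdout.
def _example_execute_subprocess_async():
    result = execute_subprocess_async([sys.executable , '''-c''' , '''print("hello")'''] , echo=False , quiet=True )
    assert result.returncode == 0
    assert result.stdout == ['''hello''']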
def pytest_xdist_worker_id( ):
    """simple docstring"""
    worker = os.environ.get('''PYTEST_XDIST_WORKER''' , '''gw0''' )
    worker = re.sub(r'''^gw''' , '''''' , worker , 0 , re.M )
    return int(worker )


def get_torch_dist_unique_port( ):
    """simple docstring"""
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
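# Illustrative sketch (hypothetical values): with PYTEST_XDIST_WORKER="gw3" the
# two helpers above yield worker id 3 and torch.distributed port 29503, so
# concurrent pytest-xdist workers never collide on the same port.
def _example_unique_port_per_worker():
    os.environ['''PYTEST_XDIST_WORKER'''] = '''gw3'''
    assert pytest_xdist_worker_id() == 3
    assert get_torch_dist_unique_port() == 29503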
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
logger = logging.getLogger(__name__)
def parse_args( ):
    """simple docstring"""
    parser = argparse.ArgumentParser(
        description='''Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.''' )
parser.add_argument(
'''--dataset_name''' , type=lowercase , default='''wikitext''' , help='''Name of the training. Explore datasets at: hf.co/datasets.''' , )
parser.add_argument(
'''--dataset_config''' , type=lowercase , default='''wikitext-103-raw-v1''' , help='''Configuration name of the dataset.''' )
parser.add_argument(
'''--tokenizer_name_or_path''' , type=lowercase , default='''sayakpaul/unigram-tokenizer-wikitext''' , help='''Tokenizer identifier. Can be a local filepath or a Hub identifier.''' , )
parser.add_argument(
'''--shard_size''' , type=lowercase , default=1000 , help='''Number of entries to go in a single shard.''' , )
parser.add_argument('''--split''' , type=lowercase , default='''train''' , choices=['''train''', '''test''', '''validation'''] )
parser.add_argument(
'''--limit''' , default=lowercase , type=lowercase , help='''Limit the number of shards (used for debugging).''' , )
parser.add_argument(
'''--max_length''' , type=lowercase , default=512 , help='''Maximum sequence length. For training on TPUs, it helps to have a maximum'''
''' sequence length that is a multiple of 8.''' , )
parser.add_argument(
'''--output_dir''' , default='''tf-tpu''' , type=lowercase , help='''Output directory where the TFRecord shards will be saved. If the'''
''' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord'''
''' shards will be directly saved to a Google Cloud Storage bucket.''' , )
    args = parser.parse_args()
    return args
def tokenize_function( tokenizer ):
    """simple docstring"""

    def fn(examples ):
        return tokenizer(examples['''text'''] )

    return fn


def get_serialized_examples( tokenized_data ):
    """simple docstring"""
    records = []
    for i in range(len(tokenized_data['''input_ids'''] ) ):
        feature = {
            '''input_ids''': tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data['''input_ids'''][i] ) ),
            '''attention_mask''': tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data['''attention_mask'''][i] ) ),
        }
        features = tf.train.Features(feature=feature )
        example = tf.train.Example(features=features )
        record_bytes = example.SerializeToString()
        records.append(record_bytes )
    return records
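# Illustrative round-trip sketch (hypothetical helper, not part of the original
# script): parse one serialized record back into tensors. `max_length` must
# match the sequence length used at serialization time.
def parse_serialized_example(serialized_example , max_length=512 ):
    feature_description = {
        '''input_ids''': tf.io.FixedLenFeature([max_length] , tf.int64 ),
        '''attention_mask''': tf.io.FixedLenFeature([max_length] , tf.int64 ),
    }
    return tf.io.parse_single_example(serialized_example , feature_description )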
def main( args ):
    """simple docstring"""
    dataset = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
    if args.limit is not None:
        max_samples = min(len(dataset ) , args.limit )
        dataset = dataset.select(range(max_samples ) )
        print(F"Limiting the dataset to {args.limit} entries." )
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir ):
            os.makedirs(args.output_dir )
        split_dir = os.path.join(args.output_dir , args.split )
        if not os.path.exists(split_dir ):
            os.makedirs(split_dir )
    else:
        split_dir = os.path.join(args.output_dir , args.split )

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer )
    dataset_tokenized = dataset.map(tokenize_fn , batched=True , num_proc=4 , remove_columns=['''text'''] )

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.

    def group_texts(examples ):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k] , [] ) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys() )[0]] )
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0 , total_length , args.max_length )]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts , batched=True , batch_size=1000 , num_proc=4 )
    shard_count = 0
    total_records = 0
    for shard in range(0 , len(grouped_dataset ) , args.shard_size ):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot['''input_ids'''] )
        filename = os.path.join(split_dir , F"dataset-{shard_count}-{records_containing}.tfrecord" )
        serialized_examples = get_serialized_examples(dataset_snapshot )
        with tf.io.TFRecordWriter(filename ) as out_file:
            for i in range(len(serialized_examples ) ):
                example = serialized_examples[i]
                out_file.write(example )
            print('''Wrote file {} containing {} records'''.format(filename , records_containing ) )
        shard_count += 1
        total_records += records_containing
    with open(F"split-{args.split}-records-count.txt" , '''w''' ) as f:
        print(F"Total {args.split} records: {total_records}" , file=f )


if __name__ == "__main__":
    args = parse_args()
    main(args)
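# Illustrative sketch (hypothetical glob pattern based on the naming scheme in
# main() above, not part of the original script): stream the written shards
# back for training with tf.data.
def example_load_tfrecord_shards(pattern='''tf-tpu/train/dataset-*.tfrecord''' , max_length=512 , batch_size=8 ):
    files = tf.io.gfile.glob(pattern )
    return (
        tf.data.TFRecordDataset(files , num_parallel_reads=tf.data.AUTOTUNE )
        .map(lambda rec : parse_serialized_example(rec , max_length=max_length ) , num_parallel_calls=tf.data.AUTOTUNE )
        .batch(batch_size )
        .prefetch(tf.data.AUTOTUNE )
    )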
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
}
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    't5-small': 512,
    't5-base': 512,
    't5-large': 512,
    't5-3b': 512,
    't5-11b': 512,
}
SPIECE_UNDERLINE = '▁'
class TaTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file , eos_token="</s>" , unk_token="<unk>" , pad_token="<pad>" , extra_ids=100 , additional_special_tokens=None , sp_model_kwargs = None , legacy=True , **kwargs , ):
        '''simple docstring'''
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f'<extra_id_{i}>' for i in range(extra_ids )]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x : bool("extra_id" in str(x ) ) , additional_special_tokens ) ) )
            if extra_tokens != extra_ids:
                raise ValueError(
                    f'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens" )
        if legacy:
            logger.warning_once(
                f'You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'
                " read the related pull request available at https://github.com/huggingface/transformers/pull/24565" )
        self.legacy = legacy
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , extra_ids=extra_ids , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , legacy=legacy , **kwargs , )
        self.vocab_file = vocab_file
        self._extra_ids = extra_ids
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
    @staticmethod
    def _eventually_correct_t5_max_length( pretrained_model_name_or_path , max_model_length , init_max_model_length ):
        '''simple docstring'''
        if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
            deprecated_max_model_length = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f' {pretrained_model_name_or_path} automatically truncating your input to'
                    f' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'
                    f' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value." , FutureWarning , )
                return deprecated_max_model_length
        return max_model_length
    @property
    def vocab_size( self ):
        '''simple docstring'''
        return self.sp_model.get_piece_size() + self._extra_ids

    def get_vocab( self ):
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def get_special_tokens_mask( self , token_ids_a , token_ids_b = None , already_has_special_tokens = False ):
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a , token_ids_b , already_has_special_tokens=True )
        # normal case: some special tokens
        if token_ids_b is None:
            return ([0] * len(token_ids_a )) + [1]
        return ([0] * len(token_ids_a )) + [1] + ([0] * len(token_ids_b )) + [1]

    def get_sentinel_tokens( self ):
        '''simple docstring'''
        return list(
            set(filter(lambda x : bool(re.search(R"<extra_id_\d+>" , x ) ) is not None , self.additional_special_tokens ) ) )

    def get_sentinel_token_ids( self ):
        '''simple docstring'''
        return [self._convert_token_to_id(token ) for token in self.get_sentinel_tokens()]
    def _add_eos_if_not_present( self , token_ids ):
        '''simple docstring'''
        if len(token_ids ) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f'This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'
                " eos tokens being added." )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ):
        '''simple docstring'''
        eos = [self.eos_token_id]
        if token_ids_b is None:
            return len(token_ids_a + eos ) * [0]
        return len(token_ids_a + eos + token_ids_b + eos ) * [0]

    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b = None ):
        '''simple docstring'''
        token_ids_a = self._add_eos_if_not_present(token_ids_a )
        if token_ids_b is None:
            return token_ids_a
        else:
            token_ids_b = self._add_eos_if_not_present(token_ids_b )
            return token_ids_a + token_ids_b
    def __getstate__( self ):
        '''simple docstring'''
        state = self.__dict__.copy()
        # the SentencePiece processor is not picklable; it is re-loaded in __setstate__
        state['''sp_model'''] = None
        return state

    def __setstate__( self , d ):
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def tokenize( self , text , **kwargs ):
        '''simple docstring'''
        if not self.legacy:
            # make sure SPIECE_UNDERLINE is only used at the beginning of the text
            text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE , " " )
        return super().tokenize(text , **kwargs )

    def _tokenize( self , text , **kwargs ):
        '''simple docstring'''
        if not self.legacy:
            is_first = text.startswith(SPIECE_UNDERLINE )
            if is_first:
                text = text[1:]
        tokens = self.sp_model.encode(text , out_type=str )
        if not self.legacy and not is_first and not text.startswith(" " ) and tokens[0].startswith(SPIECE_UNDERLINE ):
            tokens = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
        return tokens

    def _convert_token_to_id( self , token ):
        '''simple docstring'''
        if token.startswith("<extra_id_" ):
            match = re.match(R"<extra_id_(\d+)>" , token )
            num = int(match.group(1 ) )
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token )

    def _convert_id_to_token( self , index ):
        '''simple docstring'''
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index )
        else:
            token = f'<extra_id_{self.vocab_size - 1 - index}>'
        return token

    def convert_tokens_to_string( self , tokens ):
        '''simple docstring'''
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()

    def save_vocabulary( self , save_directory , filename_prefix = None ):
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
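# Illustrative usage sketch (hypothetical, not part of the original module;
# requires the `t5-small` checkpoint to be downloadable): sentinel tokens sit
# at the top of the vocabulary and an eos id is appended automatically.
def _example_t5_tokenizer_usage():
    tok = TaTokenizer.from_pretrained('''t5-small''' )
    # <extra_id_0> maps to vocab_size - 1 per _convert_token_to_id above
    assert tok._convert_token_to_id('''<extra_id_0>''' ) == tok.vocab_size - 1
    ids = tok('''Translate English to German: Hello''' ).input_ids
    assert ids[-1] == tok.eos_token_id
    return ids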
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r'\[(.+?)\]\((https://huggingface\.co/.+?)\)')
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
'DecisionTransformerConfig',
'EncoderDecoderConfig',
'MusicgenConfig',
'RagConfig',
'SpeechEncoderDecoderConfig',
'TimmBackboneConfig',
'VisionEncoderDecoderConfig',
'VisionTextDualEncoderConfig',
'LlamaConfig',
}
def get_checkpoint_from_config_class( config_class ):
    checkpoint = None
    # source code of `config_class`
    config_source = inspect.getsource(config_class )
    checkpoints = _re_checkpoint.findall(config_source )
    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/" ):
            ckpt_link = ckpt_link[:-1]
        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = F'https://huggingface.co/{ckpt_name}'
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break
    return checkpoint


def check_config_docstrings_have_checkpoints( ):
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values() ):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class )
        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name )
    if len(configs_without_checkpoint ) > 0:
        message = "\n".join(sorted(configs_without_checkpoint ) )
        raise ValueError(F'The following configurations don\'t contain any valid checkpoint:\n{message}' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
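# Illustrative sketch (hypothetical, not part of the original script): what the
# `_re_checkpoint` regex above extracts from a config docstring.
def _example_checkpoint_regex():
    docstring = "Example: [bert-base-uncased](https://huggingface.co/bert-base-uncased)"
    assert _re_checkpoint.findall(docstring ) == [
        ("bert-base-uncased", "https://huggingface.co/bert-base-uncased")
    ]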
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''MIT/ast-finetuned-audioset-10-10-0.4593''': (
'''https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'''
),
}
class ASTConfig( PretrainedConfig ):
    '''simple docstring'''

    model_type = "audio-spectrogram-transformer"
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-12 , patch_size=16 , qkv_bias=True , frequency_stride=10 , time_stride=10 , max_length=1_024 , num_mel_bins=128 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
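# Illustrative usage sketch (hypothetical, not part of the original module):
# instantiate the config with its defaults and override audio-specific fields.
def _example_ast_config():
    config = ASTConfig(frequency_stride=10 , time_stride=10 , num_mel_bins=128 )
    assert config.hidden_size == 768  # default from the signature above
    return config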
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)
@dataclass
class TensorFlowBenchmarkArguments( BenchmarkArguments ):
    deprecated_args = [
        """no_inference""",
        """no_cuda""",
        """no_tpu""",
        """no_speed""",
        """no_memory""",
        """no_env_print""",
        """no_multi_process""",
    ]

    def __init__( self , **kwargs ):
        """simple docstring"""
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg )
                logger.warning(
                    F'''{deprecated_arg} is deprecated. Please use --no-{positive_arg} or'''
                    F''' {positive_arg}={kwargs[positive_arg]}''' )
        self.tpu_name = kwargs.pop("tpu_name" , self.tpu_name )
        self.device_idx = kwargs.pop("device_idx" , self.device_idx )
        self.eager_mode = kwargs.pop("eager_mode" , self.eager_mode )
        self.use_xla = kwargs.pop("use_xla" , self.use_xla )
        super().__init__(**kwargs )
    tpu_name : str = field(
        default=None , metadata={"""help""": """Name of TPU"""} , )
    device_idx : int = field(
        default=0 , metadata={"""help""": """CPU / GPU device index. Defaults to 0."""} , )
    eager_mode : bool = field(default=False , metadata={"""help""": """Benchmark models in eager model."""} )
    use_xla : bool = field(
        default=False , metadata={
            """help""": """Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."""
        } , )
    @cached_property
    def _setup_tpu( self ):
        """simple docstring"""
        requires_backends(self , ["tf"] )
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy( self ):
        """simple docstring"""
        requires_backends(self , ["tf"] )
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu )
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
            strategy = tf.distribute.TPUStrategy(self._setup_tpu )
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx] , "GPU" )
                strategy = tf.distribute.OneDeviceStrategy(device=F'''/gpu:{self.device_idx}''' )
            else:
                tf.config.set_visible_devices([] , "GPU" )  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=F'''/cpu:{self.device_idx}''' )
        return strategy

    @property
    def is_tpu( self ):
        """simple docstring"""
        requires_backends(self , ["tf"] )
        return self._setup_tpu is not None

    @property
    def strategy( self ):
        """simple docstring"""
        requires_backends(self , ["tf"] )
        return self._setup_strategy

    @property
    def gpu_list( self ):
        """simple docstring"""
        requires_backends(self , ["tf"] )
        return tf.config.list_physical_devices("GPU" )

    @property
    def n_gpu( self ):
        """simple docstring"""
        requires_backends(self , ["tf"] )
        if self.cuda:
            return len(self.gpu_list )
        return 0

    @property
    def is_gpu( self ):
        """simple docstring"""
        return self.n_gpu > 0
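# Illustrative usage sketch (hypothetical, assuming `models` and `inference`
# fields are inherited from BenchmarkArguments): a deprecated `no_*` flag is
# translated to its positive counterpart in __init__ above.
def _example_deprecated_flag_translation():
    args = TensorFlowBenchmarkArguments(models=['''bert-base-uncased'''] , no_inference=True )
    assert args.inference is False
    return args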
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def get_swin_config( model_name ):
    """simple docstring"""
    config = SwinConfig(image_size=192 )
    if "base" in model_name:
        window_size = 6
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError('''Model not supported, only supports base and large variants''' )
    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    return config
def rename_key( name ):
    """simple docstring"""
    if "encoder.mask_token" in name:
        name = name.replace('''encoder.mask_token''' ,'''embeddings.mask_token''' )
    if "encoder.patch_embed.proj" in name:
        name = name.replace('''encoder.patch_embed.proj''' ,'''embeddings.patch_embeddings.projection''' )
    if "encoder.patch_embed.norm" in name:
        name = name.replace('''encoder.patch_embed.norm''' ,'''embeddings.norm''' )
    if "attn.proj" in name:
        name = name.replace('''attn.proj''' ,'''attention.output.dense''' )
    if "attn" in name:
        name = name.replace('''attn''' ,'''attention.self''' )
    if "norm1" in name:
        name = name.replace('''norm1''' ,'''layernorm_before''' )
    if "norm2" in name:
        name = name.replace('''norm2''' ,'''layernorm_after''' )
    if "mlp.fc1" in name:
        name = name.replace('''mlp.fc1''' ,'''intermediate.dense''' )
    if "mlp.fc2" in name:
        name = name.replace('''mlp.fc2''' ,'''output.dense''' )
    if name == "encoder.norm.weight":
        name = '''layernorm.weight'''
    if name == "encoder.norm.bias":
        name = '''layernorm.bias'''
    if "decoder" in name:
        pass
    else:
        name = '''swin.''' + name
    return name


def convert_state_dict( orig_state_dict ,model ):
    """simple docstring"""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            key_split = key.split('''.''' )
            layer_num = int(key_split[2] )
            block_num = int(key_split[4] )
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            prefix = F"""swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self"""
            if "weight" in key:
                orig_state_dict[F"""{prefix}.query.weight"""] = val[:dim, :]
                orig_state_dict[F"""{prefix}.key.weight"""] = val[dim : dim * 2, :]
                orig_state_dict[F"""{prefix}.value.weight"""] = val[-dim:, :]
            else:
                orig_state_dict[F"""{prefix}.query.bias"""] = val[:dim]
                orig_state_dict[F"""{prefix}.key.bias"""] = val[dim : dim * 2]
                orig_state_dict[F"""{prefix}.value.bias"""] = val[-dim:]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
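# Standalone sketch (hypothetical values, not part of the original script) of
# the qkv split above: a fused (3*dim, dim) projection is cut into equal
# query/key/value blocks along dimension 0.
def _example_qkv_split(dim=4 ):
    qkv_weight = torch.arange(3 * dim * dim , dtype=torch.float32 ).reshape(3 * dim , dim )
    q = qkv_weight[:dim, :]
    k = qkv_weight[dim : dim * 2, :]
    v = qkv_weight[-dim:, :]
    assert torch.equal(torch.cat([q, k, v] , dim=0 ) , qkv_weight )
    return q, k, v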
def convert_swin_checkpoint( model_name ,checkpoint_path ,pytorch_dump_folder_path ,push_to_hub ):
    """simple docstring"""
    state_dict = torch.load(checkpoint_path ,map_location='''cpu''' )['''model''']
    config = get_swin_config(model_name )
    model = SwinForMaskedImageModeling(config )
    model.eval()
    new_state_dict = convert_state_dict(state_dict ,model )
    model.load_state_dict(new_state_dict )
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    image_processor = ViTImageProcessor(size={'''height''': 192, '''width''': 192} )
    image = Image.open(requests.get(url ,stream=True ).raw )
    inputs = image_processor(images=image ,return_tensors='''pt''' )
    with torch.no_grad():
        outputs = model(**inputs )
    print(outputs.keys() )
    print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
        model.save_pretrained(pytorch_dump_folder_path )
        print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
        image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(F"""Pushing model and image processor for {model_name} to hub""" )
        model.push_to_hub(F"""microsoft/{model_name}""" )
        image_processor.push_to_hub(F"""microsoft/{model_name}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''swin-base-simmim-window6-192''',
type=str,
choices=['''swin-base-simmim-window6-192''', '''swin-large-simmim-window12-192'''],
help='''Name of the Swin SimMIM model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''',
default='''/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth''',
type=str,
help='''Path to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ['''small''', '''medium''', '''large''']
OLD_KEY = '''lm_head.decoder.weight'''
NEW_KEY = '''lm_head.weight'''


def convert_dialogpt_checkpoint( checkpoint_path , pytorch_dump_folder_path ):
    """simple docstring"""
    d = torch.load(checkpoint_path )
    d[NEW_KEY] = d.pop(OLD_KEY )
    os.makedirs(pytorch_dump_folder_path ,exist_ok=True )
    torch.save(d ,os.path.join(pytorch_dump_folder_path ,WEIGHTS_NAME ) )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--dialogpt_path''', default='''.''', type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f'''{MODEL}_ft.pkl''')
        pytorch_dump_folder_path = f'''./DialoGPT-{MODEL}'''
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
"""simple docstring"""
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester( ConfigTester ):
    def create_and_test_config_common_properties( self ) -> Union[str, Any]:
        """simple docstring"""
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , """hidden_sizes""" ) )
        self.parent.assertTrue(hasattr(config , """num_attention_heads""" ) )
        self.parent.assertTrue(hasattr(config , """num_encoder_blocks""" ) )
class SegformerModelTester:
    def __init__( self , parent , batch_size=13 , image_size=64 , num_channels=3 , num_encoder_blocks=4 , depths=[2, 2, 2, 2] , sr_ratios=[8, 4, 2, 1] , hidden_sizes=[16, 32, 64, 1_28] , downsampling_rates=[1, 4, 8, 16] , num_attention_heads=[1, 2, 4, 8] , is_training=True , use_labels=True , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , initializer_range=0.02 , num_labels=3 , scope=None , ) -> str:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
def _lowerCAmelCase ( self : Tuple ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase__ : Tuple = None
if self.use_labels:
lowerCAmelCase__ : Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
lowerCAmelCase__ : Tuple = self.get_config()
return config, pixel_values, labels
def _lowerCAmelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def _lowerCAmelCase ( self : str , UpperCamelCase : Optional[Any] , UpperCamelCase : str , UpperCamelCase : Union[str, Any] ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ : int = SegformerModel(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
lowerCAmelCase__ : str = model(UpperCamelCase )
lowerCAmelCase__ : Union[str, Any] = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
def _lowerCAmelCase ( self : int , UpperCamelCase : Optional[int] , UpperCamelCase : List[Any] , UpperCamelCase : List[str] ) -> Any:
"""simple docstring"""
lowerCAmelCase__ : Any = self.num_labels
lowerCAmelCase__ : Optional[Any] = SegformerForSemanticSegmentation(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
lowerCAmelCase__ : Optional[int] = model(UpperCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
lowerCAmelCase__ : int = model(UpperCamelCase , labels=UpperCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
self.parent.assertGreater(result.loss , 0.0 )
def _lowerCAmelCase ( self : List[Any] , UpperCamelCase : int , UpperCamelCase : Tuple , UpperCamelCase : Optional[int] ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase__ : int = 1
lowerCAmelCase__ : List[str] = SegformerForSemanticSegmentation(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
lowerCAmelCase__ : Dict = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(UpperCamelCase )
lowerCAmelCase__ : Tuple = model(UpperCamelCase , labels=UpperCamelCase )
self.parent.assertGreater(result.loss , 0.0 )
def _lowerCAmelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ : List[Any] = self.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Any = config_and_inputs
lowerCAmelCase__ : Tuple = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class SegformerModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SegformerModel,
            "image-classification": SegformerForImageClassification,
            "image-segmentation": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False
    def setUp( self ) -> Tuple:
        """simple docstring"""
        self.model_tester = SegformerModelTester(self )
        self.config_tester = SegformerConfigTester(self , config_class=SegformerConfig )
def _lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase )
def _lowerCAmelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
lowerCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*UpperCamelCase )
def _lowerCAmelCase ( self : Dict ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*UpperCamelCase )
@unittest.skip("""SegFormer does not use inputs_embeds""" )
def _lowerCAmelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
pass
@unittest.skip("""SegFormer does not have get_input_embeddings method and get_output_embeddings methods""" )
def _lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
pass
def _lowerCAmelCase ( self : int ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ : str = model_class(UpperCamelCase )
lowerCAmelCase__ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ : Optional[Any] = [*signature.parameters.keys()]
lowerCAmelCase__ : int = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCamelCase )
def _lowerCAmelCase ( self : Tuple ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : List[str] = True
for model_class in self.all_model_classes:
lowerCAmelCase__ : Optional[Any] = True
lowerCAmelCase__ : Dict = False
lowerCAmelCase__ : int = True
lowerCAmelCase__ : Optional[int] = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
lowerCAmelCase__ : Dict = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
lowerCAmelCase__ : List[str] = outputs.attentions
lowerCAmelCase__ : List[Any] = sum(self.model_tester.depths )
self.assertEqual(len(UpperCamelCase ) , UpperCamelCase )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCAmelCase__ : str = True
lowerCAmelCase__ : Tuple = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
lowerCAmelCase__ : Optional[Any] = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
lowerCAmelCase__ : List[str] = outputs.attentions
self.assertEqual(len(UpperCamelCase ) , UpperCamelCase )
# verify the first attentions (first block, first layer)
lowerCAmelCase__ : List[str] = (self.model_tester.image_size // 4) ** 2
lowerCAmelCase__ : Optional[Any] = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
lowerCAmelCase__ : Optional[int] = (self.model_tester.image_size // 32) ** 2
lowerCAmelCase__ : Union[str, Any] = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
lowerCAmelCase__ : Any = len(UpperCamelCase )
# Check attention is always last and order is fine
lowerCAmelCase__ : Tuple = True
lowerCAmelCase__ : Tuple = True
lowerCAmelCase__ : List[str] = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
lowerCAmelCase__ : str = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
self.assertEqual(out_len + 1 , len(UpperCamelCase ) )
lowerCAmelCase__ : str = outputs.attentions
self.assertEqual(len(UpperCamelCase ) , UpperCamelCase )
# verify the first attentions (first block, first layer)
lowerCAmelCase__ : List[str] = (self.model_tester.image_size // 4) ** 2
lowerCAmelCase__ : List[Any] = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def _lowerCAmelCase ( self : Any ) -> str:
"""simple docstring"""
def check_hidden_states_output(UpperCamelCase : List[Any] , UpperCamelCase : Optional[int] , UpperCamelCase : Tuple ):
lowerCAmelCase__ : Optional[int] = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
lowerCAmelCase__ : Any = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
lowerCAmelCase__ : List[str] = outputs.hidden_states
lowerCAmelCase__ : Tuple = self.model_tester.num_encoder_blocks
self.assertEqual(len(UpperCamelCase ) , UpperCamelCase )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
lowerCAmelCase__ , lowerCAmelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ : Optional[Any] = True
check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase__ : str = True
check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase )
def _lowerCAmelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
if not self.model_tester.is_training:
return
lowerCAmelCase__ , lowerCAmelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : Tuple = True
for model_class in self.all_model_classes:
if model_class in get_values(UpperCamelCase ):
continue
lowerCAmelCase__ : int = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.train()
lowerCAmelCase__ : str = self._prepare_for_class(UpperCamelCase , UpperCamelCase , return_labels=UpperCamelCase )
lowerCAmelCase__ : List[Any] = model(**UpperCamelCase ).loss
loss.backward()
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
pass
@slow
def _lowerCAmelCase ( self : str ) -> List[Any]:
"""simple docstring"""
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ : List[str] = SegformerModel.from_pretrained(UpperCamelCase )
self.assertIsNotNone(UpperCamelCase )
def lowercase_ ( ) -> Optional[int]:
lowerCAmelCase__ : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
class _lowerCamelCase ( unittest.TestCase ):
@slow
def _lowerCAmelCase ( self : str ) -> List[str]:
"""simple docstring"""
# only resize + normalize
lowerCAmelCase__ : int = SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=UpperCamelCase , align=UpperCamelCase , do_random_crop=UpperCamelCase )
lowerCAmelCase__ : Tuple = SegformerForSemanticSegmentation.from_pretrained("""nvidia/segformer-b0-finetuned-ade-512-512""" ).to(
UpperCamelCase )
lowerCAmelCase__ : Dict = prepare_img()
lowerCAmelCase__ : Dict = image_processor(images=UpperCamelCase , return_tensors="""pt""" )
lowerCAmelCase__ : Union[str, Any] = encoded_inputs.pixel_values.to(UpperCamelCase )
with torch.no_grad():
lowerCAmelCase__ : Dict = model(UpperCamelCase )
lowerCAmelCase__ : Tuple = torch.Size((1, model.config.num_labels, 1_28, 1_28) )
self.assertEqual(outputs.logits.shape , UpperCamelCase )
lowerCAmelCase__ : List[Any] = torch.tensor(
[
[[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
[[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
[[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
] ).to(UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , UpperCamelCase , atol=1E-4 ) )
@slow
def _lowerCAmelCase ( self : Dict ) -> List[str]:
"""simple docstring"""
# only resize + normalize
lowerCAmelCase__ : Optional[Any] = SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=UpperCamelCase , align=UpperCamelCase , do_random_crop=UpperCamelCase )
lowerCAmelCase__ : Any = SegformerForSemanticSegmentation.from_pretrained(
"""nvidia/segformer-b1-finetuned-cityscapes-1024-1024""" ).to(UpperCamelCase )
lowerCAmelCase__ : Optional[int] = prepare_img()
lowerCAmelCase__ : Optional[int] = image_processor(images=UpperCamelCase , return_tensors="""pt""" )
lowerCAmelCase__ : Union[str, Any] = encoded_inputs.pixel_values.to(UpperCamelCase )
with torch.no_grad():
lowerCAmelCase__ : Tuple = model(UpperCamelCase )
lowerCAmelCase__ : Optional[Any] = torch.Size((1, model.config.num_labels, 1_28, 1_28) )
self.assertEqual(outputs.logits.shape , UpperCamelCase )
lowerCAmelCase__ : List[Any] = torch.tensor(
[
[[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
[[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
[[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
] ).to(UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , UpperCamelCase , atol=1E-1 ) )
@slow
def _lowerCAmelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
# only resize + normalize
lowerCAmelCase__ : Tuple = SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=UpperCamelCase , align=UpperCamelCase , do_random_crop=UpperCamelCase )
lowerCAmelCase__ : Tuple = SegformerForSemanticSegmentation.from_pretrained("""nvidia/segformer-b0-finetuned-ade-512-512""" ).to(
UpperCamelCase )
lowerCAmelCase__ : Optional[Any] = prepare_img()
lowerCAmelCase__ : Tuple = image_processor(images=UpperCamelCase , return_tensors="""pt""" )
lowerCAmelCase__ : str = encoded_inputs.pixel_values.to(UpperCamelCase )
with torch.no_grad():
lowerCAmelCase__ : List[Any] = model(UpperCamelCase )
lowerCAmelCase__ : Tuple = outputs.logits.detach().cpu()
lowerCAmelCase__ : Optional[Any] = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase , target_sizes=[(5_00, 3_00)] )
lowerCAmelCase__ : Any = torch.Size((5_00, 3_00) )
self.assertEqual(segmentation[0].shape , UpperCamelCase )
lowerCAmelCase__ : Tuple = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase )
lowerCAmelCase__ : Tuple = torch.Size((1_28, 1_28) )
self.assertEqual(segmentation[0].shape , UpperCamelCase )
"""simple docstring"""
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling( data ) -> tuple:
    return (data["data"], data["target"])


def xgboost( features , target , test_features ) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0 , random_state=42 )
    xgb.fit(features , target )
    # Predict target for test data
    predictions = xgb.predict(test_features )
    predictions = predictions.reshape(len(predictions ) , 1 )
    return predictions
def main( ) -> None:
    housing = fetch_california_housing()
    data, target = data_handling(housing )
    x_train, x_test, y_train, y_test = train_test_split(
        data , target , test_size=0.25 , random_state=1 )
    predictions = xgboost(x_train , y_train , x_test )
    # Error printing
    print(f"""Mean Absolute Error : {mean_absolute_error(y_test , predictions )}""" )
    print(f"""Mean Square Error : {mean_squared_error(y_test , predictions )}""" )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
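# Optional extension sketch (hypothetical helper, not part of the original
# script): RMSE is often reported alongside MAE/MSE for regression benchmarks.
def rmse(y_true , y_pred ) -> float:
    return float(np.sqrt(mean_squared_error(y_true , y_pred ) ) )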
"""simple docstring"""
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class KandinskyVaaPriorPipelineFastTests( PipelineTesterMixin ,unittest.TestCase ):
    '''simple docstring'''

    pipeline_class = KandinskyVaaPriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
@property
def UpperCAmelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
return 32
@property
def UpperCAmelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
return 32
@property
def UpperCAmelCase ( self : int ) -> Union[str, Any]:
"""simple docstring"""
return self.time_input_dim
@property
def UpperCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
return self.time_input_dim * 4
@property
def UpperCAmelCase ( self : List[str] ) -> Any:
"""simple docstring"""
return 100
@property
def UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
lowercase__ : Dict = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def UpperCAmelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
torch.manual_seed(0 )
lowercase__ : int = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=self.text_embedder_hidden_size ,projection_dim=self.text_embedder_hidden_size ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_000 ,)
return CLIPTextModelWithProjection(_snake_case )
@property
def UpperCAmelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
torch.manual_seed(0 )
lowercase__ : List[Any] = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 12,
'''embedding_dim''': self.text_embedder_hidden_size,
'''num_layers''': 1,
}
lowercase__ : Dict = PriorTransformer(**_snake_case )
# clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
lowercase__ : List[str] = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def UpperCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
torch.manual_seed(0 )
lowercase__ : Tuple = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size ,image_size=224 ,projection_dim=self.text_embedder_hidden_size ,intermediate_size=37 ,num_attention_heads=4 ,num_channels=3 ,num_hidden_layers=5 ,patch_size=14 ,)
lowercase__ : Optional[Any] = CLIPVisionModelWithProjection(_snake_case )
return model
@property
def UpperCAmelCase ( self : Tuple ) -> int:
"""simple docstring"""
lowercase__ : Union[str, Any] = CLIPImageProcessor(
crop_size=224 ,do_center_crop=_snake_case ,do_normalize=_snake_case ,do_resize=_snake_case ,image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] ,image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] ,resample=3 ,size=224 ,)
return image_processor
def UpperCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
lowercase__ : int = self.dummy_prior
lowercase__ : Union[str, Any] = self.dummy_image_encoder
lowercase__ : Optional[int] = self.dummy_text_encoder
lowercase__ : Optional[Any] = self.dummy_tokenizer
lowercase__ : Union[str, Any] = self.dummy_image_processor
lowercase__ : Tuple = UnCLIPScheduler(
variance_type='''fixed_small_log''' ,prediction_type='''sample''' ,num_train_timesteps=1_000 ,clip_sample=_snake_case ,clip_sample_range=10.0 ,)
lowercase__ : Dict = {
'''prior''': prior,
'''image_encoder''': image_encoder,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''scheduler''': scheduler,
'''image_processor''': image_processor,
}
return components
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : Optional[int] ,_snake_case : Tuple=0 ) -> int:
"""simple docstring"""
if str(_snake_case ).startswith('''mps''' ):
lowercase__ : Optional[int] = torch.manual_seed(_snake_case )
else:
lowercase__ : Any = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
lowercase__ : Tuple = {
'''prompt''': '''horse''',
'''generator''': generator,
'''guidance_scale''': 4.0,
'''num_inference_steps''': 2,
'''output_type''': '''np''',
}
return inputs
def UpperCAmelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : Union[str, Any] = '''cpu'''
lowercase__ : Optional[Any] = self.get_dummy_components()
lowercase__ : List[Any] = self.pipeline_class(**_snake_case )
lowercase__ : Union[str, Any] = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
lowercase__ : Optional[int] = pipe(**self.get_dummy_inputs(_snake_case ) )
lowercase__ : Any = output.image_embeds
lowercase__ : List[str] = pipe(
**self.get_dummy_inputs(_snake_case ) ,return_dict=_snake_case ,)[0]
lowercase__ : str = image[0, -10:]
lowercase__ : int = image_from_tuple[0, -10:]
assert image.shape == (1, 32)
lowercase__ : int = np.array(
[-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def UpperCAmelCase ( self : int ) -> Any:
"""simple docstring"""
lowercase__ : int = torch_device == '''cpu'''
lowercase__ : Any = True
lowercase__ : str = False
self._test_inference_batch_single_identical(
test_max_difference=_snake_case ,relax_max_difference=_snake_case ,test_mean_pixel_difference=_snake_case ,)
@skip_mps
def UpperCAmelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
lowercase__ : Tuple = torch_device == '''cpu'''
lowercase__ : Optional[int] = False
self._test_attention_slicing_forward_pass(
test_max_difference=_snake_case ,test_mean_pixel_difference=_snake_case ,)
"""simple docstring"""
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
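# Illustrative aside (not part of the original script): `pad_to_multiple_of` rounds each
# batch's sequence dimension up to a multiple of 8/16 so shapes stay tensor-core friendly
# under mixed precision. A minimal sketch assuming a standard BERT tokenizer; the helper
# name `_demo_pad_to_multiple_of` is ours, not part of the example.
def _demo_pad_to_multiple_of():
    tok = AutoTokenizer.from_pretrained("bert-base-cased")
    enc = tok(["a short sentence", "a slightly longer example sentence"])
    batch = tok.pad(enc, padding="longest", pad_to_multiple_of=8, return_tensors="pt")
    # The sequence dimension of input_ids is now padded up to a multiple of 8.
    return batch["input_ids"].shape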
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
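# Illustrative aside (not part of the original script): `find_executable_batch_size`
# retries its wrapped function with a halved batch size whenever the call raises an
# out-of-memory style error. A minimal sketch under that assumption; `train` and the
# sizes below are made up for the demo.
def _demo_find_executable_batch_size():
    @find_executable_batch_size(starting_batch_size=128)
    def train(batch_size):
        # Pretend anything above 32 does not fit in memory. On a real OOM the decorator
        # frees memory and calls `train` again with the batch size halved.
        if batch_size > 32:
            raise RuntimeError("CUDA out of memory.")
        return batch_size

    return train()  # resolves to 32 after two automatic retries: 128 -> 64 -> 32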
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
    main()
"""simple docstring"""
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path):
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None


def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)


def get_scores(args, preds_path, gold_data_path):
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]

    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)

    em = 100.0 * em / total
    f1 = 100.0 * f1 / total

    logger.info(f"F1: {f1:.2f}")
    logger.info(f"EM: {em:.2f}")
def get_precision_at_k(args, preds_path, gold_data_path):
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]

    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k

    em = 100.0 * em / total
    logger.info(f"Precision@{k}: {em: .2f}")
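# Illustrative aside (not in the original script): a tiny worked example of the
# precision@k computation above. With k=2, a hypothesis listing {"A", "B"} against gold
# provenance {"B", "C"} shares one title, so the example scores 1/2 = 0.5.
def _demo_precision_at_k():
    k = 2
    hypo_provenance = set("A\tB\tD".split("\t")[:k])  # top-k retrieved titles
    ref_provenance = set("B\tC".split("\t"))          # gold titles
    return len(hypo_provenance & ref_provenance) / k  # -> 0.5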
def evaluate_batch_retrieval(args, rag_model, questions):
    def strip_title(title):
        if title.startswith('"'):
            title = title[1:]
        if title.endswith('"'):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions,
        return_tensors="pt",
        padding=True,
        truncation=True,
    )["input_ids"].to(args.device)

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids,
        question_enc_pool_output.cpu().detach().to(torch.float32).numpy(),
        prefix=rag_model.rag.generator.config.prefix,
        n_docs=rag_model.config.n_docs,
        return_tensors="pt",
    )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings
def evaluate_batch_e2e(args, rag_model, questions):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True
        )

        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids,
            attention_mask=attention_mask,
            num_beams=args.num_beams,
            min_length=args.min_length,
            max_length=args.max_length,
            early_stopping=False,
            num_return_sequences=1,
            bad_words_ids=[[0, 0]],
        )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))

        return answers
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type",
        choices=["rag_sequence", "rag_token", "bart"],
        type=str,
        help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ),
    )
    parser.add_argument(
        "--index_name",
        default=None,
        choices=["exact", "compressed", "legacy"],
        type=str,
        help="RAG model retriever type",
    )
    parser.add_argument("--index_path", default=None, type=str, help="Path to the retrieval index")
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained checkpoints or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--eval_mode",
        choices=["e2e", "retrieval"],
        default="e2e",
        type=str,
        help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ),
    )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
    parser.add_argument(
        "--evaluation_set",
        default=None,
        type=str,
        required=True,
        help="Path to a file containing evaluation samples",
    )
    parser.add_argument(
        "--gold_data_path",
        default=None,
        type=str,
        required=True,
        help="Path to a tab-separated file with gold samples",
    )
    parser.add_argument(
        "--gold_data_mode",
        default="qa",
        type=str,
        choices=["qa", "ans"],
        help=(
            "Format of the gold data file"
            "qa - a single line in the following format: question [tab] answer_list"
            "ans - a single line of the gold file contains the expected answer string"
        ),
    )
    parser.add_argument(
        "--predictions_path",
        type=str,
        default="predictions.txt",
        help="Name of the predictions file, to be stored in the checkpoints directory",
    )
    parser.add_argument(
        "--eval_all_checkpoints",
        action="store_true",
        help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
    )
    parser.add_argument("--eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.")
    parser.add_argument(
        "--recalculate",
        help="Recalculate predictions even if the prediction file exists",
        action="store_true",
    )
    parser.add_argument("--num_beams", default=4, type=int, help="Number of beams to be used when generating answers")
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
    parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")
    parser.add_argument(
        "--print_predictions",
        action="store_true",
        help="If True, prints predictions while evaluating.",
    )
    parser.add_argument(
        "--print_docs",
        action="store_true",
        help="If True, prints docs retried while generating.",
    )
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args
def main(args):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )

    logger.info("Evaluate the following checkpoints: %s", checkpoints)

    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval

    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue

        logger.info("***** Running evaluation for {} *****".format(checkpoint))
        logger.info("  Batch size = %d", args.eval_batch_size)
        logger.info("  Predictions will be stored under {}".format(args.predictions_path))

        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)

        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()

            score_fn(args, args.predictions_path, args.gold_data_path)
if __name__ == "__main__":
    args = get_args()
    main(args)
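# Illustrative usage (not part of the original script) -- the flag names are taken from
# the parser above, but the script filename and data paths are placeholders:
#
#   python eval_rag.py \
#       --model_name_or_path facebook/rag-sequence-nq \
#       --model_type rag_sequence \
#       --evaluation_set path/to/test.source \
#       --gold_data_path path/to/gold_data \
#       --predictions_path predictions.txt \
#       --eval_mode e2e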
"""simple docstring"""
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import (
        CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
        CTRLForSequenceClassification,
        CTRLLMHeadModel,
        CTRLModel,
    )
class CTRLModelTester:
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_input_mask=True,
        use_labels=True,
        use_mc_token_ids=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def get_config(self):
        return CTRLConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
    def create_and_check_ctrl_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLModel(config=config)
        model.to(torch_device)
        model.eval()

        model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(len(result.past_key_values), config.n_layer)

    def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}

        return config, inputs_dict

    def create_and_check_ctrl_for_sequence_classification(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = CTRLForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
@require_torch
class CTRLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": CTRLModel,
            "text-classification": CTRLForSequenceClassification,
            "text-generation": CTRLLMHeadModel,
            "zero-shot": CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = True
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
            # config could not be created.
            return True

        return False

    def setUp(self):
        self.model_tester = CTRLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37)

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_ctrl_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs)

    def test_ctrl_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
@require_torch
class CTRLModelLanguageGenerationTest(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    @slow
    def test_lm_generate_ctrl(self):
        model = CTRLLMHeadModel.from_pretrained("ctrl")
        model.to(torch_device)

        input_ids = torch.tensor(
            [[11859, 0, 1611, 8]], dtype=torch.long, device=torch_device
        )  # Legal the president is
        expected_output_ids = [
            11859,
            0,
            1611,
            8,
            5,
            150,
            26449,
            2,
            19,
            348,
            469,
            3,
            2595,
            48,
            20740,
            246533,
            246533,
            19,
            30,
            5,
        ]  # Legal the president is a good guy and I don't want to lose my job. \n \n I have a

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
from manim import *
class Stage1(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])

        self.play(
            Create(cpu_left_col, run_time=1),
            Create(cpu_right_col, run_time=1),
            Create(gpu_rect, run_time=1),
        )

        step_1 = MarkupText(
            f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.",
            font_size=24,
        )
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        step_1.move_to([2, 2, 0])
        self.play(Write(step_1, run_time=2.5), Write(key_text), Write(key))
        self.add(model)

        cpu_targs = []
        first_animations = []
        second_animations = []
        for i, rect in enumerate(model_base):
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)
            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
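# Illustrative note (not part of the original file): assuming this scene is saved as
# stage_1.py and the class is named Stage1 as reconstructed above, it can be rendered with:
#
#   manim -pql stage_1.py Stage1
#
# (-p previews the result; -ql renders at low quality for fast iteration)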
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
"""cross_validation.py""",
"""gradient_accumulation.py""",
"""local_sgd.py""",
"""multi_process_metrics.py""",
"""memory.py""",
"""automatic_gradient_accumulation.py""",
"""fsdp_with_peak_mem_tracking.py""",
"""deepspeed_with_config_support.py""",
"""megatron_lm_gpt_pretraining.py""",
]
class ExampleDifferenceTests(unittest.TestCase):
    def one_complete_example(
        self, complete_file_name, parser_only, secondary_filename=None, special_strings=None
    ):
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section="main()" if parser_only else "training_function()",
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, parser_only, secondary_filename
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")

    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)

    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
            " " * 20 + '"f1": eval_metric["f1"],\n\n',
            " " * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
            " " * 20 + '"epoch": epoch,\n\n',
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)
@mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """1"""} )
class FeatureExamplesTests(TempDirTestCase):
    clean_on_exit = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")

        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)

    def test_checkpointing_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))

    def test_checkpointing_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        """.split()
        _ = run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))

    def test_load_states_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, 'epoch_0')}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)

    def test_load_states_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, 'step_2')}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)

    @slow
    def test_cross_validation(self):
        testargs = """
        examples/by_feature/cross_validation.py
        --num_folds 2
        """.split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)

    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f"""
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            """.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))

    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_karras_ve_pipeline(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
"""simple docstring"""
# Lint as: python3
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")

_split_re = r"^\w+(\.\w+)*$"

INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"


def camelcase_to_snakecase(name):
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")


def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
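# Illustrative usage (not part of the original module):
#
#   camelcase_to_snakecase("MyDataset")          # -> "my_dataset"
#   snakecase_to_camelcase("my_dataset")         # -> "MyDataset"
#   filenames_for_dataset_split(
#       "/data", "MyDataset", "train", filetype_suffix="arrow", shard_lengths=[100, 100]
#   )
#   # -> ['/data/my_dataset-train-00000-of-00002.arrow',
#   #     '/data/my_dataset-train-00001-of-00002.arrow']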
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("""Googling.....""")
__UpperCAmelCase = """https://www.google.com/search?q=""" + """ """.join(sys.argv[1:])
__UpperCAmelCase = requests.get(url, headers={"""UserAgent""": UserAgent().random})
# res.raise_for_status()
with open("""project1a.html""", """wb""") as out_file: # only for knowing the class
for data in res.iter_content(10_000):
out_file.write(data)
__UpperCAmelCase = BeautifulSoup(res.text, """html.parser""")
__UpperCAmelCase = list(soup.select(""".eZt8xd"""))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("""href"""))
else:
webbrowser.open(F'https://google.com{link.get("href")}')
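# Illustrative usage (not part of the original script; the filename is a placeholder):
#
#   python google_search.py neural networks
#
# opens the top Google results for "neural networks" in the default browser. Note that
# the CSS class ".eZt8xd" is tied to Google's current markup and may break if it changes.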
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_PRETRAINING_MAPPING,
        MobileBertForMaskedLM,
        MobileBertForMultipleChoice,
        MobileBertForNextSentencePrediction,
        MobileBertForPreTraining,
        MobileBertForQuestionAnswering,
        MobileBertForSequenceClassification,
        MobileBertForTokenClassification,
        MobileBertModel,
    )
class MobileBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MobileBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_mobilebert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mobilebert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_mobilebert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mobilebert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mobilebert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_mobilebert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MobileBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MobileBertModel,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileBertModel,
            "fill-mask": MobileBertForMaskedLM,
            "question-answering": MobileBertForQuestionAnswering,
            "text-classification": MobileBertForSequenceClassification,
            "token-classification": MobileBertForTokenClassification,
            "zero-shot": MobileBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fp16 = True
    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = MobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-3


@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.4736526e07, 8.2691656e04, 1.6521838e05],
                    [-5.7541704e-01, 3.9056022e00, 4.4011507e00],
                    [2.6047359e00, 1.5677652e00, -1.7324188e-01],
                ]
            ],
            device=torch_device,
        )

        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)

        self.assertTrue(lower_bound and upper_bound)
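# Illustrative aside (not in the original file): the ratio-based check above is essentially
# a relative-tolerance comparison. A minimal standalone sketch of the same idea:
def _demo_relative_tolerance():
    expected = torch.tensor([1.0e8, 2.0, -0.5])
    actual = expected * (1 + 5e-4)  # 0.05% relative error everywhere
    tolerance = 1e-3
    ratio = expected / actual
    # Passes even for the 1e8 entry, where an absolute-difference check with a single
    # fixed threshold would be dominated by the largest value.
    return bool(torch.all(ratio >= 1 - tolerance) and torch.all(ratio <= 1 + tolerance))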
"""simple docstring"""
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
class Split(Enum):
    train = "train"
    dev = "dev"


class SquadDataset(Dataset):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ) -> None:
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)
                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )
                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )
def __len__( self : Tuple ) -> Any:
return len(self.features )
    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]
        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }
        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]
        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
        if self.args.version_2_with_negative:
            inputs.update({"is_impossible": is_impossible})
        if self.is_language_sensitive:
            inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})
        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})
return inputs
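

# --- Added illustration (not part of the original module): the lock-guarded
# cache pattern used in __init__ above, reduced to its essentials. The cache
# path here is a made-up temp file, not a real transformers cache location.
import tempfile

_cache = os.path.join(tempfile.gettempdir(), "squad_demo_cache.pt")
with FileLock(_cache + ".lock"):
    if os.path.exists(_cache):
        _payload = torch.load(_cache)  # later processes load from the cache
    else:
        _payload = {"features": [0, 1, 2, 3]}  # first process computes and saves
        torch.save(_payload, _cache)
assert _payload["features"] == [0, 1, 2, 3]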
| 165
|
"""simple docstring"""
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False
    # All prime numbers greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def list_truncated_nums(n: int) -> list[int]:
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums


def validate(n: int) -> bool:
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True


def compute_truncated_primes(count: int = 11) -> list[int]:
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes


def solution() -> int:
    return sum(compute_truncated_primes(11))
if __name__ == "__main__":
print(F'{sum(compute_truncated_primes(11)) = }')
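    # Added worked check (illustration): 3797 is truncatable from both directions;
    # list_truncated_nums(3797) yields [3797, 797, 379, 97, 37, 7, 3], all prime.
    assert all(is_prime(i) for i in list_truncated_nums(3797))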
| 165
| 1
|
'''simple docstring'''
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format='''%(message)s''')
def column_reshape(input_array: np.ndarray) -> np.ndarray:
    return input_array.reshape((input_array.size, 1))
def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data, centered_data.T)
    return covariance_sum / features.shape[1]
def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    general_data_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
    return covariance_sum / features.shape[1]
def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in the reverse order (-1), and then take only the first `dimensions`
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the dataset onto the new space
        projected_data = np.dot(filtered_eigenvectors.T, features)
        logging.info('Principal Component Analysis computed')
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format='%(message)s', force=True)
        logging.error('Dataset empty')
        raise AssertionError
def linear_discriminant_analysis(
    features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int
) -> np.ndarray:
    assert classes > dimensions
    # Check if features have already been loaded
    if features.any():
        _, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes),
        )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info('Linear Discriminant Analysis computed')
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format='%(message)s', force=True)
        logging.error('Dataset empty')
        raise AssertionError
def test_linear_discriminant_analysis() -> None:
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2
    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(features, labels, classes, dimensions)
        if isinstance(projected_data, np.ndarray):
            raise AssertionError(
                'Did not raise AssertionError for dimensions > classes'
            )
    assert error_info.type is AssertionError


def test_principal_component_analysis() -> None:
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])
    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, output):
            raise AssertionError
    assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
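    # Added illustration: with three perfectly correlated features, one principal
    # component suffices; the projection keeps one row per requested dimension.
    demo_features = np.array([[1.0, 2.0, 3.0], [2.0, 4.0, 6.0], [3.0, 6.0, 9.0]])
    assert principal_component_analysis(demo_features, 1).shape == (1, 3)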
| 370
|
'''simple docstring'''
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__snake_case = 16
__snake_case = 32
def a ( __a , __a = 16 , __a = "bert-base-cased" ) -> Any:
'''simple docstring'''
UpperCamelCase__ :List[str] = AutoTokenizer.from_pretrained(__a )
UpperCamelCase__ :List[Any] = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(__a ):
# max_length=None => use the model max length (it's actually the default)
UpperCamelCase__ :Tuple = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__a , max_length=__a )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
UpperCamelCase__ :Optional[int] = datasets.map(
__a , batched=__a , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=__a )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCamelCase__ :Optional[int] = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(__a ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__a , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
return tokenizer.pad(__a , padding='''longest''' , return_tensors='''pt''' )
# Instantiate dataloaders.
UpperCamelCase__ :Dict = DataLoader(
tokenized_datasets['''train'''] , shuffle=__a , collate_fn=__a , batch_size=__a )
UpperCamelCase__ :str = DataLoader(
tokenized_datasets['''validation'''] , shuffle=__a , collate_fn=__a , batch_size=__a )
return train_dataloader, eval_dataloader
def a ( __a , __a , __a , __a ) -> str:
'''simple docstring'''
model.eval()
UpperCamelCase__ :List[str] = 0
for step, batch in enumerate(__a ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCamelCase__ :int = model(**__a )
UpperCamelCase__ :Tuple = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
UpperCamelCase__ , UpperCamelCase__ :int = accelerator.gather(
(predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(__a ) - 1:
UpperCamelCase__ :Union[str, Any] = predictions[: len(eval_dataloader.dataset ) - samples_seen]
UpperCamelCase__ :List[str] = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=__a , references=__a , )
UpperCamelCase__ :Union[str, Any] = metric.compute()
return eval_metric["accuracy"]
def a ( __a , __a ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ :Any = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCamelCase__ :Any = config['''lr''']
UpperCamelCase__ :Optional[int] = int(config['''num_epochs'''] )
UpperCamelCase__ :List[Any] = int(config['''seed'''] )
UpperCamelCase__ :List[Any] = int(config['''batch_size'''] )
UpperCamelCase__ :List[Any] = args.model_name_or_path
set_seed(__a )
UpperCamelCase__ , UpperCamelCase__ :Any = get_dataloaders(__a , __a , __a )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCamelCase__ :Union[str, Any] = AutoModelForSequenceClassification.from_pretrained(__a , return_dict=__a )
# Instantiate optimizer
UpperCamelCase__ :Any = (
AdamW
if accelerator.state.deepspeed_plugin is None
or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
UpperCamelCase__ :Optional[Any] = optimizer_cls(params=model.parameters() , lr=__a )
if accelerator.state.deepspeed_plugin is not None:
UpperCamelCase__ :Tuple = accelerator.state.deepspeed_plugin.deepspeed_config[
'''gradient_accumulation_steps'''
]
else:
UpperCamelCase__ :Dict = 1
UpperCamelCase__ :Tuple = (len(__a ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
UpperCamelCase__ :Any = get_linear_schedule_with_warmup(
optimizer=__a , num_warmup_steps=0 , num_training_steps=__a , )
else:
UpperCamelCase__ :Any = DummyScheduler(__a , total_num_steps=__a , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Dict = accelerator.prepare(
__a , __a , __a , __a , __a )
# We need to keep track of how many total steps we have iterated over
UpperCamelCase__ :Tuple = 0
# We also need to keep track of the stating epoch so files are named properly
UpperCamelCase__ :Optional[Any] = 0
UpperCamelCase__ :Optional[int] = evaluate.load('''glue''' , '''mrpc''' )
UpperCamelCase__ :List[Any] = num_epochs
if args.partial_train_epoch is not None:
UpperCamelCase__ :Optional[Any] = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
UpperCamelCase__ :Dict = args.resume_from_checkpoint.split('''epoch_''' )[1]
UpperCamelCase__ :Tuple = ''''''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
UpperCamelCase__ :Any = int(__a ) + 1
UpperCamelCase__ :Dict = evaluation_loop(__a , __a , __a , __a )
accelerator.print('''resumed checkpoint performance:''' , __a )
accelerator.print('''resumed checkpoint\'s scheduler\'s lr:''' , lr_scheduler.get_lr()[0] )
accelerator.print('''resumed optimizers\'s lr:''' , optimizer.param_groups[0]['''lr'''] )
with open(os.path.join(args.output_dir , f'''state_{starting_epoch-1}.json''' ) , '''r''' ) as f:
UpperCamelCase__ :Optional[int] = json.load(__a )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
UpperCamelCase__ :Optional[Any] = {}
for epoch in range(__a , __a ):
model.train()
for step, batch in enumerate(__a ):
UpperCamelCase__ :Optional[int] = model(**__a )
UpperCamelCase__ :Optional[int] = outputs.loss
UpperCamelCase__ :str = loss / gradient_accumulation_steps
accelerator.backward(__a )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
UpperCamelCase__ :Union[str, Any] = f'''epoch_{epoch}'''
UpperCamelCase__ :List[Any] = os.path.join(args.output_dir , __a )
accelerator.save_state(__a )
UpperCamelCase__ :List[Any] = evaluation_loop(__a , __a , __a , __a )
UpperCamelCase__ :int = accuracy
UpperCamelCase__ :List[Any] = lr_scheduler.get_lr()[0]
UpperCamelCase__ :Any = optimizer.param_groups[0]['''lr''']
UpperCamelCase__ :int = epoch
UpperCamelCase__ :Tuple = overall_step
accelerator.print(f'''epoch {epoch}:''' , __a )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , f'''state_{epoch}.json''' ) , '''w''' ) as f:
json.dump(__a , __a )
def a ( ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ :List[Any] = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' )
parser.add_argument(
'''--model_name_or_path''' , type=__a , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=__a , )
parser.add_argument(
'''--output_dir''' , type=__a , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
parser.add_argument(
'''--resume_from_checkpoint''' , type=__a , default=__a , help='''If the training should continue from a checkpoint folder.''' , )
parser.add_argument(
'''--partial_train_epoch''' , type=__a , default=__a , help='''If passed, the training will stop after this number of epochs.''' , )
parser.add_argument(
'''--num_epochs''' , type=__a , default=2 , help='''Number of train epochs.''' , )
UpperCamelCase__ :Optional[int] = parser.parse_args()
UpperCamelCase__ :List[str] = {'''lr''': 2e-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
training_function(__a , __a )
if __name__ == "__main__":
main()
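
# --- Hedged invocation sketch (paths are assumptions, not from the original
# script): a first run that writes per-epoch checkpoints, then a resumed run
# that verifies the restored accuracy, learning rate, and epoch counters.
#   accelerate launch this_script.py --output_dir ./ckpts --num_epochs 2
#   accelerate launch this_script.py --output_dir ./ckpts \
#       --resume_from_checkpoint ./ckpts/epoch_0 --partial_train_epoch 1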
| 219
| 0
|
'''simple docstring'''
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)
    # scale embeddings
    scaled_time = scale * emb
    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal
class FlaxTimestepEmbedding(nn.Module):
    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name='linear_1')(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name='linear_2')(temb)
        return temb


class FlaxTimesteps(nn.Module):
    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
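        )


# --- Added sanity check (not part of the original module): a 1-d timestep array
# maps to a (batch, embedding_dim) table; with the default flip_sin_to_cos=False
# the first half of each row holds sines and the second half cosines.
_demo = get_sinusoidal_embeddings(jnp.array([0.0, 10.0]), embedding_dim=32)
assert _demo.shape == (2, 32)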
| 47
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCamelCase : int = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : int = ["GLPNFeatureExtractor"]
lowerCamelCase : Optional[int] = ["GLPNImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_glpn"] = [
        "GLPN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GLPNForDepthEstimation",
        "GLPNLayer",
        "GLPNModel",
        "GLPNPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 47
| 1
|
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    if index == len(sequence):
        print(current_sequence)
        return
    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_a: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
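

# --- Added variant (illustration, not from the original): the same state-space
# tree, but collecting permutations into a list instead of printing them, so the
# count can be checked against n!.
def _collect_permutations(seq: list) -> list[list]:
    found: list[list] = []
    used = [False] * len(seq)
    chosen: list = []

    def _walk() -> None:
        if len(chosen) == len(seq):
            found.append(chosen.copy())  # record a completed ordering
            return
        for i in range(len(seq)):
            if not used[i]:
                used[i] = True
                chosen.append(seq[i])
                _walk()
                chosen.pop()  # undo the choice before trying the next element
                used[i] = False

    _walk()
    return found


assert len(_collect_permutations([3, 1, 2, 4])) == 24  # 4! orderings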
| 210
|
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
lowerCamelCase__ : str = get_tests_dir('fixtures/dummy-config.json')
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : Dict ):
SCREAMING_SNAKE_CASE_ = 0
def lowerCAmelCase_ ( self : Optional[int] ):
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec('transformers.models.auto' ) )
def lowerCAmelCase_ ( self : Any ):
SCREAMING_SNAKE_CASE_ = AutoConfig.from_pretrained('bert-base-uncased' )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
def lowerCAmelCase_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ = AutoConfig.from_pretrained(_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE_ = AutoConfig.from_pretrained(_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
def lowerCAmelCase_ ( self : Dict ):
SCREAMING_SNAKE_CASE_ = AutoConfig.for_model('roberta' )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
def lowerCAmelCase_ ( self : Dict ):
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
SCREAMING_SNAKE_CASE_ = os.path.join(_lowerCAmelCase , 'fake-roberta' )
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
with open(os.path.join(_lowerCAmelCase , 'config.json' ) , 'w' ) as f:
f.write(json.dumps({} ) )
SCREAMING_SNAKE_CASE_ = AutoConfig.from_pretrained(_lowerCAmelCase )
self.assertEqual(type(_lowerCAmelCase ) , _lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[Any] ):
try:
AutoConfig.register('custom' , _lowerCAmelCase )
# Wrong model type will raise an error
with self.assertRaises(_lowerCAmelCase ):
AutoConfig.register('model' , _lowerCAmelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_lowerCAmelCase ):
AutoConfig.register('bert' , _lowerCAmelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
SCREAMING_SNAKE_CASE_ = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = AutoConfig.from_pretrained(_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def lowerCAmelCase_ ( self : Optional[int] ):
with self.assertRaisesRegex(
_lowerCAmelCase , 'bert-base is not a local folder and is not a valid model identifier' ):
SCREAMING_SNAKE_CASE_ = AutoConfig.from_pretrained('bert-base' )
def lowerCAmelCase_ ( self : int ):
with self.assertRaisesRegex(
_lowerCAmelCase , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
SCREAMING_SNAKE_CASE_ = AutoConfig.from_pretrained(_lowerCAmelCase , revision='aaaaaa' )
def lowerCAmelCase_ ( self : Tuple ):
with self.assertRaisesRegex(
_lowerCAmelCase , 'hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.' , ):
SCREAMING_SNAKE_CASE_ = AutoConfig.from_pretrained('hf-internal-testing/no-config-test-repo' )
def lowerCAmelCase_ ( self : Union[str, Any] ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' , trust_remote_code=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' , trust_remote_code=_lowerCAmelCase )
self.assertEqual(config.__class__.__name__ , 'NewModelConfig' )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = AutoConfig.from_pretrained(_lowerCAmelCase , trust_remote_code=_lowerCAmelCase )
self.assertEqual(reloaded_config.__class__.__name__ , 'NewModelConfig' )
def lowerCAmelCase_ ( self : Any ):
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = "new-model"
try:
AutoConfig.register('new-model' , _lowerCAmelCase )
# If remote code is not set, the default is to use local
SCREAMING_SNAKE_CASE_ = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' )
self.assertEqual(config.__class__.__name__ , 'NewModelConfigLocal' )
# If remote code is disabled, we load the local one.
SCREAMING_SNAKE_CASE_ = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' , trust_remote_code=_lowerCAmelCase )
self.assertEqual(config.__class__.__name__ , 'NewModelConfigLocal' )
# If remote is enabled, we load from the Hub
SCREAMING_SNAKE_CASE_ = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' , trust_remote_code=_lowerCAmelCase )
self.assertEqual(config.__class__.__name__ , 'NewModelConfig' )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
| 210
| 1
|
"""simple docstring"""
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
    },
    "merges_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
PRETRAINED_INIT_CONFIGURATION = {}
class HerbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sep_token="</s>",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
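

# --- Added illustration (hypothetical token ids, not real vocabulary entries):
# the special-token layout produced by the methods above for a sequence pair.
_cls, _sep = [90], [91]
_a_ids, _b_ids = [10, 11], [20]
_pair = _cls + _a_ids + _sep + _b_ids + _sep          # [CLS] A [SEP] B [SEP]
_type_ids = len(_cls + _a_ids + _sep) * [0] + len(_b_ids + _sep) * [1]
assert len(_pair) == len(_type_ids) == 6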
| 86
|
"""simple docstring"""
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
lowerCAmelCase__ = logging.get_logger(__name__)
@add_end_docstrings(
_lowercase , R"\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n " , )
class _lowerCamelCase ( _lowercase ):
    def get_masked_index(self, input_ids) -> np.ndarray:
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index

    def _ensure_exactly_one_mask_token(self, input_ids) -> None:
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask",
                self.model.base_model_prefix,
                f"No mask_token ({self.tokenizer.mask_token}) found on the input",
            )

    def ensure_exactly_one_mask_token(self, model_inputs) -> None:
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)

    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs

    def _forward(self, model_inputs) -> Dict:
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs
    def postprocess(self, model_outputs, top_k=5, target_ids=None) -> Dict:
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
UpperCamelCase = target_ids.shape[0]
UpperCamelCase = model_outputs["input_ids"][0]
UpperCamelCase = model_outputs["logits"]
if self.framework == "tf":
UpperCamelCase = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
UpperCamelCase = outputs.numpy()
UpperCamelCase = outputs[0, masked_index, :]
UpperCamelCase = stable_softmax(__a , axis=-1 )
if target_ids is not None:
UpperCamelCase = tf.gather_nd(tf.squeeze(__a , 0 ) , target_ids.reshape(-1 , 1 ) )
UpperCamelCase = tf.expand_dims(__a , 0 )
UpperCamelCase = tf.math.top_k(__a , k=__a )
UpperCamelCase , UpperCamelCase = topk.values.numpy(), topk.indices.numpy()
else:
UpperCamelCase = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=__a ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
UpperCamelCase = outputs[0, masked_index, :]
UpperCamelCase = logits.softmax(dim=-1 )
if target_ids is not None:
UpperCamelCase = probs[..., target_ids]
UpperCamelCase , UpperCamelCase = probs.topk(__a )
UpperCamelCase = []
UpperCamelCase = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
UpperCamelCase = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
UpperCamelCase = input_ids.numpy().copy()
if target_ids is not None:
UpperCamelCase = target_ids[p].tolist()
UpperCamelCase = p
# Filter padding out:
UpperCamelCase = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
UpperCamelCase = self.tokenizer.decode(__a , skip_special_tokens=__a )
UpperCamelCase = {"score": v, "token": p, "token_str": self.tokenizer.decode([p] ), "sequence": sequence}
row.append(__a )
result.append(__a )
if single_mask:
return result[0]
return result
    def get_target_ids(self, targets, top_k=None) -> Any:
if isinstance(__a , __a ):
UpperCamelCase = [targets]
try:
UpperCamelCase = self.tokenizer.get_vocab()
except Exception:
UpperCamelCase = {}
UpperCamelCase = []
for target in targets:
UpperCamelCase = vocab.get(__a , __a )
if id_ is None:
UpperCamelCase = self.tokenizer(
__a , add_special_tokens=__a , return_attention_mask=__a , return_token_type_ids=__a , max_length=1 , truncation=__a , )["input_ids"]
if len(__a ) == 0:
logger.warning(
F"The specified target token `{target}` does not exist in the model vocabulary. "
"We cannot replace it with anything meaningful, ignoring it" )
continue
UpperCamelCase = input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
F"The specified target token `{target}` does not exist in the model vocabulary. "
F"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`." )
target_ids.append(id_ )
UpperCamelCase = list(set(__a ) )
if len(__a ) == 0:
raise ValueError("At least one target must be provided when passed." )
UpperCamelCase = np.array(__a )
return target_ids
    def _sanitize_parameters(self, targets=None, top_k=None) -> int:
UpperCamelCase = {}
if targets is not None:
UpperCamelCase = self.get_target_ids(__a , __a )
UpperCamelCase = target_ids
if top_k is not None:
UpperCamelCase = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
"fill-mask" , self.model.base_model_prefix , "The tokenizer does not define a `mask_token`." )
return {}, {}, postprocess_params
    def __call__(self, inputs, *args, **kwargs) -> Tuple:
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(inputs, list) and len(inputs) == 1:
            return outputs[0]
        return outputs
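

# --- Hedged usage sketch (the checkpoint name is an assumption; any masked-LM
# model works, and the first run needs network access to download it):
# from transformers import pipeline
# unmasker = pipeline("fill-mask", model="distilroberta-base")
# unmasker("The capital of France is <mask>.", top_k=2)
# # -> list of dicts with "score", "token", "token_str", and "sequence" keys,
# #    matching the postprocess output built above.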
| 153
| 0
|
def _a ( numa : int , numb : int ):
    """Return True when the two integers have opposite signs (XOR sign-bit trick)."""
    return numa ^ numb < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
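    # Added spot checks (illustration): the XOR of two ints is negative exactly
    # when their sign bits differ.
    assert _a(-5, 9) and _a(7, -3)
    assert not _a(4, 4) and not _a(-1, -8)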
| 354
|
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
__UpperCamelCase : int = logging.get_logger(__name__)
class __magic_name__ ( __lowerCAmelCase):
A: str = ["pixel_values"]
def __init__( self : str , lowerCamelCase__ : bool = True , lowerCamelCase__ : Dict[str, int] = None , lowerCamelCase__ : PILImageResampling = PILImageResampling.BILINEAR , lowerCamelCase__ : bool = True , lowerCamelCase__ : Union[int, float] = 1 / 255 , lowerCamelCase__ : bool = True , lowerCamelCase__ : Dict[str, int] = None , lowerCamelCase__ : bool = True , **lowerCamelCase__ : Any , ) -> None:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
UpperCamelCase__ : Optional[int] = size if size is not None else {'''shortest_edge''': 224}
UpperCamelCase__ : List[str] = get_size_dict(lowerCamelCase__ , default_to_square=lowerCamelCase__ )
UpperCamelCase__ : Union[str, Any] = crop_size if crop_size is not None else {'''height''': 256, '''width''': 256}
UpperCamelCase__ : Dict = get_size_dict(lowerCamelCase__ , param_name='''crop_size''' )
UpperCamelCase__ : Optional[Any] = do_resize
UpperCamelCase__ : List[Any] = size
UpperCamelCase__ : Optional[int] = resample
UpperCamelCase__ : Optional[int] = do_rescale
UpperCamelCase__ : Dict = rescale_factor
UpperCamelCase__ : Optional[Any] = do_center_crop
UpperCamelCase__ : int = crop_size
UpperCamelCase__ : List[str] = do_flip_channel_order
def UpperCAmelCase__ ( self : Tuple , lowerCamelCase__ : np.ndarray , lowerCamelCase__ : Dict[str, int] , lowerCamelCase__ : PILImageResampling = PIL.Image.BILINEAR , lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase__ : List[str] , ) -> np.ndarray:
'''simple docstring'''
UpperCamelCase__ : Optional[int] = get_size_dict(lowerCamelCase__ , default_to_square=lowerCamelCase__ )
if "shortest_edge" not in size:
raise ValueError(F"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}" )
UpperCamelCase__ : int = get_resize_output_image_size(lowerCamelCase__ , size=size['''shortest_edge'''] , default_to_square=lowerCamelCase__ )
return resize(lowerCamelCase__ , size=lowerCamelCase__ , resample=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def UpperCAmelCase__ ( self : int , lowerCamelCase__ : np.ndarray , lowerCamelCase__ : Dict[str, int] , lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase__ : List[Any] , ) -> np.ndarray:
'''simple docstring'''
UpperCamelCase__ : Optional[int] = get_size_dict(lowerCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}" )
return center_crop(lowerCamelCase__ , size=(size['''height'''], size['''width''']) , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def UpperCAmelCase__ ( self : Tuple , lowerCamelCase__ : np.ndarray , lowerCamelCase__ : Union[int, float] , lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase__ : Tuple , ) -> List[Any]:
'''simple docstring'''
return rescale(lowerCamelCase__ , scale=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ )
def UpperCAmelCase__ ( self : List[Any] , lowerCamelCase__ : np.ndarray , lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None ) -> np.ndarray:
'''simple docstring'''
return flip_channel_order(lowerCamelCase__ , data_format=lowerCamelCase__ )
def UpperCAmelCase__ ( self : Dict , lowerCamelCase__ : ImageInput , lowerCamelCase__ : bool = None , lowerCamelCase__ : Dict[str, int] = None , lowerCamelCase__ : PILImageResampling = None , lowerCamelCase__ : bool = None , lowerCamelCase__ : float = None , lowerCamelCase__ : bool = None , lowerCamelCase__ : Dict[str, int] = None , lowerCamelCase__ : bool = None , lowerCamelCase__ : Optional[Union[str, TensorType]] = None , lowerCamelCase__ : ChannelDimension = ChannelDimension.FIRST , **lowerCamelCase__ : List[Any] , ) -> PIL.Image.Image:
'''simple docstring'''
UpperCamelCase__ : Optional[Any] = do_resize if do_resize is not None else self.do_resize
UpperCamelCase__ : List[Any] = resample if resample is not None else self.resample
UpperCamelCase__ : str = do_rescale if do_rescale is not None else self.do_rescale
UpperCamelCase__ : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCamelCase__ : Any = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCamelCase__ : List[str] = (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
UpperCamelCase__ : List[str] = size if size is not None else self.size
UpperCamelCase__ : int = get_size_dict(lowerCamelCase__ , default_to_square=lowerCamelCase__ )
UpperCamelCase__ : Tuple = crop_size if crop_size is not None else self.crop_size
UpperCamelCase__ : int = get_size_dict(lowerCamelCase__ , param_name='''crop_size''' )
UpperCamelCase__ : int = make_list_of_images(lowerCamelCase__ )
if not valid_images(lowerCamelCase__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
# All transformations expect numpy arrays.
UpperCamelCase__ : Union[str, Any] = [to_numpy_array(lowerCamelCase__ ) for image in images]
if do_resize:
UpperCamelCase__ : Tuple = [self.resize(image=lowerCamelCase__ , size=lowerCamelCase__ , resample=lowerCamelCase__ ) for image in images]
if do_center_crop:
UpperCamelCase__ : Optional[Any] = [self.center_crop(image=lowerCamelCase__ , size=lowerCamelCase__ ) for image in images]
if do_rescale:
UpperCamelCase__ : List[Any] = [self.rescale(image=lowerCamelCase__ , scale=lowerCamelCase__ ) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
UpperCamelCase__ : List[Any] = [self.flip_channel_order(image=lowerCamelCase__ ) for image in images]
UpperCamelCase__ : Union[str, Any] = [to_channel_dimension_format(lowerCamelCase__ , lowerCamelCase__ ) for image in images]
UpperCamelCase__ : int = {'''pixel_values''': images}
return BatchFeature(data=lowerCamelCase__ , tensor_type=lowerCamelCase__ )
def UpperCAmelCase__ ( self : Any , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : List[Tuple] = None ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ : Tuple = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowerCamelCase__ ) != len(lowerCamelCase__ ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(lowerCamelCase__ ):
UpperCamelCase__ : Tuple = target_sizes.numpy()
UpperCamelCase__ : Any = []
for idx in range(len(lowerCamelCase__ ) ):
UpperCamelCase__ : Optional[Any] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=lowerCamelCase__ )
UpperCamelCase__ : Optional[Any] = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(lowerCamelCase__ )
else:
UpperCamelCase__ : Dict = logits.argmax(dim=1 )
UpperCamelCase__ : Dict = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
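

# --- Added illustration (self-contained, independent of the class above): the
# RGB -> BGR flip that flip_channel_order applies to a channels-first array.
_rgb = np.zeros((3, 2, 2), dtype=np.float32)
_rgb[0] = 1.0                      # mark the red channel
_bgr = _rgb[::-1, ...]             # reverse the channel axis
assert _bgr[2].max() == 1.0 and _bgr[0].max() == 0.0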
| 51
| 0
|
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def get_swin_config(model_name) -> SwinConfig:
    config = SwinConfig(image_size=192)
    if "base" in model_name:
        window_size = 6
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError("Model not supported, only supports base and large variants")
    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    return config
def rename_key(name) -> str:
    if "encoder.mask_token" in name:
        name = name.replace("encoder.mask_token", "embeddings.mask_token")
    if "encoder.patch_embed.proj" in name:
        name = name.replace("encoder.patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "encoder.patch_embed.norm" in name:
        name = name.replace("encoder.patch_embed.norm", "embeddings.norm")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if name == "encoder.norm.weight":
        name = "layernorm.weight"
    if name == "encoder.norm.bias":
        name = "layernorm.bias"
    if "decoder" in name:
        pass
    else:
        name = "swin." + name
    return name
def convert_state_dict(orig_state_dict, model) -> dict:
    # Fused qkv tensors are split into query/key/value; the target key paths are a
    # reconstruction based on the renaming scheme above, since the originals were elided.
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            block_num = int(key_split[4])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            prefix = f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def convert_swin_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub) -> None:
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = get_swin_config(model_name)
    model = SwinForMaskedImageModeling(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = ViTImageProcessor(size={"height": 192, "width": 192})
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs).logits
    print(outputs.keys())
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"Pushing model and image processor for {model_name} to hub")
        model.push_to_hub(f"microsoft/{model_name}")
        image_processor.push_to_hub(f"microsoft/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='swin-base-simmim-window6-192',
type=str,
choices=['swin-base-simmim-window6-192', 'swin-large-simmim-window12-192'],
help='Name of the Swin SimMIM model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth',
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
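
# --- Hedged invocation sketch (the checkpoint path is taken from the default
# above; the script filename is hypothetical):
#   python convert_swin_simmim.py \
#       --model_name swin-base-simmim-window6-192 \
#       --checkpoint_path ./simmim_pretrain__swin_base__img192_window6__100ep.pth \
#       --pytorch_dump_folder_path ./swin-simmim-converted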
| 15
|
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
SCREAMING_SNAKE_CASE :Union[str, Any] = False
SCREAMING_SNAKE_CASE :Any = True
SCREAMING_SNAKE_CASE :Tuple = False
if __name__ == "__main__":
SCREAMING_SNAKE_CASE :Tuple = argparse.ArgumentParser()
parser.add_argument(
'--repo_path',
default=None,
type=str,
required=True,
help='The config json file corresponding to the architecture.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
SCREAMING_SNAKE_CASE :Union[str, Any] = parser.parse_args()
SCREAMING_SNAKE_CASE :Dict = {
'image_size': 'sample_size',
'num_res_blocks': 'layers_per_block',
'block_channels': 'block_out_channels',
'down_blocks': 'down_block_types',
'up_blocks': 'up_block_types',
'downscale_freq_shift': 'freq_shift',
'resnet_num_groups': 'norm_num_groups',
'resnet_act_fn': 'act_fn',
'resnet_eps': 'norm_eps',
'num_head_channels': 'attention_head_dim',
}
SCREAMING_SNAKE_CASE :Optional[int] = {
'time_steps': 'time_proj',
'mid': 'mid_block',
'downsample_blocks': 'down_blocks',
'upsample_blocks': 'up_blocks',
}
SCREAMING_SNAKE_CASE :int = '' if has_file(args.repo_path, 'config.json') else 'unet'
with open(os.path.join(args.repo_path, subfolder, 'config.json'), 'r', encoding='utf-8') as reader:
SCREAMING_SNAKE_CASE :Dict = reader.read()
SCREAMING_SNAKE_CASE :List[str] = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, 'config.json'):
SCREAMING_SNAKE_CASE :Optional[int] = UNetaDModel(**config)
else:
SCREAMING_SNAKE_CASE :Optional[Any] = UNetaDConditionModel if 'ldm-text2im-large-256' in args.repo_path else UNetaDModel
SCREAMING_SNAKE_CASE :List[str] = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
SCREAMING_SNAKE_CASE :List[str] = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
SCREAMING_SNAKE_CASE :Optional[Any] = config[key]
del config[key]
SCREAMING_SNAKE_CASE :Optional[Any] = [k.replace('UNetRes', '') for k in config['down_block_types']]
SCREAMING_SNAKE_CASE :List[Any] = [k.replace('UNetRes', '') for k in config['up_block_types']]
if do_only_weights:
SCREAMING_SNAKE_CASE :Tuple = torch.load(os.path.join(args.repo_path, subfolder, 'diffusion_pytorch_model.bin'))
SCREAMING_SNAKE_CASE :Any = {}
for param_key, param_value in state_dict.items():
if param_key.endswith('.op.bias') or param_key.endswith('.op.weight'):
continue
SCREAMING_SNAKE_CASE :List[str] = False
for key, new_key in key_parameters_to_change.items():
if not has_changed and param_key.split('.')[0] == key:
SCREAMING_SNAKE_CASE :List[Any] = param_value
SCREAMING_SNAKE_CASE :str = True
if not has_changed:
SCREAMING_SNAKE_CASE :List[str] = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
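
# --- Added illustration (self-contained): the config-key migration pattern used
# above, reduced to a dict rename pass with made-up values.
_old = {"image_size": 64, "num_res_blocks": 2, "act_fn": "silu"}
_renames = {"image_size": "sample_size", "num_res_blocks": "layers_per_block"}
for _k, _new_k in _renames.items():
    if _k in _old:
        _old[_new_k] = _old.pop(_k)  # move the value to the new key, drop the old one
assert _old == {"act_fn": "silu", "sample_size": 64, "layers_per_block": 2}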
| 15
| 1
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        d_model=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        ffn_dim=37,
        activation_function="gelu",
        activation_dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1
    def get_large_model_config(self):
        return XGLMConfig.from_pretrained("facebook/xglm-564M")
    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3
        )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = self.get_config()

        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )
    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, num_layers=self.num_hidden_layers,
            attention_heads=self.num_attention_heads, ffn_dim=self.ffn_dim,
            activation_function=self.activation_function, activation_dropout=self.activation_dropout,
            attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range, use_cache=True, bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, return_dict=True,
        )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, head_mask) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False

    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)
    def test_config(self):
        self.config_tester.run_common_tests()
@slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@unittest.skip(reason="""Currently, model embeddings are going to undergo a major refactor.""" )
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
@slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
@slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)
@slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
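Left padding matters in the batched test above because XGLM is decoder-only: with right padding the model would condition its next-token predictions on trailing pad tokens. A generic illustration with any causal-LM tokenizer (gpt2 is used here purely as an example, not something the test depends on):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("gpt2")
tok.pad_token = tok.eos_token
tok.padding_side = "left"
batch = tok(["a much longer prompt than the next one", "short"], padding=True, return_tensors="np")
print(batch["input_ids"][1])  # pads appear at the front, so the final position is real text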
| 61
|
'''simple docstring'''
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SentencePieceUnigramTokenizer(BaseTokenizer):
    """
    Custom SentencePiece Unigram tokenizer with NMT, NFKC, extra-whitespace and lower-casing normalization,
    using the pre-tokenization scheme of SentencePiece.
    """

    def __init__(
        self,
        replacement: str = "▁",
        add_prefix_space: bool = True,
        unk_token: Union[str, AddedToken] = "<unk>",
        eos_token: Union[str, AddedToken] = "</s>",
        pad_token: Union[str, AddedToken] = "<pad>",
    ):
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }

        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]

        tokenizer = Tokenizer(Unigram())

        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)

        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}",
            special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])],
        )

        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }

        super().__init__(tokenizer, parameters)

    def train(
        self,
        files: Union[str, List[str]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the model using the given files."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)

        self.add_unk_id()

    def train_from_iterator(
        self,
        iterator: Union[Iterator[str], Iterator[Iterator[str]]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the model using the given iterator."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        self._tokenizer.train_from_iterator(iterator, trainer=trainer)

        self.add_unk_id()

    def add_unk_id(self):
        tokenizer_json = json.loads(self._tokenizer.to_str())

        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]

        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
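A short usage sketch for the class above; the corpus is made up and the vocab size is arbitrary:

corpus = ["hello world", "hello unigram tokenizers", "unigram models segment text into pieces"]

sp_tokenizer = SentencePieceUnigramTokenizer()
sp_tokenizer.train_from_iterator(corpus, vocab_size=60, show_progress=False)

print(sp_tokenizer.encode("hello unigram").tokens)  # pieces end with the configured "</s>" eos token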
| 61
| 1
|
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    """
    Ensure `FeaturesManager.determine_framework` returns the expected framework.
    """

    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, save_dir):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_dir)

    def _setup_tf_ckpt(self, save_dir):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_dir)

    def test_framework_provided(self):
        mock_framework = "mock_framework"

        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)

        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

    def test_checkpoint_provided(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)

        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                FeaturesManager.determine_framework(local_invalid_ckpt)

    def test_from_environment(self):
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)

        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            with self.assertRaises(EnvironmentError):
                FeaturesManager.determine_framework(self.test_model)
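The precedence these tests pin down is: explicit framework argument > local checkpoint contents > installed backends (PyTorch preferred). A sketch of that contract (the model name is illustrative and requires a transformers install with both backends):

from transformers.onnx import FeaturesManager

# 1. An explicit framework always wins, even for a hub model name.
assert FeaturesManager.determine_framework("bert-base-uncased", "tf") == "tf"

# 2./3. Otherwise a local checkpoint decides, falling back to the environment (PyTorch first).
print(FeaturesManager.determine_framework("bert-base-uncased"))  # "pt" when torch is installed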
| 232
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
    "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
    "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
    "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
    "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
    "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
    "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
    "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}


class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
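How the two classes above fit together, sketched with small illustrative sizes:

config = AlbertConfig(hidden_size=256, num_attention_heads=4, intermediate_size=512)
onnx_config = AlbertOnnxConfig(config)

print(onnx_config.inputs)
# OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#              ('attention_mask', {0: 'batch', 1: 'sequence'}),
#              ('token_type_ids', {0: 'batch', 1: 'sequence'})])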
| 302
| 0
|
"""simple docstring"""
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    """Read contents of a compressed file as a filesystem with one file inside."""

    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz

    def __init__(
        self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
    ):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(
        self,
        path: str,
        mode: str = "rb",
        block_size=None,
        autocommit=True,
        cache_options=None,
        **kwargs,
    ):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()


class Bz2FileSystem(BaseCompressedFileFileSystem):
    """Read contents of a BZ2 file as a filesystem with one file inside."""

    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    """Read contents of a GZIP file as a filesystem with one file inside."""

    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    """Read contents of an LZ4 file as a filesystem with one file inside."""

    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    """Read contents of an .xz (LZMA) file as a filesystem with one file inside."""

    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    """Read contents of a .zst file as a filesystem with one file inside."""

    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(
        self,
        fo: str,
        mode: str = "rb",
        target_protocol: Optional[str] = None,
        target_options: Optional[dict] = None,
        block_size: int = DEFAULT_BLOCK_SIZE,
        **kwargs,
    ):
        super().__init__(
            fo=fo,
            mode=mode,
            target_protocol=target_protocol,
            target_options=target_options,
            block_size=block_size,
            **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
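A usage sketch: once registered with fsspec, these filesystems let a chained URL peel the compression layer off transparently (the local path is hypothetical):

import fsspec

fsspec.register_implementation(GzipFileSystem.protocol, GzipFileSystem, clobber=True)

# "gzip://<inner name>::<outer url>" — the outer URL locates the archive, the protocol unwraps it.
with fsspec.open("gzip://file.txt::/tmp/file.txt.gz", "rt") as f:
    print(f.read())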
| 74
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_clipseg''': [
'''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPSegConfig''',
'''CLIPSegTextConfig''',
'''CLIPSegVisionConfig''',
],
'''processing_clipseg''': ['''CLIPSegProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_clipseg"] = [
'''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPSegModel''',
'''CLIPSegPreTrainedModel''',
'''CLIPSegTextModel''',
'''CLIPSegVisionModel''',
'''CLIPSegForImageSegmentation''',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
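The `_LazyModule` registration above defers the heavy torch imports until an attribute is first accessed. A minimal illustration of the pattern (not the transformers implementation):

import importlib
import types


class MiniLazyModule(types.ModuleType):
    """Resolve attributes to submodule imports on first access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        self._class_to_module = {cls: mod for mod, classes in import_structure.items() for cls in classes}

    def __getattr__(self, attr):
        module = importlib.import_module("." + self._class_to_module[attr], self.__name__)
        return getattr(module, attr)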
| 74
| 1
|
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
    }
}


class CpmTokenizer(PreTrainedTokenizer):
    """Runs pre-tokenization with the Jieba segmentation tool. It is used in CPM models."""

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation."
            )
        self.jieba = jieba
        self.translator = str.maketrans(" \n", "\u2582\u2583")

    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def _decode(self, *args, **kwargs):
        text = super()._decode(*args, **kwargs)
        text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
        return text
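The jieba/translator plumbing above exists because CPM encodes spaces and newlines as the placeholder characters ▂ and ▃; a small illustration of that round trip (independent of any checkpoint):

translator = str.maketrans(" \n", "\u2582\u2583")
encoded = "你好 世界\n".translate(translator)
assert encoded == "你好▂世界▃"

decoded = encoded.replace("\u2582", " ").replace("\u2583", "\n")
assert decoded == "你好 世界\n"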
| 96
|
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class a ( unittest.TestCase ):
"""simple docstring"""
    def setUp(self):
        vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.add_kwargs_tokens_map = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 16000,
            "return_attention_mask": False,
            "do_normalize": True,
        }

        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(feature_extractor_map) + "\n")

        # load decoder from hub
        self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"

    def get_tokenizer(self, **kwargs_init):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def get_decoder(self, **kwargs):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
def UpperCAmelCase ( self ) -> Tuple:
_A = self.get_tokenizer()
_A = self.get_feature_extractor()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , decoder=lowerCAmelCase_ )
processor.save_pretrained(self.tmpdirname )
_A = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCAmelCase_ )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , lowerCAmelCase_ )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Tuple:
_A = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
_A = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def UpperCAmelCase ( self ) -> Optional[int]:
_A = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["""xx"""] )
with self.assertRaisesRegex(lowerCAmelCase_ , """include""" ):
WavaVecaProcessorWithLM(
tokenizer=lowerCAmelCase_ , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def UpperCAmelCase ( self ) -> Any:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , decoder=lowerCAmelCase_ )
_A = floats_list((3, 10_00) )
_A = feature_extractor(lowerCAmelCase_ , return_tensors="""np""" )
_A = processor(lowerCAmelCase_ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCAmelCase ( self ) -> Any:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , decoder=lowerCAmelCase_ )
_A = """This is a test string"""
_A = processor(text=lowerCAmelCase_ )
_A = tokenizer(lowerCAmelCase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
        np.random.seed(seed)
        return np.random.rand(*shape)
def UpperCAmelCase ( self ) -> Tuple:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , decoder=lowerCAmelCase_ )
_A = self._get_dummy_logits(shape=(10, 16) , seed=13 )
_A = processor.decode(lowerCAmelCase_ )
_A = decoder.decode_beams(lowerCAmelCase_ )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual("""</s> <s> </s>""" , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ["""fork"""], ["""spawn"""]] )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Optional[int]:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , decoder=lowerCAmelCase_ )
_A = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
_A = processor.batch_decode(lowerCAmelCase_ )
else:
with get_context(lowerCAmelCase_ ).Pool() as pool:
_A = processor.batch_decode(lowerCAmelCase_ , lowerCAmelCase_ )
_A = list(lowerCAmelCase_ )
with get_context("""fork""" ).Pool() as p:
_A = decoder.decode_beams_batch(lowerCAmelCase_ , lowerCAmelCase_ )
        texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(lowerCAmelCase_ , decoded_processor.text )
self.assertListEqual(["""<s> <s> </s>""", """<s> <s> <s>"""] , decoded_processor.text )
self.assertListEqual(lowerCAmelCase_ , decoded_processor.logit_score )
self.assertListEqual(lowerCAmelCase_ , decoded_processor.lm_score )
def UpperCAmelCase ( self ) -> List[Any]:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , decoder=lowerCAmelCase_ )
_A = self._get_dummy_logits()
_A = 15
_A = -20.0
_A = -4.0
_A = processor.batch_decode(
lowerCAmelCase_ , beam_width=lowerCAmelCase_ , beam_prune_logp=lowerCAmelCase_ , token_min_logp=lowerCAmelCase_ , )
_A = decoded_processor_out.text
_A = list(lowerCAmelCase_ )
with get_context("""fork""" ).Pool() as pool:
_A = decoder.decode_beams_batch(
lowerCAmelCase_ , lowerCAmelCase_ , beam_width=lowerCAmelCase_ , beam_prune_logp=lowerCAmelCase_ , token_min_logp=lowerCAmelCase_ , )
_A = [d[0][0] for d in decoded_decoder_out]
_A = [d[0][2] for d in decoded_decoder_out]
_A = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(["""</s> <s> <s>""", """<s> <s> <s>"""] , lowerCAmelCase_ )
self.assertTrue(np.array_equal(lowerCAmelCase_ , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , lowerCAmelCase_ , atol=1E-3 ) )
self.assertTrue(np.array_equal(lowerCAmelCase_ , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , lowerCAmelCase_ , atol=1E-3 ) )
def UpperCAmelCase ( self ) -> List[Any]:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , decoder=lowerCAmelCase_ )
_A = self._get_dummy_logits()
_A = 2.0
_A = 5.0
_A = -20.0
_A = True
_A = processor.batch_decode(
lowerCAmelCase_ , alpha=lowerCAmelCase_ , beta=lowerCAmelCase_ , unk_score_offset=lowerCAmelCase_ , lm_score_boundary=lowerCAmelCase_ , )
_A = decoded_processor_out.text
_A = list(lowerCAmelCase_ )
decoder.reset_params(
alpha=lowerCAmelCase_ , beta=lowerCAmelCase_ , unk_score_offset=lowerCAmelCase_ , lm_score_boundary=lowerCAmelCase_ , )
with get_context("""fork""" ).Pool() as pool:
_A = decoder.decode_beams_batch(
lowerCAmelCase_ , lowerCAmelCase_ , )
_A = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(["""<s> </s> <s> </s> </s>""", """</s> </s> <s> </s> </s>"""] , lowerCAmelCase_ )
_A = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Union[str, Any]:
_A = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
_A = processor.decoder.model_container[processor.decoder._model_key]
_A = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
_A = os.listdir(lowerCAmelCase_ )
_A = ["""alphabet.json""", """language_model"""]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> List[str]:
_A = snapshot_download("""hf-internal-testing/processor_with_lm""" )
_A = WavaVecaProcessorWithLM.from_pretrained(lowerCAmelCase_ )
_A = processor.decoder.model_container[processor.decoder._model_key]
_A = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
_A = os.listdir(lowerCAmelCase_ )
_A = os.listdir(lowerCAmelCase_ )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> List[str]:
_A = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
_A = AutoProcessor.from_pretrained("""hf-internal-testing/processor_with_lm""" )
_A = floats_list((3, 10_00) )
_A = processor_wavaveca(lowerCAmelCase_ , return_tensors="""np""" )
_A = processor_auto(lowerCAmelCase_ , return_tensors="""np""" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
_A = self._get_dummy_logits()
_A = processor_wavaveca.batch_decode(lowerCAmelCase_ )
_A = processor_auto.batch_decode(lowerCAmelCase_ )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def UpperCAmelCase ( self ) -> Union[str, Any]:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = self.get_decoder()
_A = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , decoder=lowerCAmelCase_ )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
def UpperCAmelCase ( self ) -> Any:
_A = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
_A = self._get_dummy_logits()[0]
_A = processor.decode(lowerCAmelCase_ , output_word_offsets=lowerCAmelCase_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) )
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """end_offset""" ) , [1, 3, 5] )
def UpperCAmelCase ( self ) -> Any:
_A = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
_A = self._get_dummy_logits()
_A = processor.batch_decode(lowerCAmelCase_ , output_word_offsets=lowerCAmelCase_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) )
self.assertListEqual(
[""" """.join(self.get_from_offsets(lowerCAmelCase_ , """word""" ) ) for o in outputs["""word_offsets"""]] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """end_offset""" ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def UpperCAmelCase ( self ) -> Any:
import torch
_A = load_dataset("""common_voice""" , """en""" , split="""train""" , streaming=lowerCAmelCase_ )
_A = ds.cast_column("""audio""" , datasets.Audio(sampling_rate=1_60_00 ) )
_A = iter(lowerCAmelCase_ )
_A = next(lowerCAmelCase_ )
_A = AutoProcessor.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
_A = WavaVecaForCTC.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
_A = processor(sample["""audio"""]["""array"""] , return_tensors="""pt""" ).input_values
with torch.no_grad():
_A = model(lowerCAmelCase_ ).logits.cpu().numpy()
_A = processor.decode(logits[0] , output_word_offsets=lowerCAmelCase_ )
_A = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
_A = [
{
"""start_time""": d["""start_offset"""] * time_offset,
"""end_time""": d["""end_offset"""] * time_offset,
"""word""": d["""word"""],
}
for d in output["""word_offsets"""]
]
_A = """WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"""
# output words
self.assertEqual(""" """.join(self.get_from_offsets(lowerCAmelCase_ , """word""" ) ) , lowerCAmelCase_ )
self.assertEqual(""" """.join(self.get_from_offsets(lowerCAmelCase_ , """word""" ) ) , output.text )
# output times
_A = torch.tensor(self.get_from_offsets(lowerCAmelCase_ , """start_time""" ) )
_A = torch.tensor(self.get_from_offsets(lowerCAmelCase_ , """end_time""" ) )
# fmt: off
_A = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
_A = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=0.01 ) )
self.assertTrue(torch.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=0.01 ) )
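The offset-to-time conversion used in the last test is just a constant ratio applied to CTC frame indices; in isolation:

# Illustration: convert CTC frame offsets to seconds with the model's downsampling ratio.
inputs_to_logits_ratio = 320  # wav2vec2-base downsamples raw audio by 320x (assumption for illustration)
sampling_rate = 16000
time_offset = inputs_to_logits_ratio / sampling_rate  # 0.02 s per logit frame

word = {"word": "HELLO", "start_offset": 12, "end_offset": 21}
start_time = word["start_offset"] * time_offset  # 0.24 s
end_time = word["end_offset"] * time_offset      # 0.42 s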
| 180
| 0
|
from collections.abc import Sequence


def max_subsequence_sum(nums: Sequence[int] = None) -> int:
    """Return the maximum possible sum amongst all non-empty subsequences.

    Raises:
      ValueError: when nums is empty.

    >>> max_subsequence_sum([1, 2, 3, 4, -2])
    10
    >>> max_subsequence_sum([-2, -3, -1, -4, -6])
    -1
    """
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")

    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        ans = max(ans, ans + num, num)

    return ans


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
    print(max_subsequence_sum(array))
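Note that the recurrence max(ans, ans + num, num) is for subsequences (elements may be skipped), not contiguous subarrays; a side-by-side sketch for contrast (max_subarray_sum is an illustrative helper, not part of the file above):

def max_subarray_sum(nums):  # Kadane's algorithm: contiguous runs only
    best = cur = nums[0]
    for num in nums[1:]:
        cur = max(num, cur + num)
        best = max(best, cur)
    return best

data = [-2, 5, -1, 6]
assert max_subsequence_sum(data) == 11  # pick 5 and 6, skip -1
assert max_subarray_sum(data) == 10     # contiguous: 5 + (-1) + 6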
| 354
|
def sylvester(number: int) -> int:
    """
    Calculate the n-th number in Sylvester's sequence.

    >>> sylvester(8)
    113423713055421844361000443
    """
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"

    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1


if __name__ == "__main__":
    print(f"The 8th number in Sylvester's sequence: {sylvester(8)}")
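The recursion implements the recurrence a(n) = a(n-1)·(a(n-1) − 1) + 1, giving 2, 3, 7, 43, 1807, …; a quick sanity check:

assert [sylvester(n) for n in range(1, 6)] == [2, 3, 7, 43, 1807]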
| 196
| 0
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    prefix = "backbone." if is_semantic else ""

    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'{prefix}blocks.{i}.norm1.weight', f'beit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'{prefix}blocks.{i}.norm1.bias', f'beit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(f'{prefix}blocks.{i}.attn.proj.weight', f'beit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(f'{prefix}blocks.{i}.attn.proj.bias', f'beit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'{prefix}blocks.{i}.norm2.weight', f'beit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'{prefix}blocks.{i}.norm2.bias', f'beit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((f'{prefix}blocks.{i}.mlp.fc1.weight', f'beit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'{prefix}blocks.{i}.mlp.fc1.bias', f'beit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'{prefix}blocks.{i}.mlp.fc2.weight', f'beit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'{prefix}blocks.{i}.mlp.fc2.bias', f'beit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
(f'{prefix}cls_token', """beit.embeddings.cls_token"""),
(f'{prefix}patch_embed.proj.weight', """beit.embeddings.patch_embeddings.projection.weight"""),
(f'{prefix}patch_embed.proj.bias', """beit.embeddings.patch_embeddings.projection.bias"""),
(f'{prefix}pos_embed', """beit.embeddings.position_embeddings"""),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("""mask_token""", """beit.embeddings.mask_token"""),
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("""fc_norm.weight""", """beit.pooler.layernorm.weight"""),
("""fc_norm.bias""", """beit.pooler.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")

        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify the converted model on an image from COCO
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    # define default BEiT configuration
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()

    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
    args = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
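An example invocation (the output directory is hypothetical):

# python convert_dit_unilm_to_pytorch.py \
#     --checkpoint_url https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth \
#     --pytorch_dump_folder_path ./dit-base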
| 228
|
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def __A ( ) -> Any:
a = ArgumentParser(
description=(
"""PyTorch TPU distributed training launch """
"""helper utility that will spawn up """
"""multiple distributed processes"""
) )
# Optional arguments for the launch helper
parser.add_argument("""--num_cores""" , type=__lowerCamelCase , default=1 , help="""Number of TPU cores to use (1 or 8).""" )
# positional
parser.add_argument(
"""training_script""" , type=__lowerCamelCase , help=(
"""The full path to the single TPU training """
"""program/script to be launched in parallel, """
"""followed by all the arguments for the """
"""training script"""
) , )
# rest from the training program
parser.add_argument("""training_script_args""" , nargs=__lowerCamelCase )
return parser.parse_args()
def __A ( ) -> Union[str, Any]:
a = parse_args()
# Import training_script as a module.
a = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
a = script_fpath.stem
a = importlib.import_module(__lowerCamelCase )
# Patch sys.argv
a = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
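
# Added illustration (not part of the original launcher): `xmp.spawn(mod._mp_fn, ...)` above
# assumes the launched training script exposes a module-level `_mp_fn`. A minimal sketch of
# such an entry point (the function body below is hypothetical):
import sys

def _mp_fn(index):
    # `index` is the ordinal of the spawned TPU process; by the time this runs,
    # the launcher has already patched sys.argv to include --tpu_num_cores.
    print(f"worker {index} sees argv: {sys.argv}")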
'''simple docstring'''
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('0.8.3'):
raise Exception('requires gluonnlp == 0.8.3')
if version.parse(mx.__version__) != version.parse('1.5.0'):
raise Exception('requires mxnet == 1.5.0')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = 'The Nymphenburg Palace is a beautiful palace in Munich!'
def convert_bort_checkpoint_to_pytorch( bort_checkpoint_path : str , pytorch_dump_folder_path : str ):
    '''simple docstring'''
    bort_4_8_768_1024_hparams = {
'attention_cell': 'multi_head',
'num_layers': 4,
'units': 1_0_2_4,
'hidden_size': 7_6_8,
'max_length': 5_1_2,
'num_heads': 8,
'scaled': True,
'dropout': 0.1,
'use_residual': True,
'embed_size': 1_0_2_4,
'embed_dropout': 0.1,
'word_embed': None,
'layer_norm_eps': 1e-5,
'token_type_vocab_size': 2,
}
    predefined_args = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args['attention_cell'] , num_layers=predefined_args['num_layers'] , units=predefined_args['units'] , hidden_size=predefined_args['hidden_size'] , max_length=predefined_args['max_length'] , num_heads=predefined_args['num_heads'] , scaled=predefined_args['scaled'] , dropout=predefined_args['dropout'] , output_attention=False , output_all_encodings=False , use_residual=predefined_args['use_residual'] , activation=predefined_args.get('activation' , 'gelu' ) , layer_norm_eps=predefined_args.get('layer_norm_eps' , None ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = 'openwebtext_ccnews_stories_books_cased'
# Specify download folder to Gluonnlp's vocab
    data_dir = os.path.join(get_home_dir() , 'models' )
    vocab = _load_vocab(vocab_name , None , data_dir , cls=Vocab )
    original_bort = nlp.model.BERTModel(
        encoder , len(vocab ) , units=predefined_args['units'] , embed_size=predefined_args['embed_size'] , embed_dropout=predefined_args['embed_dropout'] , word_embed=predefined_args['word_embed'] , use_pooler=False , use_token_type_embed=False , token_type_vocab_size=predefined_args['token_type_vocab_size'] , use_classifier=False , use_decoder=False , )
    original_bort.load_parameters(bort_checkpoint_path , cast_dtype=True , ignore_extra=True )
    params = original_bort._collect_params_with_prefix()
# Build our config 🤗
    hf_bort_config_json = {
'architectures': ['BertForMaskedLM'],
'attention_probs_dropout_prob': predefined_args['dropout'],
'hidden_act': 'gelu',
'hidden_dropout_prob': predefined_args['dropout'],
'hidden_size': predefined_args['embed_size'],
'initializer_range': 0.02,
'intermediate_size': predefined_args['hidden_size'],
'layer_norm_eps': predefined_args['layer_norm_eps'],
'max_position_embeddings': predefined_args['max_length'],
'model_type': 'bort',
'num_attention_heads': predefined_args['num_heads'],
'num_hidden_layers': predefined_args['num_layers'],
'pad_token_id': 1, # 2 = BERT, 1 = RoBERTa
'type_vocab_size': 1, # 2 = BERT, 1 = RoBERTa
        'vocab_size': len(vocab ),
}
    hf_bort_config = BertConfig.from_dict(hf_bort_config_json )
    hf_bort_model = BertForMaskedLM(hf_bort_config )
    hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
def to_torch(__lowerCamelCase : Optional[int] ) -> nn.Parameter:
return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
def check_and_map_params(__lowerCamelCase : Tuple , __lowerCamelCase : List[Any] ):
_UpperCAmelCase : Optional[int] =hf_param.shape
_UpperCAmelCase : int =to_torch(params[gluon_param] )
_UpperCAmelCase : Tuple =gluon_param.shape
assert (
shape_hf == shape_gluon
), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"
return gluon_param
    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight , 'word_embed.0.weight' )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight , 'encoder.position_weight' )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias , 'encoder.layer_norm.beta' )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight , 'encoder.layer_norm.gamma' )
    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
    for i in range(hf_bort_config.num_hidden_layers ):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]
        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data , f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias" )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data , f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight" )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data , f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias" )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data , f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight" )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data , f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias" )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data , f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight" )
        # self attention output
        self_output: BertSelfOutput = layer.attention.output
        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias , f"encoder.transformer_cells.{i}.proj.bias" )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight , f"encoder.transformer_cells.{i}.proj.weight" )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias , f"encoder.transformer_cells.{i}.layer_norm.beta" )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight , f"encoder.transformer_cells.{i}.layer_norm.gamma" )
        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias , f"encoder.transformer_cells.{i}.ffn.ffn_1.bias" )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight , f"encoder.transformer_cells.{i}.ffn.ffn_1.weight" )
        # output
        bert_output: BertOutput = layer.output
        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias , f"encoder.transformer_cells.{i}.ffn.ffn_2.bias" )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight , f"encoder.transformer_cells.{i}.ffn.ffn_2.weight" )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias , f"encoder.transformer_cells.{i}.ffn.layer_norm.beta" )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight , f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma" )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained('roberta-base' )
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT )['input_ids']
    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids] )
    output_gluon = original_bort(inputs=gluon_input_ids , token_types=[] )
    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path )
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path )
    hf_bort_model.eval()
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT , return_tensors='pt' )
    output_hf = hf_bort_model(**input_ids )[0]
    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()
    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer ) ).item()
    success = np.allclose(gluon_layer , hf_layer , atol=1e-3 )
    if success:
        print('✔️ Both models output the same tensors' )
    else:
        print('❌ Both models do **NOT** output the same tensors' )
        print('Absolute difference is:' , max_absolute_diff )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--bort_checkpoint_path', default=None, type=str, required=True, help='Path the official Bort params file.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
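
# Added illustration (not part of the original script): the conversion above hinges on wrapping
# an MXNet array's numpy view in a torch Parameter and asserting that shapes line up. A
# self-contained toy version, with a plain numpy array standing in for `mx_array.data().asnumpy()`:
import numpy as np
import torch
from torch import nn

gluon_weights = np.random.rand(4, 4).astype(np.float32)  # stand-in for a Gluon parameter
hf_param = nn.Parameter(torch.FloatTensor(gluon_weights))
assert hf_param.shape == torch.Size([4, 4]), "shape mismatch between source and target"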
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase =logging.get_logger(__name__)
lowercase ={
'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class GLPNConfig(PretrainedConfig ):
    model_type = "glpn"

    def __init__( self , num_channels=3 , num_encoder_blocks=4 , depths=[2, 2, 2, 2] , sr_ratios=[8, 4, 2, 1] , hidden_sizes=[32, 64, 160, 256] , patch_sizes=[7, 3, 3, 3] , strides=[4, 2, 2, 2] , num_attention_heads=[1, 2, 5, 8] , mlp_ratios=[4, 4, 4, 4] , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , drop_path_rate=0.1 , layer_norm_eps=1E-6 , decoder_hidden_size=64 , max_depth=10 , head_in_index=-1 , **kwargs , ) -> Tuple:
        '''simple docstring'''
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
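
# Added illustration (not part of the original file): a minimal usage sketch of the
# configuration class defined above, assuming it is importable as `GLPNConfig` from
# `transformers` (as in the upstream library).
from transformers import GLPNConfig

config = GLPNConfig(drop_path_rate=0.2)  # override one field, keep the other defaults
print(config.hidden_sizes, config.drop_path_rate)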
'''simple docstring'''
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel


logger = logging.getLogger(__name__)
def save_model(model , dirpath ):
    # save results
    if os.path.exists(dirpath ):
        if os.path.exists(os.path.join(dirpath , "config.json" ) ) and os.path.isfile(
            os.path.join(dirpath , "config.json" ) ):
            os.remove(os.path.join(dirpath , "config.json" ) )
        if os.path.exists(os.path.join(dirpath , "pytorch_model.bin" ) ) and os.path.isfile(
            os.path.join(dirpath , "pytorch_model.bin" ) ):
            os.remove(os.path.join(dirpath , "pytorch_model.bin" ) )
    else:
        os.makedirs(dirpath )
    model.save_pretrained(dirpath )
def entropy(p , unlogit=False ):
    exponent = 2
    if unlogit:
        p = torch.pow(p , exponent )
    plogp = p * torch.log(p )
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1 )
def print_ad_tensor(tensor ):
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor ) ) ) )
    for row in range(len(tensor ) ):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data ) )
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data ) )
def compute_heads_importance(args , model , eval_dataloader , compute_entropy=True , compute_importance=True , head_mask=None , actually_pruned=False ):
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers , n_heads ).to(args.device )
    attn_entropy = torch.zeros(n_layers , n_heads ).to(args.device )
    if head_mask is None:
        head_mask = torch.ones(n_layers , n_heads ).to(args.device )
    head_mask.requires_grad_(requires_grad=True )
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None
    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader , desc="Iteration" , disable=args.local_rank not in [-1, 0] ) ):
        inputs = tuple(t.to(args.device ) for t in inputs )
        (input_ids,) = inputs
        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids , labels=input_ids , head_mask=head_mask )
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions ):
                masked_entropy = entropy(attn.detach() , True )
                attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance , exponent ).sum(-1 ) , 1 / exponent )
        head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-20
    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies" )
        print_ad_tensor(attn_entropy )
    if compute_importance:
        logger.info("Head importance scores" )
        print_ad_tensor(head_importance )
    logger.info("Head ranked by importance scores" )
    head_ranks = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
    head_ranks[head_importance.view(-1 ).sort(descending=True )[1]] = torch.arange(
        head_importance.numel() , device=args.device )
    head_ranks = head_ranks.view_as(head_importance )
    print_ad_tensor(head_ranks )
return attn_entropy, head_importance, total_loss
def mask_heads(args , model , eval_dataloader ):
    _, head_importance, loss = compute_heads_importance(args , model , eval_dataloader , compute_entropy=False )
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f" , original_score , original_score * args.masking_threshold )
    new_head_mask = torch.ones_like(head_importance )
    num_to_mask = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf" )
        current_heads_to_mask = head_importance.view(-1 ).sort()[1]
        if len(current_heads_to_mask ) <= num_to_mask:
            print("BREAK BY num_to_mask" )
            break
        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s" , str(current_heads_to_mask.tolist() ) )
        new_head_mask = new_head_mask.view(-1 )
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask )
        new_head_mask = new_head_mask.clone().detach()
        print_ad_tensor(new_head_mask )
        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args , model , eval_dataloader , compute_entropy=False , head_mask=new_head_mask )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)" , current_score , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , )
    logger.info("Final head mask" )
    print_ad_tensor(head_mask )
    np.save(os.path.join(args.output_dir , "head_mask.npy" ) , head_mask.detach().cpu().numpy() )
    return head_mask
def prune_heads(args , model , eval_dataloader , head_mask ):
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args , model , eval_dataloader , compute_entropy=False , compute_importance=False , head_mask=head_mask )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time
    original_num_params = sum(p.numel() for p in model.parameters() )
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask ) )
    }
    for k, v in heads_to_prune.items():
        if isinstance(v , int ):
            heads_to_prune[k] = [
                v,
            ]
    assert sum(len(h ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune )
    pruned_num_params = sum(p.numel() for p in model.parameters() )
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args , model , eval_dataloader , compute_entropy=False , compute_importance=False , head_mask=None , actually_pruned=True , )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time
    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)" , original_num_params , pruned_num_params , pruned_num_params / original_num_params * 100 , )
    logger.info("Pruning: score with masking: %f score with pruning: %f" , score_masking , score_pruning )
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents" , original_time / new_time * 100 )
    save_model(model , args.output_dir )
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir" , default=None , type=str , required=True , help="The input data dir. Should contain the .tsv files (or other data files) for the task." , )
    parser.add_argument(
        "--model_name_or_path" , default=None , type=str , required=True , help="Path to pretrained model or model identifier from huggingface.co/models" , )
    parser.add_argument(
        "--output_dir" , default=None , type=str , required=True , help="The output directory where the model predictions and checkpoints will be written." , )
    # Other parameters
    parser.add_argument(
        "--config_name" , default="" , type=str , help="Pretrained config name or path if not the same as model_name_or_path" , )
    parser.add_argument(
        "--tokenizer_name" , default="" , type=str , help="Pretrained tokenizer name or path if not the same as model_name_or_path" , )
    parser.add_argument(
        "--cache_dir" , default=None , type=str , help="Where do you want to store the pre-trained models downloaded from s3" , )
    parser.add_argument(
        "--data_subset" , type=int , default=-1 , help="If > 0: limit the data to a subset of data_subset instances." )
    parser.add_argument(
        "--overwrite_output_dir" , action="store_true" , help="Whether to overwrite data in output directory" )
    parser.add_argument(
        "--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
    parser.add_argument(
        "--dont_normalize_importance_by_layer" , action="store_true" , help="Don't normalize importance score by layers" )
    parser.add_argument(
        "--dont_normalize_global_importance" , action="store_true" , help="Don't normalize all importance scores between 0 and 1" , )
    parser.add_argument(
        "--try_masking" , action="store_true" , help="Whether to try to mask head until a threshold of accuracy." )
    parser.add_argument(
        "--masking_threshold" , default=0.9 , type=float , help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value)." , )
    parser.add_argument(
        "--masking_amount" , default=0.1 , type=float , help="Amount to heads to masking at each masking step." )
    parser.add_argument("--metric_name" , default="acc" , type=str , help="Metric to use for head masking." )
    parser.add_argument(
        "--max_seq_length" , default=128 , type=int , help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ) , )
    parser.add_argument("--batch_size" , default=1 , type=int , help="Batch size." )
    parser.add_argument("--seed" , type=int , default=42 )
    parser.add_argument("--local_rank" , type=int , default=-1 , help="local_rank for distributed training on gpus" )
    parser.add_argument("--no_cuda" , action="store_true" , help="Whether not to use CUDA when available" )
    parser.add_argument("--server_ip" , type=str , default="" , help="Can be used for distant debugging." )
    parser.add_argument("--server_port" , type=str , default="" , help="Can be used for distant debugging." )
    args = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach" )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=True )
ptvsd.wait_for_attach()
# Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu" )
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank )
        args.device = torch.device("cuda" , args.local_rank )
        args.n_gpu = 1
torch.distributed.init_process_group(backend="nccl" ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=True )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model )
# Print/save training arguments
    os.makedirs(args.output_dir , exist_ok=True )
    torch.save(args , os.path.join(args.output_dir , "run_args.bin" ) )
    logger.info("Training/evaluation parameters %s" , args )
# Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir , dtype=np.int64 ),
        ] )
    train_tensor_dataset = (torch.from_numpy(numpy_data ),)
    train_data = TensorDataset(*train_tensor_dataset )
    train_sampler = RandomSampler(train_data )
    eval_dataloader = DataLoader(train_data , sampler=train_sampler , batch_size=args.batch_size )
# Compute head entropy and importance score
    compute_heads_importance(args , model , eval_dataloader )
    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args , model , eval_dataloader )
        prune_heads(args , model , eval_dataloader , head_mask )
if __name__ == "__main__":
main()
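
# Added illustration (not part of the original script): the `entropy` helper above computes
# -sum(p * log p) over the last dimension of an attention map. A toy sanity check on a uniform
# distribution over 8 positions, whose entropy should be log(8) ~= 2.079:
import math
import torch

p = torch.full((1, 1, 1, 8), 1 / 8)  # uniform "attention" over 8 positions
plogp = p * torch.log(p)
plogp[p == 0] = 0
print(float(-plogp.sum(dim=-1).squeeze()), math.log(8))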
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
a_ :Optional[Any] = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ :str = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ :int = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ :List[str] = [
"REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ReformerAttention",
"ReformerForMaskedLM",
"ReformerForQuestionAnswering",
"ReformerForSequenceClassification",
"ReformerLayer",
"ReformerModel",
"ReformerModelWithLMHead",
"ReformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
a_ :Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
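
# Added illustration (not part of the original file): with the `_LazyModule` indirection above,
# importing a name from the package only resolves the heavy submodule on first access. A minimal
# sketch, assuming a `transformers` installation that ships the Reformer model:
from transformers import ReformerConfig

config = ReformerConfig()  # triggers the lazy import of the configuration submodule
print(config.model_type)   # "reformer"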
"""simple docstring"""
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
lowercase__ :int = logging.getLogger(__name__)
def load_and_quantize_model( model , bnb_quantization_config , weights_location = None , device_map = None , no_split_module_classes = None , max_memory = None , offload_folder = None , offload_state_dict = False , ):
    '''simple docstring'''
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit
    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            '''You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'''
            ''' make sure you have the latest version of `bitsandbytes` installed.''' )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            '''You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'''
            '''make sure you have the latest version of `bitsandbytes` installed.''' )
    modules_on_cpu = []
    # custom device map
    if isinstance(device_map , dict ) and len(device_map.keys() ) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ['''disk''', '''cpu''']]
    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model )
        # add cpu modules to skip modules only for 4-bit modules
        if load_in_4bit:
            bnb_quantization_config.skip_modules.extend(modules_on_cpu )
    modules_to_not_convert = bnb_quantization_config.skip_modules
    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules )
    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit
    model_device = get_parameter_device(model )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
'''It is not recommended to quantize a loaded model. '''
'''The model should be instantiated under the `init_empty_weights` context manager.''' )
        model = replace_with_bnb_layers(model , bnb_quantization_config , modules_to_not_convert=modules_to_not_convert )
# convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules ):
                param.to(torch.float32 )
                if param.dtype != torch.float32:
                    name = name.replace('''.weight''' , '''''' ).replace('''.bias''' , '''''' )
                    param = getattr(model , name , None )
                    if param is not None:
                        param.to(torch.float32 )
            elif torch.is_floating_point(param ):
                param.to(dtype )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
logger.info(
f'The model device type is {model_device.type}. However, cuda is needed for quantization.'
'''We move the model to cuda.''' )
return model
elif weights_location is None:
raise RuntimeError(
f'`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ' )
else:
with init_empty_weights():
            model = replace_with_bnb_layers(
                model , bnb_quantization_config , modules_to_not_convert=modules_to_not_convert )
        device_map = get_quantized_model_device_map(
            model , bnb_quantization_config , device_map , max_memory=max_memory , no_split_module_classes=no_split_module_classes , )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True
        offload = any(x in list(device_map.values() ) for x in ['''cpu''', '''disk'''] )
        load_checkpoint_in_model(
            model , weights_location , device_map , dtype=bnb_quantization_config.torch_dtype , offload_folder=offload_folder , offload_state_dict=offload_state_dict , keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules , offload_8bit_bnb=load_in_8bit and offload , )
        return dispatch_model(model , device_map=device_map , offload_dir=offload_folder )
def get_quantized_model_device_map( model , bnb_quantization_config , device_map=None , max_memory=None , no_split_module_classes=None ):
    '''simple docstring'''
if device_map is None:
if torch.cuda.is_available():
            device_map = {"": torch.cuda.current_device()}
else:
raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
logger.info('''The device_map was not initialized.''' '''Setting device_map to `{\'\':torch.cuda.current_device()}`.''' )
    if isinstance(device_map , str ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
'''If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '''
'''\'sequential\'.''' )
        special_dtypes = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
        kwargs = {}
        kwargs['''special_dtypes'''] = special_dtypes
        kwargs['''no_split_module_classes'''] = no_split_module_classes
        kwargs['''dtype'''] = bnb_quantization_config.target_dtype
        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model , low_zero=(device_map == '''balanced_low_0''') , max_memory=max_memory , **kwargs , )
        kwargs['''max_memory'''] = max_memory
        device_map = infer_auto_device_map(model , **kwargs )
    if isinstance(device_map , dict ):
# check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules
        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
raise ValueError(
'''
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
''' )
else:
                    logger.info(
                        '''Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit''' )
del device_map_without_some_modules
return device_map
def replace_with_bnb_layers( model , bnb_quantization_config , modules_to_not_convert=None , current_key_name=None ):
    '''simple docstring'''
    if modules_to_not_convert is None:
        modules_to_not_convert = []
    model, has_been_replaced = _replace_with_bnb_layers(
        model , bnb_quantization_config , modules_to_not_convert , current_key_name )
if not has_been_replaced:
logger.warning(
'''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
''' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'''
''' Please double check your model architecture, or submit an issue on github if you think this is'''
''' a bug.''' )
return model
def _replace_with_bnb_layers( model , bnb_quantization_config , modules_to_not_convert=None , current_key_name=None , ):
    '''simple docstring'''
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name )
        if isinstance(module , nn.Linear ) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = '''.'''.join(current_key_name )
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features , module.out_features , module.bias is not None , has_fp16_weights=False , threshold=bnb_quantization_config.llm_int8_threshold , )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_4bit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant , quant_type=bnb_quantization_config.bnb_4bit_quant_type , )
                else:
                    raise ValueError('''load_in_8bit and load_in_4bit can't be both False''' )
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False )
                setattr(model , name , bnb_module )
                has_been_replaced = True
        if len(list(module.children() ) ) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module , bnb_quantization_config , modules_to_not_convert , current_key_name )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1 )
    return model, has_been_replaced
def get_keys_to_not_convert( model ):
    '''simple docstring'''
    # Create a copy of the model
    with init_empty_weights():
        tied_model = deepcopy(model )  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_params = find_tied_parameters(tied_model )
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params , dict ):
        tied_keys = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
    else:
        tied_keys = sum(tied_params , [] )
    has_tied_params = len(tied_keys ) > 0
    # Check if it is a base model
    is_base_model = False
    if hasattr(model , '''base_model_prefix''' ):
        is_base_model = not hasattr(model , model.base_model_prefix )
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    list_modules = list(model.named_children() )
    list_last_module = [list_modules[-1][0]]
    # add last module together with tied weights
    intersection = set(list_last_module ) - set(tied_keys )
    list_untouched = list(set(tied_keys ) ) + list(intersection )
    # remove ".weight" from the keys
    names_to_remove = ['''.weight''', '''.bias''']
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove , '''''' )
        filtered_module_names.append(name )
    return filtered_module_names
def has_4bit_bnb_layers( model ):
    '''simple docstring'''
    for m in model.modules():
        if isinstance(m , bnb.nn.Linear4bit ):
            return True
    return False


def get_parameter_device( parameter ):
    '''simple docstring'''
    return next(parameter.parameters() ).device
def quantize_and_offload_8bit( model , param , param_name , new_dtype , offload_folder , offload_index , fp16_statistics ):
    '''simple docstring'''
    # if it is not quantized, we quantize and offload the quantized weights and the SCB stats
    if fp16_statistics is None:
        set_module_tensor_to_device(model , param_name , 0 , dtype=new_dtype , value=param )
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split('''.''' )
            for split in splits[:-1]:
                new_module = getattr(module , split )
                if new_module is None:
                    raise ValueError(f'{module} has no attribute {split}.' )
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name] , param_name , offload_folder , index=offload_index )
        if hasattr(module._parameters[tensor_name] , '''SCB''' ):
            offload_weight(
                module._parameters[tensor_name].SCB , param_name.replace('''weight''' , '''SCB''' ) , offload_folder , index=offload_index , )
    else:
        offload_weight(param , param_name , offload_folder , index=offload_index )
        offload_weight(fp16_statistics , param_name.replace('''weight''' , '''SCB''' ) , offload_folder , index=offload_index )
    set_module_tensor_to_device(model , param_name , '''meta''' , dtype=new_dtype , value=torch.empty(*param.size() ) )
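
# Added illustration (not part of the original file): a minimal sketch of how these helpers are
# typically driven end to end through accelerate's public API. The model id and weights folder
# are placeholder assumptions, and a CUDA device is required, so the sketch is left commented.
#
# from accelerate import init_empty_weights
# from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
# from transformers import AutoConfig, AutoModelForCausalLM
#
# with init_empty_weights():  # build the skeleton without allocating real weights
#     model = AutoModelForCausalLM.from_config(AutoConfig.from_pretrained("gpt2"))
# bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
# model = load_and_quantize_model(model, bnb_config, weights_location="./gpt2_weights")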
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
lowercase__ :Optional[Any] = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
lowercase__ :int = direct_transformers_import(PATH_TO_TRANSFORMERS)
lowercase__ :List[Any] = transformers.models.auto.configuration_auto.CONFIG_MAPPING
lowercase__ :List[str] = {
# used to compute the property `self.chunk_length`
"EncodecConfig": ["overlap"],
# used as `self.bert_model = BertModel(config, ...)`
"DPRConfig": True,
# not used in modeling files, but it's an important information
"FSMTConfig": ["langs"],
# used internally in the configuration class file
"GPTNeoConfig": ["attention_types"],
# used internally in the configuration class file
"EsmConfig": ["is_folding_model"],
# used during training (despite we don't have training script for these models yet)
"Mask2FormerConfig": ["ignore_value"],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
"OneFormerConfig": ["ignore_value", "norm"],
# used during preprocessing and collation, see `collating_graphormer.py`
"GraphormerConfig": ["spatial_pos_max"],
# used internally in the configuration class file
"T5Config": ["feed_forward_proj"],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"MT5Config": ["feed_forward_proj", "tokenizer_class"],
"UMT5Config": ["feed_forward_proj", "tokenizer_class"],
# used internally in the configuration class file
"LongT5Config": ["feed_forward_proj"],
# used internally in the configuration class file
"SwitchTransformersConfig": ["feed_forward_proj"],
# having default values other than `1e-5` - we can't fix them without breaking
"BioGptConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"GLPNConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"SegformerConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"CvtConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"PerceiverConfig": ["layer_norm_eps"],
# used internally to calculate the feature size
"InformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"TimeSeriesTransformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"AutoformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate `mlp_dim`
"SamVisionConfig": ["mlp_ratio"],
# For (head) training, but so far not implemented
"ClapAudioConfig": ["num_classes"],
# Not used, but providing useful information to users
"SpeechT5HifiGanConfig": ["sampling_rate"],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"CLIPSegConfig": True,
"DeformableDetrConfig": True,
"DetaConfig": True,
"DinatConfig": True,
"DonutSwinConfig": True,
"EfficientFormerConfig": True,
"FSMTConfig": True,
"JukeboxConfig": True,
"LayoutLMv2Config": True,
"MaskFormerSwinConfig": True,
"MT5Config": True,
"NatConfig": True,
"OneFormerConfig": True,
"PerceiverConfig": True,
"RagConfig": True,
"SpeechT5Config": True,
"SwinConfig": True,
"Swin2SRConfig": True,
"Swinv2Config": True,
"SwitchTransformersConfig": True,
"TableTransformerConfig": True,
"TapasConfig": True,
"TransfoXLConfig": True,
"UniSpeechConfig": True,
"UniSpeechSatConfig": True,
"WavLMConfig": True,
"WhisperConfig": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"JukeboxPriorConfig": True,
# TODO: @Younes (for `is_decoder`)
"Pix2StructTextConfig": True,
}
)
def check_attribute_being_used( config_class , attributes , default_value , source_strings ):
    '''simple docstring'''
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f'config.{attribute}' in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"' , modeling_source , )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break
    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]
    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id" ):
                case_allowed = True
            # configuration class specific cases
            if not case_allowed:
                allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
                case_allowed = allowed_cases is True or attribute in allowed_cases
    return attribute_used or case_allowed
def check_config_attributes_being_used( config_class ):
    '''simple docstring'''
    signature = dict(inspect.signature(config_class.__init__ ).parameters )
    parameter_names = [x for x in list(signature.keys() ) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]
    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map ) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}
    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class )
    model_dir = os.path.dirname(config_source_file )
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir , fn ) for fn in os.listdir(model_dir ) if fn.startswith("modeling_" )]
    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path ):
            with open(path ) as fp:
                modeling_sources.append(fp.read() )
    unused_attributes = []
    for config_param, default_value in zip(parameter_names , parameter_defaults ):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param] )
        if not check_attribute_being_used(config_class , attributes , default_value , modeling_sources ):
            unused_attributes.append(attributes[0] )
    return sorted(unused_attributes )
def check_config_attributes():
    '''simple docstring'''
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values() ):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class ) , lambda x : inspect.isclass(x )
                and issubclass(x , PretrainedConfig )
                and inspect.getmodule(x ) == inspect.getmodule(_config_class ) , )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class )
            if len(unused_attributes ) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes
    if len(configs_with_unused_attributes ) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f'{name}: {attributes}\n'
        raise ValueError(error )
if __name__ == "__main__":
check_config_attributes()
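
# Added illustration (not part of the original script): the multi-line `getattr` detection above
# reduces to a whitespace-tolerant regex over the modeling source. A self-contained toy check:
import re

source = 'value = getattr(\n    self.config, "hidden_size", 768\n)'
pattern = r'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"hidden_size"'
print(re.search(pattern, source) is not None)  # True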