code
stringlengths 86
54.5k
| code_codestyle
int64 0
371
| style_context
stringlengths 87
49.2k
| style_context_codestyle
int64 0
349
| label
int64 0
1
|
|---|---|---|---|---|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class lowercase ( _UpperCAmelCase ):
_SCREAMING_SNAKE_CASE = 'speech_to_text_2'
_SCREAMING_SNAKE_CASE = ['past_key_values']
_SCREAMING_SNAKE_CASE = {'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self , lowercase=10_000 , lowercase=6 , lowercase=2_048 , lowercase=4 , lowercase=0.0 , lowercase=True , lowercase="relu" , lowercase=256 , lowercase=0.1 , lowercase=0.0 , lowercase=0.0 , lowercase=0.02 , lowercase=2 , lowercase=True , lowercase=1 , lowercase=0 , lowercase=2 , lowercase=1_024 , **lowercase , ) -> Optional[Any]:
lowerCAmelCase = vocab_size
lowerCAmelCase = d_model
lowerCAmelCase = decoder_ffn_dim
lowerCAmelCase = decoder_layers
lowerCAmelCase = decoder_attention_heads
lowerCAmelCase = dropout
lowerCAmelCase = attention_dropout
lowerCAmelCase = activation_dropout
lowerCAmelCase = activation_function
lowerCAmelCase = init_std
lowerCAmelCase = decoder_layerdrop
lowerCAmelCase = use_cache
lowerCAmelCase = decoder_layers
lowerCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True
lowerCAmelCase = max_target_positions
super().__init__(
pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase , decoder_start_token_id=lowercase , **lowercase , )
| 46
|
import random
from typing import Any
def a_ ( _A ) -> list[Any]:
"""simple docstring"""
for _ in range(len(_A ) ):
snake_case__ = random.randint(0 , len(_A ) - 1 )
snake_case__ = random.randint(0 , len(_A ) - 1 )
snake_case__ , snake_case__ = data[b], data[a]
return data
if __name__ == "__main__":
__UpperCamelCase : Dict = [0, 1, 2, 3, 4, 5, 6, 7]
__UpperCamelCase : Any = ["""python""", """says""", """hello""", """!"""]
print("""Fisher-Yates Shuffle:""")
print("""List""", integers, strings)
print("""FY Shuffle""", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 307
| 0
|
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
lowerCamelCase_ = logging.getLogger(__name__)
class __A( lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = """summarization"""
SCREAMING_SNAKE_CASE__ = ["""loss"""]
SCREAMING_SNAKE_CASE__ = ROUGE_KEYS
SCREAMING_SNAKE_CASE__ = """rouge2"""
def __init__(self , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
if hparams.sortish_sampler and hparams.gpus > 1:
UpperCamelCase__ = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError("""Dynamic Batch size does not work for multi-gpu training""" )
if hparams.sortish_sampler:
raise ValueError("""--sortish_sampler and --max_tokens_per_batch may not be used simultaneously""" )
super().__init__(__SCREAMING_SNAKE_CASE , num_labels=__SCREAMING_SNAKE_CASE , mode=self.mode , **__SCREAMING_SNAKE_CASE )
use_task_specific_params(self.model , """summarization""" )
save_git_info(self.hparams.output_dir )
UpperCamelCase__ = Path(self.output_dir ) / """metrics.json"""
UpperCamelCase__ = Path(self.output_dir ) / """hparams.pkl"""
pickle_save(self.hparams , self.hparams_save_path )
UpperCamelCase__ = 0
UpperCamelCase__ = defaultdict(__SCREAMING_SNAKE_CASE )
UpperCamelCase__ = self.config.model_type
UpperCamelCase__ = self.config.tgt_vocab_size if self.model_type == """fsmt""" else self.config.vocab_size
UpperCamelCase__ = {
"""data_dir""": self.hparams.data_dir,
"""max_source_length""": self.hparams.max_source_length,
"""prefix""": self.model.config.prefix or """""",
}
UpperCamelCase__ = {
"""train""": self.hparams.n_train,
"""val""": self.hparams.n_val,
"""test""": self.hparams.n_test,
}
UpperCamelCase__ = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
UpperCamelCase__ = {
"""train""": self.hparams.max_target_length,
"""val""": self.hparams.val_max_target_length,
"""test""": self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], F"target_lens: {self.target_lens}"
assert self.target_lens["train"] <= self.target_lens["test"], F"target_lens: {self.target_lens}"
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
UpperCamelCase__ = get_git_info()["""repo_sha"""]
UpperCamelCase__ = hparams.num_workers
UpperCamelCase__ = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , __SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
UpperCamelCase__ = self.decoder_start_token_id
UpperCamelCase__ = (
SeqaSeqDataset if hasattr(self.tokenizer , """prepare_seq2seq_batch""" ) else LegacySeqaSeqDataset
)
UpperCamelCase__ = False
UpperCamelCase__ = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
UpperCamelCase__ = self.hparams.eval_max_gen_length
else:
UpperCamelCase__ = self.model.config.max_length
UpperCamelCase__ = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = {
k: self.tokenizer.batch_decode(v.tolist() ) if """mask""" not in k else v.shape for k, v in batch.items()
}
save_json(__SCREAMING_SNAKE_CASE , Path(self.output_dir ) / """text_batch.json""" )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / """tok_batch.json""" )
UpperCamelCase__ = True
return readable_batch
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
return self.model(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = self.tokenizer.batch_decode(
__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=__SCREAMING_SNAKE_CASE )
return lmap(str.strip , __SCREAMING_SNAKE_CASE )
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = self.tokenizer.pad_token_id
UpperCamelCase__ , UpperCamelCase__ = batch["""input_ids"""], batch["""attention_mask"""]
UpperCamelCase__ = batch["""labels"""]
if isinstance(self.model , __SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = self.model._shift_right(__SCREAMING_SNAKE_CASE )
else:
UpperCamelCase__ = shift_tokens_right(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
UpperCamelCase__ = decoder_input_ids
self.save_readable_batch(__SCREAMING_SNAKE_CASE )
UpperCamelCase__ = self(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , decoder_input_ids=__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE )
UpperCamelCase__ = outputs["""logits"""]
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
UpperCamelCase__ = nn.CrossEntropyLoss(ignore_index=__SCREAMING_SNAKE_CASE )
assert lm_logits.shape[-1] == self.vocab_size
UpperCamelCase__ = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
UpperCamelCase__ = nn.functional.log_softmax(__SCREAMING_SNAKE_CASE , dim=-1 )
UpperCamelCase__ , UpperCamelCase__ = label_smoothed_nll_loss(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.hparams.label_smoothing , ignore_index=__SCREAMING_SNAKE_CASE )
return (loss,)
@property
def UpperCAmelCase_ (self ):
return self.tokenizer.pad_token_id
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = self._step(__SCREAMING_SNAKE_CASE )
UpperCamelCase__ = dict(zip(self.loss_names , __SCREAMING_SNAKE_CASE ) )
# tokens per batch
UpperCamelCase__ = batch["""input_ids"""].ne(self.pad ).sum() + batch["""labels"""].ne(self.pad ).sum()
UpperCamelCase__ = batch["""input_ids"""].shape[0]
UpperCamelCase__ = batch["""input_ids"""].eq(self.pad ).sum()
UpperCamelCase__ = batch["""input_ids"""].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
return self._generative_step(__SCREAMING_SNAKE_CASE )
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_="val" ):
self.step_count += 1
UpperCamelCase__ = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
UpperCamelCase__ = losses["""loss"""]
UpperCamelCase__ = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ["""gen_time""", """gen_len"""]
}
UpperCamelCase__ = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
UpperCamelCase__ = torch.tensor(__SCREAMING_SNAKE_CASE ).type_as(__SCREAMING_SNAKE_CASE )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(__SCREAMING_SNAKE_CASE )
UpperCamelCase__ = {F"{prefix}_avg_{k}": x for k, x in losses.items()}
UpperCamelCase__ = self.step_count
self.metrics[prefix].append(__SCREAMING_SNAKE_CASE ) # callback writes this to self.metrics_save_path
UpperCamelCase__ = flatten_list([x["""preds"""] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
F"{prefix}_loss": loss,
F"{prefix}_{self.val_metric}": metric_tensor,
}
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
return calculate_rouge(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
UpperCamelCase__ = self.model.generate(
batch["""input_ids"""] , attention_mask=batch["""attention_mask"""] , use_cache=__SCREAMING_SNAKE_CASE , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
UpperCamelCase__ = (time.time() - ta) / batch["""input_ids"""].shape[0]
UpperCamelCase__ = self.ids_to_clean_text(__SCREAMING_SNAKE_CASE )
UpperCamelCase__ = self.ids_to_clean_text(batch["""labels"""] )
UpperCamelCase__ = self._step(__SCREAMING_SNAKE_CASE )
UpperCamelCase__ = dict(zip(self.loss_names , __SCREAMING_SNAKE_CASE ) )
UpperCamelCase__ = self.calc_generative_metrics(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
UpperCamelCase__ = np.mean(lmap(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
base_metrics.update(gen_time=__SCREAMING_SNAKE_CASE , gen_len=__SCREAMING_SNAKE_CASE , preds=__SCREAMING_SNAKE_CASE , target=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
return base_metrics
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
return self._generative_step(__SCREAMING_SNAKE_CASE )
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ):
return self.validation_epoch_end(__SCREAMING_SNAKE_CASE , prefix="""test""" )
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = self.n_obs[type_path]
UpperCamelCase__ = self.target_lens[type_path]
UpperCamelCase__ = self.dataset_class(
self.tokenizer , type_path=__SCREAMING_SNAKE_CASE , n_obs=__SCREAMING_SNAKE_CASE , max_target_length=__SCREAMING_SNAKE_CASE , **self.dataset_kwargs , )
return dataset
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = False ):
UpperCamelCase__ = self.get_dataset(__SCREAMING_SNAKE_CASE )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
UpperCamelCase__ = dataset.make_sortish_sampler(__SCREAMING_SNAKE_CASE , distributed=self.hparams.gpus > 1 )
return DataLoader(
__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , collate_fn=dataset.collate_fn , shuffle=__SCREAMING_SNAKE_CASE , num_workers=self.num_workers , sampler=__SCREAMING_SNAKE_CASE , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
UpperCamelCase__ = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
__SCREAMING_SNAKE_CASE , batch_sampler=__SCREAMING_SNAKE_CASE , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , collate_fn=dataset.collate_fn , shuffle=__SCREAMING_SNAKE_CASE , num_workers=self.num_workers , sampler=__SCREAMING_SNAKE_CASE , )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.get_dataloader("""train""" , batch_size=self.hparams.train_batch_size , shuffle=__SCREAMING_SNAKE_CASE )
return dataloader
def UpperCAmelCase_ (self ):
return self.get_dataloader("""val""" , batch_size=self.hparams.eval_batch_size )
def UpperCAmelCase_ (self ):
return self.get_dataloader("""test""" , batch_size=self.hparams.eval_batch_size )
@staticmethod
def UpperCAmelCase_ (SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
BaseTransformer.add_model_specific_args(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
add_generic_args(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
parser.add_argument(
"""--max_source_length""" , default=10_24 , type=__SCREAMING_SNAKE_CASE , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--max_target_length""" , default=56 , type=__SCREAMING_SNAKE_CASE , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--val_max_target_length""" , default=1_42 , type=__SCREAMING_SNAKE_CASE , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--test_max_target_length""" , default=1_42 , type=__SCREAMING_SNAKE_CASE , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument("""--freeze_encoder""" , action="""store_true""" )
parser.add_argument("""--freeze_embeds""" , action="""store_true""" )
parser.add_argument("""--sortish_sampler""" , action="""store_true""" , default=__SCREAMING_SNAKE_CASE )
parser.add_argument("""--overwrite_output_dir""" , action="""store_true""" , default=__SCREAMING_SNAKE_CASE )
parser.add_argument("""--max_tokens_per_batch""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE )
parser.add_argument("""--logger_name""" , type=__SCREAMING_SNAKE_CASE , choices=["""default""", """wandb""", """wandb_shared"""] , default="""default""" )
parser.add_argument("""--n_train""" , type=__SCREAMING_SNAKE_CASE , default=-1 , required=__SCREAMING_SNAKE_CASE , help="""# examples. -1 means use all.""" )
parser.add_argument("""--n_val""" , type=__SCREAMING_SNAKE_CASE , default=5_00 , required=__SCREAMING_SNAKE_CASE , help="""# examples. -1 means use all.""" )
parser.add_argument("""--n_test""" , type=__SCREAMING_SNAKE_CASE , default=-1 , required=__SCREAMING_SNAKE_CASE , help="""# examples. -1 means use all.""" )
parser.add_argument(
"""--task""" , type=__SCREAMING_SNAKE_CASE , default="""summarization""" , required=__SCREAMING_SNAKE_CASE , help="""# examples. -1 means use all.""" )
parser.add_argument("""--label_smoothing""" , type=__SCREAMING_SNAKE_CASE , default=0.0 , required=__SCREAMING_SNAKE_CASE )
parser.add_argument("""--src_lang""" , type=__SCREAMING_SNAKE_CASE , default="""""" , required=__SCREAMING_SNAKE_CASE )
parser.add_argument("""--tgt_lang""" , type=__SCREAMING_SNAKE_CASE , default="""""" , required=__SCREAMING_SNAKE_CASE )
parser.add_argument("""--eval_beams""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE )
parser.add_argument(
"""--val_metric""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , choices=["""bleu""", """rouge2""", """loss""", None] )
parser.add_argument("""--eval_max_gen_length""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , help="""never generate more than n tokens""" )
parser.add_argument("""--save_top_k""" , type=__SCREAMING_SNAKE_CASE , default=1 , required=__SCREAMING_SNAKE_CASE , help="""How many checkpoints to save""" )
parser.add_argument(
"""--early_stopping_patience""" , type=__SCREAMING_SNAKE_CASE , default=-1 , required=__SCREAMING_SNAKE_CASE , help=(
"""-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"""
""" val_check_interval will effect it."""
) , )
return parser
class __A( lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = """translation"""
SCREAMING_SNAKE_CASE__ = ["""loss"""]
SCREAMING_SNAKE_CASE__ = ["""bleu"""]
SCREAMING_SNAKE_CASE__ = """bleu"""
def __init__(self , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
super().__init__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
UpperCamelCase__ = hparams.src_lang
UpperCamelCase__ = hparams.tgt_lang
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
return calculate_bleu(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __magic_name__ ( __a : int , __a : Any=None ):
'''simple docstring'''
Path(args.output_dir ).mkdir(exist_ok=__a )
check_output_dir(__a , expected_items=3 )
if model is None:
if "summarization" in args.task:
UpperCamelCase__ = SummarizationModule(__a )
else:
UpperCamelCase__ = TranslationModule(__a )
UpperCamelCase__ = Path(args.data_dir ).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir ).startswith("""/tmp""" )
or str(args.output_dir ).startswith("""/var""" )
):
UpperCamelCase__ = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
UpperCamelCase__ = os.environ.get("""WANDB_PROJECT""" , __a )
UpperCamelCase__ = WandbLogger(name=model.output_dir.name , project=__a )
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
UpperCamelCase__ = WandbLogger(name=model.output_dir.name , project=f"hf_{dataset}" )
if args.early_stopping_patience >= 0:
UpperCamelCase__ = get_early_stopping_callback(model.val_metric , args.early_stopping_patience )
else:
UpperCamelCase__ = False
UpperCamelCase__ = args.val_metric == """loss"""
UpperCamelCase__ = generic_train(
__a , __a , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback(
args.output_dir , model.val_metric , args.save_top_k , __a ) , early_stopping_callback=__a , logger=__a , )
pickle_save(model.hparams , model.output_dir / """hparams.pkl""" )
if not args.do_predict:
return model
UpperCamelCase__ = """"""
UpperCamelCase__ = sorted(glob.glob(os.path.join(args.output_dir , """*.ckpt""" ) , recursive=__a ) )
if checkpoints:
UpperCamelCase__ = checkpoints[-1]
UpperCamelCase__ = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams )
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
lowerCamelCase_ = pl.Trainer.add_argparse_args(parser)
lowerCamelCase_ = SummarizationModule.add_model_specific_args(parser, os.getcwd())
lowerCamelCase_ = parser.parse_args()
main(args)
| 352
|
import collections
import os
import re
from pathlib import Path
lowerCamelCase_ = '''src/transformers'''
# Matches is_xxx_available()
lowerCamelCase_ = re.compile(r'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
lowerCamelCase_ = re.compile(r'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
lowerCamelCase_ = re.compile(r'''\s+"\S*":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
lowerCamelCase_ = re.compile(r'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
lowerCamelCase_ = re.compile(r'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
lowerCamelCase_ = re.compile(r'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
lowerCamelCase_ = re.compile(r'''^\s+"([^"]+)",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
lowerCamelCase_ = re.compile(r'''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
lowerCamelCase_ = re.compile(r'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
lowerCamelCase_ = re.compile(r'''^\s*try:''')
# Catches a line with else:
lowerCamelCase_ = re.compile(r'''^\s*else:''')
def __magic_name__ ( __a : List[Any] ):
'''simple docstring'''
if _re_test_backend.search(__a ) is None:
return None
UpperCamelCase__ = [b[0] for b in _re_backend.findall(__a )]
backends.sort()
return "_and_".join(__a )
def __magic_name__ ( __a : Dict ):
'''simple docstring'''
with open(__a , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
UpperCamelCase__ = f.readlines()
UpperCamelCase__ = 0
while line_index < len(__a ) and not lines[line_index].startswith("""_import_structure = {""" ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(__a ):
return None
# First grab the objects without a specific backend in _import_structure
UpperCamelCase__ = []
while not lines[line_index].startswith("""if TYPE_CHECKING""" ) and find_backend(lines[line_index] ) is None:
UpperCamelCase__ = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(__a ):
UpperCamelCase__ = _re_one_line_import_struct.search(__a ).groups()[0]
UpperCamelCase__ = re.findall(R"""\[([^\]]+)\]""" , __a )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(""", """ )] )
line_index += 1
continue
UpperCamelCase__ = _re_import_struct_key_value.search(__a )
if single_line_import_search is not None:
UpperCamelCase__ = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(""", """ ) if len(__a ) > 0]
objects.extend(__a )
elif line.startswith(""" """ * 8 + """\"""" ):
objects.append(line[9:-3] )
line_index += 1
UpperCamelCase__ = {"""none""": objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith("""if TYPE_CHECKING""" ):
# If the line is an if not is_backend_available, we grab all objects associated.
UpperCamelCase__ = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
UpperCamelCase__ = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
UpperCamelCase__ = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 4 ):
UpperCamelCase__ = lines[line_index]
if _re_import_struct_add_one.search(__a ) is not None:
objects.append(_re_import_struct_add_one.search(__a ).groups()[0] )
elif _re_import_struct_add_many.search(__a ) is not None:
UpperCamelCase__ = _re_import_struct_add_many.search(__a ).groups()[0].split(""", """ )
UpperCamelCase__ = [obj[1:-1] for obj in imports if len(__a ) > 0]
objects.extend(__a )
elif _re_between_brackets.search(__a ) is not None:
UpperCamelCase__ = _re_between_brackets.search(__a ).groups()[0].split(""", """ )
UpperCamelCase__ = [obj[1:-1] for obj in imports if len(__a ) > 0]
objects.extend(__a )
elif _re_quote_object.search(__a ) is not None:
objects.append(_re_quote_object.search(__a ).groups()[0] )
elif line.startswith(""" """ * 8 + """\"""" ):
objects.append(line[9:-3] )
elif line.startswith(""" """ * 12 + """\"""" ):
objects.append(line[13:-3] )
line_index += 1
UpperCamelCase__ = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
UpperCamelCase__ = []
while (
line_index < len(__a )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith("""else""" )
):
UpperCamelCase__ = lines[line_index]
UpperCamelCase__ = _re_import.search(__a )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 8 ):
objects.append(line[8:-2] )
line_index += 1
UpperCamelCase__ = {"""none""": objects}
# Let's continue with backend-specific objects
while line_index < len(__a ):
# If the line is an if is_backend_available, we grab all objects associated.
UpperCamelCase__ = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
UpperCamelCase__ = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
UpperCamelCase__ = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 8 ):
UpperCamelCase__ = lines[line_index]
UpperCamelCase__ = _re_import.search(__a )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 12 ):
objects.append(line[12:-2] )
line_index += 1
UpperCamelCase__ = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def __magic_name__ ( __a : Dict , __a : Dict ):
'''simple docstring'''
def find_duplicates(__a : Optional[Any] ):
return [k for k, v in collections.Counter(__a ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
UpperCamelCase__ = []
for key in import_dict_objects.keys():
UpperCamelCase__ = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}" )
UpperCamelCase__ = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}" )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
UpperCamelCase__ = """base imports""" if key == """none""" else f"{key} backend"
errors.append(f"Differences for {name}:" )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(f" {a} in TYPE_HINT but not in _import_structure." )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(f" {a} in _import_structure but not in TYPE_HINT." )
return errors
def __magic_name__ ( ):
'''simple docstring'''
UpperCamelCase__ = []
for root, _, files in os.walk(__a ):
if "__init__.py" in files:
UpperCamelCase__ = os.path.join(__a , """__init__.py""" )
UpperCamelCase__ = parse_init(__a )
if objects is not None:
UpperCamelCase__ = analyze_results(*__a )
if len(__a ) > 0:
UpperCamelCase__ = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
failures.append("""\n""".join(__a ) )
if len(__a ) > 0:
raise ValueError("""\n\n""".join(__a ) )
def __magic_name__ ( ):
'''simple docstring'''
UpperCamelCase__ = []
for path, directories, files in os.walk(__a ):
for folder in directories:
# Ignore private modules
if folder.startswith("""_""" ):
directories.remove(__a )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(__a ) / folder).glob("""*.py""" ) ) ) == 0:
continue
UpperCamelCase__ = str((Path(__a ) / folder).relative_to(__a ) )
UpperCamelCase__ = short_path.replace(os.path.sep , """.""" )
submodules.append(__a )
for fname in files:
if fname == "__init__.py":
continue
UpperCamelCase__ = str((Path(__a ) / fname).relative_to(__a ) )
UpperCamelCase__ = short_path.replace(""".py""" , """""" ).replace(os.path.sep , """.""" )
if len(submodule.split(""".""" ) ) == 1:
submodules.append(__a )
return submodules
lowerCamelCase_ = [
'''convert_pytorch_checkpoint_to_tf2''',
'''modeling_flax_pytorch_utils''',
'''models.esm.openfold_utils''',
]
def __magic_name__ ( ):
'''simple docstring'''
from transformers.utils import direct_transformers_import
UpperCamelCase__ = direct_transformers_import(__a )
UpperCamelCase__ = set(transformers._import_structure.keys() )
# This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
# some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
# (potentiall re-) add them.
with open(os.path.join(__a , """__init__.py""" ) , """r""" ) as f:
UpperCamelCase__ = f.read()
import_structure_keys.update(set(re.findall(R"""import_structure\[\"([^\"]*)\"\]""" , __a ) ) )
UpperCamelCase__ = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in import_structure_keys
]
if len(__a ) > 0:
UpperCamelCase__ = """\n""".join(f"- {module}" for module in module_not_registered )
raise ValueError(
"""The following submodules are not properly registed in the main init of Transformers:\n"""
f"{list_of_modules}\n"
"""Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.""" )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 178
| 0
|
from __future__ import annotations
def _SCREAMING_SNAKE_CASE ( lowercase : list[int] ):
'''simple docstring'''
lowerCamelCase_ = len(lowercase ) // 2
# choose the middle 3 elements
lowerCamelCase_ = lst[m - 1 : m + 2]
# if middle element is peak
if three[1] > three[0] and three[1] > three[2]:
return three[1]
# if increasing, recurse on right
elif three[0] < three[2]:
if len(lst[:m] ) == 2:
m -= 1
return peak(lst[m:] )
# decreasing
else:
if len(lst[:m] ) == 2:
m += 1
return peak(lst[:m] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 204
|
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class A( UpperCamelCase ):
'''simple docstring'''
def __init__( self : str , A_ : TransformeraDModel , A_ : AutoencoderKL , A_ : KarrasDiffusionSchedulers , A_ : Optional[Dict[int, str]] = None , ) -> str:
"""simple docstring"""
super().__init__()
self.register_modules(transformer=A_ , vae=A_ , scheduler=A_ )
# create a imagenet -> id dictionary for easier use
lowerCamelCase_ = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split(',' ):
lowerCamelCase_ = int(A_ )
lowerCamelCase_ = dict(sorted(self.labels.items() ) )
def a__ ( self : Optional[int] , A_ : Union[str, List[str]] ) -> List[int]:
"""simple docstring"""
if not isinstance(A_ , A_ ):
lowerCamelCase_ = list(A_ )
for l in label:
if l not in self.labels:
raise ValueError(
f"""{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.""" )
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self : Any , A_ : List[int] , A_ : float = 4.0 , A_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , A_ : int = 50 , A_ : Optional[str] = "pil" , A_ : bool = True , ) -> Union[ImagePipelineOutput, Tuple]:
"""simple docstring"""
lowerCamelCase_ = len(A_ )
lowerCamelCase_ = self.transformer.config.sample_size
lowerCamelCase_ = self.transformer.config.in_channels
lowerCamelCase_ = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=A_ , device=self.device , dtype=self.transformer.dtype , )
lowerCamelCase_ = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
lowerCamelCase_ = torch.tensor(A_ , device=self.device ).reshape(-1 )
lowerCamelCase_ = torch.tensor([1000] * batch_size , device=self.device )
lowerCamelCase_ = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(A_ )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
lowerCamelCase_ = latent_model_input[: len(A_ ) // 2]
lowerCamelCase_ = torch.cat([half, half] , dim=0 )
lowerCamelCase_ = self.scheduler.scale_model_input(A_ , A_ )
lowerCamelCase_ = t
if not torch.is_tensor(A_ ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
lowerCamelCase_ = latent_model_input.device.type == 'mps'
if isinstance(A_ , A_ ):
lowerCamelCase_ = torch.floataa if is_mps else torch.floataa
else:
lowerCamelCase_ = torch.intaa if is_mps else torch.intaa
lowerCamelCase_ = torch.tensor([timesteps] , dtype=A_ , device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
lowerCamelCase_ = timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
lowerCamelCase_ = timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
lowerCamelCase_ = self.transformer(
A_ , timestep=A_ , class_labels=A_ ).sample
# perform guidance
if guidance_scale > 1:
lowerCamelCase_ , lowerCamelCase_ = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
lowerCamelCase_ , lowerCamelCase_ = torch.split(A_ , len(A_ ) // 2 , dim=0 )
lowerCamelCase_ = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
lowerCamelCase_ = torch.cat([half_eps, half_eps] , dim=0 )
lowerCamelCase_ = torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
lowerCamelCase_ , lowerCamelCase_ = torch.split(A_ , A_ , dim=1 )
else:
lowerCamelCase_ = noise_pred
# compute previous image: x_t -> x_t-1
lowerCamelCase_ = self.scheduler.step(A_ , A_ , A_ ).prev_sample
if guidance_scale > 1:
lowerCamelCase_ , lowerCamelCase_ = latent_model_input.chunk(2 , dim=0 )
else:
lowerCamelCase_ = latent_model_input
lowerCamelCase_ = 1 / self.vae.config.scaling_factor * latents
lowerCamelCase_ = self.vae.decode(A_ ).sample
lowerCamelCase_ = (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowerCamelCase_ = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowerCamelCase_ = self.numpy_to_pil(A_ )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=A_ )
| 204
| 1
|
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class UpperCAmelCase_ :
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=99 , __SCREAMING_SNAKE_CASE=13 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=30 , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=None , ):
"""simple docstring"""
UpperCamelCase : List[Any] = parent
UpperCamelCase : List[str] = batch_size
UpperCamelCase : str = decoder_seq_length
# For common tests
UpperCamelCase : Optional[int] = self.decoder_seq_length
UpperCamelCase : Optional[int] = is_training
UpperCamelCase : List[str] = use_attention_mask
UpperCamelCase : List[Any] = use_labels
UpperCamelCase : Dict = vocab_size
UpperCamelCase : int = d_model
UpperCamelCase : List[str] = d_model
UpperCamelCase : List[str] = decoder_layers
UpperCamelCase : str = decoder_layers
UpperCamelCase : Dict = decoder_ffn_dim
UpperCamelCase : Any = decoder_attention_heads
UpperCamelCase : int = decoder_attention_heads
UpperCamelCase : Dict = eos_token_id
UpperCamelCase : Any = bos_token_id
UpperCamelCase : List[str] = pad_token_id
UpperCamelCase : Optional[int] = decoder_start_token_id
UpperCamelCase : Dict = use_cache
UpperCamelCase : str = max_position_embeddings
UpperCamelCase : str = None
UpperCamelCase : Optional[Any] = decoder_seq_length
UpperCamelCase : List[str] = 2
UpperCamelCase : List[str] = 1
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : int = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
UpperCamelCase : Optional[Any] = None
if self.use_attention_mask:
UpperCamelCase : List[str] = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
UpperCamelCase : str = None
if self.use_labels:
UpperCamelCase : Any = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
UpperCamelCase : Optional[int] = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = True
UpperCamelCase : Tuple = TrOCRDecoder(config=__SCREAMING_SNAKE_CASE ).to(__SCREAMING_SNAKE_CASE ).eval()
UpperCamelCase : Dict = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
UpperCamelCase : List[str] = model(__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE )
UpperCamelCase : List[str] = model(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Any = model(__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE )
self.parent.assertTrue(len(__SCREAMING_SNAKE_CASE ) == len(__SCREAMING_SNAKE_CASE ) )
self.parent.assertTrue(len(__SCREAMING_SNAKE_CASE ) == len(__SCREAMING_SNAKE_CASE ) + 1 )
UpperCamelCase : Tuple = outputs['''past_key_values''']
# create hypothetical next token and extent to next_input_ids
UpperCamelCase : Optional[Any] = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
# append to next input_ids and
UpperCamelCase : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCamelCase : List[Any] = model(__SCREAMING_SNAKE_CASE )['''last_hidden_state''']
UpperCamelCase : Optional[int] = model(__SCREAMING_SNAKE_CASE , past_key_values=__SCREAMING_SNAKE_CASE )['''last_hidden_state''']
# select random slice
UpperCamelCase : Union[str, Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCamelCase : Optional[Any] = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
UpperCamelCase : Any = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1e-3 )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Dict = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : int = config_and_inputs
UpperCamelCase : Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _a, _a, _a, unittest.TestCase):
'''simple docstring'''
__UpperCamelCase : Optional[Any] = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
__UpperCamelCase : List[Any] = (TrOCRForCausalLM,) if is_torch_available() else ()
__UpperCamelCase : int = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
__UpperCamelCase : Optional[int] = True
__UpperCamelCase : Optional[Any] = False
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Tuple = TrOCRStandaloneDecoderModelTester(self , is_training=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Any = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE )
def _lowercase ( self ):
"""simple docstring"""
pass
def _lowercase ( self ):
"""simple docstring"""
pass
def _lowercase ( self ):
"""simple docstring"""
pass
def _lowercase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*__SCREAMING_SNAKE_CASE )
def _lowercase ( self ):
"""simple docstring"""
return
@unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
def _lowercase ( self ):
"""simple docstring"""
pass
| 315
|
def a ( SCREAMING_SNAKE_CASE_ : str = "The quick brown fox jumps over the lazy dog" , ):
"""simple docstring"""
UpperCamelCase : Any = set()
# Replace all the whitespace in our sentence
UpperCamelCase : Union[str, Any] = input_str.replace(''' ''' , '''''' )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(SCREAMING_SNAKE_CASE_ ) == 2_6
def a ( SCREAMING_SNAKE_CASE_ : str = "The quick brown fox jumps over the lazy dog" , ):
"""simple docstring"""
UpperCamelCase : str = [False] * 2_6
for char in input_str:
if char.islower():
UpperCamelCase : List[Any] = True
elif char.isupper():
UpperCamelCase : List[Any] = True
return all(SCREAMING_SNAKE_CASE_ )
def a ( SCREAMING_SNAKE_CASE_ : str = "The quick brown fox jumps over the lazy dog" , ):
"""simple docstring"""
return len({char for char in input_str.lower() if char.isalpha()} ) == 2_6
def a ( ):
"""simple docstring"""
from timeit import timeit
UpperCamelCase : int = '''from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest'''
print(timeit('''is_pangram()''' , setup=SCREAMING_SNAKE_CASE_ ) )
print(timeit('''is_pangram_faster()''' , setup=SCREAMING_SNAKE_CASE_ ) )
print(timeit('''is_pangram_fastest()''' , setup=SCREAMING_SNAKE_CASE_ ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 315
| 1
|
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
__lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
__lowerCamelCase : Any = {
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/config.json""",
# See all BART models at https://huggingface.co/models?filter=bart
}
class A__ ( __snake_case ):
_UpperCAmelCase :Dict = 'bart'
_UpperCAmelCase :str = ['past_key_values']
_UpperCAmelCase :Any = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self , A_=5_0265 , A_=1024 , A_=12 , A_=4096 , A_=16 , A_=12 , A_=4096 , A_=16 , A_=0.0 , A_=0.0 , A_="gelu" , A_=1024 , A_=0.1 , A_=0.0 , A_=0.0 , A_=0.02 , A_=0.0 , A_=False , A_=True , A_=3 , A_=1 , A_=0 , A_=2 , A_=True , A_=2 , A_=2 , **A_ , ):
'''simple docstring'''
UpperCamelCase : int = vocab_size
UpperCamelCase : List[Any] = max_position_embeddings
UpperCamelCase : Any = d_model
UpperCamelCase : Optional[Any] = encoder_ffn_dim
UpperCamelCase : List[Any] = encoder_layers
UpperCamelCase : int = encoder_attention_heads
UpperCamelCase : Optional[int] = decoder_ffn_dim
UpperCamelCase : List[str] = decoder_layers
UpperCamelCase : Optional[int] = decoder_attention_heads
UpperCamelCase : int = dropout
UpperCamelCase : int = attention_dropout
UpperCamelCase : Tuple = activation_dropout
UpperCamelCase : Tuple = activation_function
UpperCamelCase : int = init_std
UpperCamelCase : List[Any] = encoder_layerdrop
UpperCamelCase : List[str] = decoder_layerdrop
UpperCamelCase : Dict = classifier_dropout
UpperCamelCase : Optional[int] = use_cache
UpperCamelCase : List[Any] = encoder_layers
UpperCamelCase : int = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
num_labels=A_ , pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , is_encoder_decoder=A_ , decoder_start_token_id=A_ , forced_eos_token_id=A_ , **A_ , )
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated" , A_ ):
UpperCamelCase : int = self.bos_token_id
warnings.warn(
F"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """
"The config can simply be saved and uploaded again to be fixed." )
class A__ ( __snake_case ):
@property
def __UpperCamelCase( self ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
UpperCamelCase : Optional[int] = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
UpperCamelCase : List[str] = {0: "batch"}
UpperCamelCase : Dict = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
UpperCamelCase : Dict = {0: "batch", 1: "decoder_sequence"}
UpperCamelCase : Union[str, Any] = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(A_ , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
UpperCamelCase : Any = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
UpperCamelCase , UpperCamelCase : Optional[int] = self.num_layers
for i in range(A_ ):
UpperCamelCase : Optional[Any] = {0: "batch", 2: "past_sequence + sequence"}
UpperCamelCase : Union[str, Any] = {0: "batch", 2: "past_sequence + sequence"}
else:
UpperCamelCase : Optional[Any] = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
def __UpperCamelCase( self ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
UpperCamelCase : Tuple = super().outputs
else:
UpperCamelCase : Dict = super(A_ , self ).outputs
if self.use_past:
UpperCamelCase , UpperCamelCase : int = self.num_layers
for i in range(A_ ):
UpperCamelCase : int = {0: "batch", 2: "past_sequence + sequence"}
UpperCamelCase : Tuple = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def __UpperCamelCase( self , A_ , A_ = -1 , A_ = -1 , A_ = False , A_ = None , ):
'''simple docstring'''
UpperCamelCase : List[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
A_ , A_ , A_ , A_ , A_ )
# Generate decoder inputs
UpperCamelCase : List[Any] = seq_length if not self.use_past else 1
UpperCamelCase : Tuple = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
A_ , A_ , A_ , A_ , A_ )
UpperCamelCase : Optional[int] = {F"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
UpperCamelCase : List[Any] = dict(**A_ , **A_ )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
UpperCamelCase , UpperCamelCase : Optional[Any] = common_inputs["input_ids"].shape
UpperCamelCase : List[Any] = common_inputs["decoder_input_ids"].shape[1]
UpperCamelCase , UpperCamelCase : List[str] = self.num_attention_heads
UpperCamelCase : int = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
UpperCamelCase : List[Any] = decoder_seq_length + 3
UpperCamelCase : str = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
UpperCamelCase : int = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(A_ , A_ )] , dim=1 )
UpperCamelCase : int = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
UpperCamelCase , UpperCamelCase : Union[str, Any] = self.num_layers
UpperCamelCase : Any = min(A_ , A_ )
UpperCamelCase : List[str] = max(A_ , A_ ) - min_num_layers
UpperCamelCase : Dict = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(A_ ):
common_inputs["past_key_values"].append(
(
torch.zeros(A_ ),
torch.zeros(A_ ),
torch.zeros(A_ ),
torch.zeros(A_ ),
) )
# TODO: test this.
UpperCamelCase : Optional[Any] = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(A_ , A_ ):
common_inputs["past_key_values"].append((torch.zeros(A_ ), torch.zeros(A_ )) )
return common_inputs
def __UpperCamelCase( self , A_ , A_ = -1 , A_ = -1 , A_ = False , A_ = None , ):
'''simple docstring'''
UpperCamelCase : int = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
A_ , A_ , A_ , A_ , A_ )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
UpperCamelCase , UpperCamelCase : Union[str, Any] = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
UpperCamelCase : Optional[Any] = seqlen + 2
UpperCamelCase , UpperCamelCase : List[Any] = self.num_layers
UpperCamelCase , UpperCamelCase : Optional[int] = self.num_attention_heads
UpperCamelCase : str = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
UpperCamelCase : Optional[Any] = common_inputs["attention_mask"].dtype
UpperCamelCase : int = torch.cat(
[common_inputs["attention_mask"], torch.ones(A_ , A_ , dtype=A_ )] , dim=1 )
UpperCamelCase : Optional[Any] = [
(torch.zeros(A_ ), torch.zeros(A_ )) for _ in range(A_ )
]
return common_inputs
def __UpperCamelCase( self , A_ , A_ = -1 , A_ = -1 , A_ = False , A_ = None , ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = compute_effective_axis_dimension(
A_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCamelCase : Union[str, Any] = tokenizer.num_special_tokens_to_add(A_ )
UpperCamelCase : int = compute_effective_axis_dimension(
A_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=A_ )
# Generate dummy inputs according to compute batch and sequence
UpperCamelCase : int = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
UpperCamelCase : Dict = dict(tokenizer(A_ , return_tensors=A_ ) )
return common_inputs
def __UpperCamelCase( self , A_ , A_ = -1 , A_ = -1 , A_ = False , A_ = None , ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
UpperCamelCase : Optional[int] = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
A_ , batch_size=A_ , seq_length=A_ , is_pair=A_ , framework=A_ )
elif self.task == "causal-lm":
UpperCamelCase : List[str] = self._generate_dummy_inputs_for_causal_lm(
A_ , batch_size=A_ , seq_length=A_ , is_pair=A_ , framework=A_ )
else:
UpperCamelCase : List[str] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
A_ , batch_size=A_ , seq_length=A_ , is_pair=A_ , framework=A_ )
return common_inputs
def __UpperCamelCase( self , A_ , A_ , A_ , A_ ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
UpperCamelCase : Optional[Any] = super()._flatten_past_key_values_(A_ , A_ , A_ , A_ )
else:
UpperCamelCase : Optional[Any] = super(A_ , self )._flatten_past_key_values_(
A_ , A_ , A_ , A_ )
| 52
|
'''simple docstring'''
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def __a(SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : str ):
'''simple docstring'''
_lowerCAmelCase = FunnelConfig.from_json_file(SCREAMING_SNAKE_CASE_ )
print(F'''Building PyTorch model from configuration: {config}''' )
_lowerCAmelCase = FunnelBaseModel(SCREAMING_SNAKE_CASE_ ) if base_model else FunnelModel(SCREAMING_SNAKE_CASE_ )
# Load weights from tf checkpoint
load_tf_weights_in_funnel(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
| 158
| 0
|
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase = logging.get_logger(__name__)
def __lowerCAmelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )-> Union[str, Any]:
"""simple docstring"""
snake_case_ = original_name.split('''.''' )[0]
snake_case_ = key.split('''.''' )
snake_case_ = int(key_list[key_list.index(SCREAMING_SNAKE_CASE ) - 2] )
snake_case_ = int(key_list[key_list.index(SCREAMING_SNAKE_CASE ) - 1] )
snake_case_ = orig_block_num - offset
snake_case_ = key.replace(f'''{orig_block_num}.{layer_num}.{original_name}''' , f'''block.{new_block_num}.{layer_num}.{new_name}''' )
return key
def __lowerCAmelCase (SCREAMING_SNAKE_CASE )-> Optional[int]:
"""simple docstring"""
snake_case_ = OrderedDict()
snake_case_ , snake_case_ = 0, 0
for key, value in state_dict.items():
if key.startswith('''network''' ):
snake_case_ = key.replace('''network''' , '''poolformer.encoder''' )
if "proj" in key:
# Works for the first embedding as well as the internal embedding layers
if key.endswith('''bias''' ) and "patch_embed" not in key:
patch_emb_offset += 1
snake_case_ = key[: key.find('''proj''' )]
snake_case_ = key.replace(SCREAMING_SNAKE_CASE , f'''patch_embeddings.{total_embed_found}.''' )
snake_case_ = key.replace('''proj''' , '''projection''' )
if key.endswith('''bias''' ):
total_embed_found += 1
if "patch_embeddings" in key:
snake_case_ = '''poolformer.encoder.''' + key
if "mlp.fc1" in key:
snake_case_ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''mlp.fc1''' , '''output.conv1''' )
if "mlp.fc2" in key:
snake_case_ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''mlp.fc2''' , '''output.conv2''' )
if "norm1" in key:
snake_case_ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''norm1''' , '''before_norm''' )
if "norm2" in key:
snake_case_ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''norm2''' , '''after_norm''' )
if "layer_scale_1" in key:
snake_case_ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''layer_scale_1''' , '''layer_scale_1''' )
if "layer_scale_2" in key:
snake_case_ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''layer_scale_2''' , '''layer_scale_2''' )
if "head" in key:
snake_case_ = key.replace('''head''' , '''classifier''' )
snake_case_ = value
return new_state_dict
def __lowerCAmelCase ()-> str:
"""simple docstring"""
snake_case_ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
snake_case_ = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw )
return image
@torch.no_grad()
def __lowerCAmelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )-> Any:
"""simple docstring"""
snake_case_ = PoolFormerConfig()
# set attributes based on model_name
snake_case_ = '''huggingface/label-files'''
snake_case_ = model_name[-3:]
snake_case_ = 1000
snake_case_ = '''imagenet-1k-id2label.json'''
snake_case_ = (1, 1000)
# set config attributes
snake_case_ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type='''dataset''' ) , '''r''' ) )
snake_case_ = {int(SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
snake_case_ = idalabel
snake_case_ = {v: k for k, v in idalabel.items()}
if size == "s12":
snake_case_ = [2, 2, 6, 2]
snake_case_ = [64, 128, 320, 512]
snake_case_ = 4.0
snake_case_ = 0.9
elif size == "s24":
snake_case_ = [4, 4, 12, 4]
snake_case_ = [64, 128, 320, 512]
snake_case_ = 4.0
snake_case_ = 0.9
elif size == "s36":
snake_case_ = [6, 6, 18, 6]
snake_case_ = [64, 128, 320, 512]
snake_case_ = 4.0
snake_case_ = 1E-6
snake_case_ = 0.9
elif size == "m36":
snake_case_ = [6, 6, 18, 6]
snake_case_ = [96, 192, 384, 768]
snake_case_ = 4.0
snake_case_ = 1E-6
snake_case_ = 0.9_5
elif size == "m48":
snake_case_ = [8, 8, 24, 8]
snake_case_ = [96, 192, 384, 768]
snake_case_ = 4.0
snake_case_ = 1E-6
snake_case_ = 0.9_5
else:
raise ValueError(f'''Size {size} not supported''' )
# load image processor
snake_case_ = PoolFormerImageProcessor(crop_pct=SCREAMING_SNAKE_CASE )
# Prepare image
snake_case_ = prepare_img()
snake_case_ = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
logger.info(f'''Converting model {model_name}...''' )
# load original state dict
snake_case_ = torch.load(SCREAMING_SNAKE_CASE , map_location=torch.device('''cpu''' ) )
# rename keys
snake_case_ = rename_keys(SCREAMING_SNAKE_CASE )
# create HuggingFace model and load state dict
snake_case_ = PoolFormerForImageClassification(SCREAMING_SNAKE_CASE )
model.load_state_dict(SCREAMING_SNAKE_CASE )
model.eval()
# Define image processor
snake_case_ = PoolFormerImageProcessor(crop_pct=SCREAMING_SNAKE_CASE )
snake_case_ = image_processor(images=prepare_img() , return_tensors='''pt''' ).pixel_values
# forward pass
snake_case_ = model(SCREAMING_SNAKE_CASE )
snake_case_ = outputs.logits
# define expected logit slices for different models
if size == "s12":
snake_case_ = torch.tensor([-0.3_0_4_5, -0.6_7_5_8, -0.4_8_6_9] )
elif size == "s24":
snake_case_ = torch.tensor([0.4_4_0_2, -0.1_3_7_4, -0.8_0_4_5] )
elif size == "s36":
snake_case_ = torch.tensor([-0.6_0_8_0, -0.5_1_3_3, -0.5_8_9_8] )
elif size == "m36":
snake_case_ = torch.tensor([0.3_9_5_2, 0.2_2_6_3, -1.2_6_6_8] )
elif size == "m48":
snake_case_ = torch.tensor([0.1_1_6_7, -0.0_6_5_6, -0.3_4_2_3] )
else:
raise ValueError(f'''Size {size} not supported''' )
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1E-2 )
# finally, save model and image processor
logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(SCREAMING_SNAKE_CASE ).mkdir(exist_ok=SCREAMING_SNAKE_CASE )
model.save_pretrained(SCREAMING_SNAKE_CASE )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""poolformer_s12""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
UpperCAmelCase = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 267
|
def __lowerCAmelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )-> int:
"""simple docstring"""
return x if y == 0 else greatest_common_divisor(SCREAMING_SNAKE_CASE , x % y )
def __lowerCAmelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )-> int:
"""simple docstring"""
return (x * y) // greatest_common_divisor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase (SCREAMING_SNAKE_CASE = 20 )-> int:
"""simple docstring"""
snake_case_ = 1
for i in range(1 , n + 1 ):
snake_case_ = lcm(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return g
if __name__ == "__main__":
print(f'''{solution() = }''')
| 267
| 1
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class a_ ( lowerCamelCase_ ):
"""simple docstring"""
__UpperCAmelCase = 42
class a_ ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
@register_to_config
def __init__( self : int ,snake_case : int = 65536 ,snake_case : Optional[int] = None ,snake_case : int = 2 ,snake_case : int = 2 ,snake_case : int = 0 ,snake_case : str = "fourier" ,snake_case : bool = True ,snake_case : bool = False ,snake_case : float = 0.0 ,snake_case : Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") ,snake_case : Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") ,snake_case : Tuple[str] = "UNetMidBlock1D" ,snake_case : str = None ,snake_case : Tuple[int] = (32, 32, 64) ,snake_case : str = None ,snake_case : int = 8 ,snake_case : int = 1 ,snake_case : bool = False ,):
super().__init__()
SCREAMING_SNAKE_CASE =sample_size
# time
if time_embedding_type == "fourier":
SCREAMING_SNAKE_CASE =GaussianFourierProjection(
embedding_size=8 ,set_W_to_weight=snake_case ,log=snake_case ,flip_sin_to_cos=snake_case )
SCREAMING_SNAKE_CASE =2 * block_out_channels[0]
elif time_embedding_type == "positional":
SCREAMING_SNAKE_CASE =Timesteps(
block_out_channels[0] ,flip_sin_to_cos=snake_case ,downscale_freq_shift=snake_case )
SCREAMING_SNAKE_CASE =block_out_channels[0]
if use_timestep_embedding:
SCREAMING_SNAKE_CASE =block_out_channels[0] * 4
SCREAMING_SNAKE_CASE =TimestepEmbedding(
in_channels=snake_case ,time_embed_dim=snake_case ,act_fn=snake_case ,out_dim=block_out_channels[0] ,)
SCREAMING_SNAKE_CASE =nn.ModuleList([] )
SCREAMING_SNAKE_CASE =None
SCREAMING_SNAKE_CASE =nn.ModuleList([] )
SCREAMING_SNAKE_CASE =None
# down
SCREAMING_SNAKE_CASE =in_channels
for i, down_block_type in enumerate(snake_case ):
SCREAMING_SNAKE_CASE =output_channel
SCREAMING_SNAKE_CASE =block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
SCREAMING_SNAKE_CASE =i == len(snake_case ) - 1
SCREAMING_SNAKE_CASE =get_down_block(
snake_case ,num_layers=snake_case ,in_channels=snake_case ,out_channels=snake_case ,temb_channels=block_out_channels[0] ,add_downsample=not is_final_block or downsample_each_block ,)
self.down_blocks.append(snake_case )
# mid
SCREAMING_SNAKE_CASE =get_mid_block(
snake_case ,in_channels=block_out_channels[-1] ,mid_channels=block_out_channels[-1] ,out_channels=block_out_channels[-1] ,embed_dim=block_out_channels[0] ,num_layers=snake_case ,add_downsample=snake_case ,)
# up
SCREAMING_SNAKE_CASE =list(reversed(snake_case ) )
SCREAMING_SNAKE_CASE =reversed_block_out_channels[0]
if out_block_type is None:
SCREAMING_SNAKE_CASE =out_channels
else:
SCREAMING_SNAKE_CASE =block_out_channels[0]
for i, up_block_type in enumerate(snake_case ):
SCREAMING_SNAKE_CASE =output_channel
SCREAMING_SNAKE_CASE =(
reversed_block_out_channels[i + 1] if i < len(snake_case ) - 1 else final_upsample_channels
)
SCREAMING_SNAKE_CASE =i == len(snake_case ) - 1
SCREAMING_SNAKE_CASE =get_up_block(
snake_case ,num_layers=snake_case ,in_channels=snake_case ,out_channels=snake_case ,temb_channels=block_out_channels[0] ,add_upsample=not is_final_block ,)
self.up_blocks.append(snake_case )
SCREAMING_SNAKE_CASE =output_channel
# out
SCREAMING_SNAKE_CASE =norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 ,32 )
SCREAMING_SNAKE_CASE =get_out_block(
out_block_type=snake_case ,num_groups_out=snake_case ,embed_dim=block_out_channels[0] ,out_channels=snake_case ,act_fn=snake_case ,fc_dim=block_out_channels[-1] // 4 ,)
def _lowerCAmelCase ( self : Any ,snake_case : torch.FloatTensor ,snake_case : Union[torch.Tensor, float, int] ,snake_case : bool = True ,):
SCREAMING_SNAKE_CASE =timestep
if not torch.is_tensor(snake_case ):
SCREAMING_SNAKE_CASE =torch.tensor([timesteps] ,dtype=torch.long ,device=sample.device )
elif torch.is_tensor(snake_case ) and len(timesteps.shape ) == 0:
SCREAMING_SNAKE_CASE =timesteps[None].to(sample.device )
SCREAMING_SNAKE_CASE =self.time_proj(snake_case )
if self.config.use_timestep_embedding:
SCREAMING_SNAKE_CASE =self.time_mlp(snake_case )
else:
SCREAMING_SNAKE_CASE =timestep_embed[..., None]
SCREAMING_SNAKE_CASE =timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
SCREAMING_SNAKE_CASE =timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
SCREAMING_SNAKE_CASE =()
for downsample_block in self.down_blocks:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =downsample_block(hidden_states=snake_case ,temb=snake_case )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
SCREAMING_SNAKE_CASE =self.mid_block(snake_case ,snake_case )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
SCREAMING_SNAKE_CASE =down_block_res_samples[-1:]
SCREAMING_SNAKE_CASE =down_block_res_samples[:-1]
SCREAMING_SNAKE_CASE =upsample_block(snake_case ,res_hidden_states_tuple=snake_case ,temb=snake_case )
# 5. post-process
if self.out_block:
SCREAMING_SNAKE_CASE =self.out_block(snake_case ,snake_case )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=snake_case )
| 334
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class a_ ( lowerCamelCase_ ):
"""simple docstring"""
__UpperCAmelCase = 'vit_mae'
def __init__( self : Union[str, Any] ,snake_case : Any=768 ,snake_case : List[str]=12 ,snake_case : Optional[int]=12 ,snake_case : int=3072 ,snake_case : List[Any]="gelu" ,snake_case : str=0.0 ,snake_case : str=0.0 ,snake_case : Optional[Any]=0.02 ,snake_case : Dict=1e-12 ,snake_case : List[str]=224 ,snake_case : Any=16 ,snake_case : Any=3 ,snake_case : Tuple=True ,snake_case : List[Any]=16 ,snake_case : List[str]=512 ,snake_case : List[Any]=8 ,snake_case : Dict=2048 ,snake_case : Union[str, Any]=0.75 ,snake_case : Union[str, Any]=False ,**snake_case : Optional[int] ,):
super().__init__(**snake_case )
SCREAMING_SNAKE_CASE =hidden_size
SCREAMING_SNAKE_CASE =num_hidden_layers
SCREAMING_SNAKE_CASE =num_attention_heads
SCREAMING_SNAKE_CASE =intermediate_size
SCREAMING_SNAKE_CASE =hidden_act
SCREAMING_SNAKE_CASE =hidden_dropout_prob
SCREAMING_SNAKE_CASE =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE =initializer_range
SCREAMING_SNAKE_CASE =layer_norm_eps
SCREAMING_SNAKE_CASE =image_size
SCREAMING_SNAKE_CASE =patch_size
SCREAMING_SNAKE_CASE =num_channels
SCREAMING_SNAKE_CASE =qkv_bias
SCREAMING_SNAKE_CASE =decoder_num_attention_heads
SCREAMING_SNAKE_CASE =decoder_hidden_size
SCREAMING_SNAKE_CASE =decoder_num_hidden_layers
SCREAMING_SNAKE_CASE =decoder_intermediate_size
SCREAMING_SNAKE_CASE =mask_ratio
SCREAMING_SNAKE_CASE =norm_pix_loss
| 334
| 1
|
from argparse import ArgumentParser
from .env import EnvironmentCommand
def lowercase_ ( ) -> int:
"""simple docstring"""
snake_case = ArgumentParser("Diffusers CLI tool" , usage="diffusers-cli <command> [<args>]" )
snake_case = parser.add_subparsers(help="diffusers-cli command helpers" )
# Register commands
EnvironmentCommand.register_subcommand(A__ )
# Let's go
snake_case = parser.parse_args()
if not hasattr(A__ , "func" ):
parser.print_help()
exit(1 )
# Run
snake_case = args.func(A__ )
service.run()
if __name__ == "__main__":
main()
| 137
|
from ..utils import DummyObject, requires_backends
class lowerCamelCase ( metaclass=A_ ):
UpperCAmelCase__ : Union[str, Any] = ["onnx"]
def __init__(self : Tuple , *_A : Optional[int] , **_A : Any ) -> Dict:
requires_backends(self , ["onnx"] )
@classmethod
def UpperCAmelCase(cls : int , *_A : Dict , **_A : List[Any] ) -> Optional[Any]:
requires_backends(cls , ["onnx"] )
@classmethod
def UpperCAmelCase(cls : Dict , *_A : Tuple , **_A : Optional[Any] ) -> int:
requires_backends(cls , ["onnx"] )
| 137
| 1
|
from __future__ import annotations
from math import pi, sqrt
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if inductance <= 0:
raise ValueError('''Inductance cannot be 0 or negative''' )
elif capacitance <= 0:
raise ValueError('''Capacitance cannot be 0 or negative''' )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 43
|
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
lowercase = "\\n Text data.\n Second line of data."
lowercase = "file"
@pytest.fixture(scope='session')
def __UpperCAmelCase ( a_):
snake_case_ = tmp_path_factory.mktemp('data') / (FILE_PATH + '.zstd')
snake_case_ = bytes(a_ , 'utf-8')
with zstd.open(a_ , 'wb') as f:
f.write(a_)
return path
@pytest.fixture
def __UpperCAmelCase ( a_):
with open(os.path.join(tmpfs.local_root_dir , a_) , 'w') as f:
f.write(a_)
return FILE_PATH
@pytest.mark.parametrize('compression_format' , ['gzip', 'xz', 'zstd'])
def __UpperCAmelCase ( a_ , a_ , a_ , a_ , a_ , a_):
snake_case_ = {'gzip': gz_file, 'xz': xz_file, 'zstd': zstd_path}
snake_case_ = input_paths[compression_format]
snake_case_ = tmp_path / 'cache'
snake_case_ = DownloadConfig(cache_dir=a_ , extract_compressed_file=a_)
snake_case_ = cached_path(a_ , download_config=a_)
with open(a_) as f:
snake_case_ = f.read()
with open(a_) as f:
snake_case_ = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('default_extracted' , [True, False])
@pytest.mark.parametrize('default_cache_dir' , [True, False])
def __UpperCAmelCase ( a_ , a_ , a_ , a_ , a_):
snake_case_ = 'custom_cache'
snake_case_ = 'custom_extracted_dir'
snake_case_ = tmp_path / 'custom_extracted_path'
if default_extracted:
snake_case_ = ('downloads' if default_cache_dir else custom_cache_dir, 'extracted')
else:
monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_DIR' , a_)
monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(a_))
snake_case_ = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
snake_case_ = xz_file
snake_case_ = (
DownloadConfig(extract_compressed_file=a_)
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=a_)
)
snake_case_ = cached_path(a_ , download_config=a_)
assert Path(a_).parent.parts[-2:] == expected
def __UpperCAmelCase ( a_):
# absolute path
snake_case_ = str(Path(a_).resolve())
assert cached_path(a_) == text_file
# relative path
snake_case_ = str(Path(a_).resolve().relative_to(Path(os.getcwd())))
assert cached_path(a_) == text_file
def __UpperCAmelCase ( a_):
# absolute path
snake_case_ = str(tmp_path.resolve() / '__missing_file__.txt')
with pytest.raises(a_):
cached_path(a_)
# relative path
snake_case_ = './__missing_file__.txt'
with pytest.raises(a_):
cached_path(a_)
def __UpperCAmelCase ( a_):
snake_case_ = get_from_cache(f'''tmp://{tmpfs_file}''')
with open(a_) as f:
snake_case_ = f.read()
assert output_file_content == FILE_CONTENT
@patch('datasets.config.HF_DATASETS_OFFLINE' , a_)
def __UpperCAmelCase ( ):
with pytest.raises(a_):
cached_path('https://huggingface.co')
@patch('datasets.config.HF_DATASETS_OFFLINE' , a_)
def __UpperCAmelCase ( a_):
snake_case_ = tmp_path_factory.mktemp('data') / 'file.html'
with pytest.raises(a_):
http_get('https://huggingface.co' , temp_file=a_)
with pytest.raises(a_):
http_head('https://huggingface.co')
@patch('datasets.config.HF_DATASETS_OFFLINE' , a_)
def __UpperCAmelCase ( a_):
snake_case_ = tmp_path_factory.mktemp('data') / 'file.html'
with pytest.raises(a_):
ftp_get('ftp://huggingface.co' , temp_file=a_)
with pytest.raises(a_):
ftp_head('ftp://huggingface.co')
@patch('datasets.config.HF_DATASETS_OFFLINE' , a_)
def __UpperCAmelCase ( a_):
snake_case_ = tmp_path_factory.mktemp('data') / 'file.html'
with pytest.raises(a_):
fsspec_get('s3://huggingface.co' , temp_file=a_)
with pytest.raises(a_):
fsspec_head('s3://huggingface.co')
| 178
| 0
|
'''simple docstring'''
def snake_case__ ( lowerCamelCase__ : int = 1_0_0_0 ) -> int:
A_ : int = 1, 1
A_ : Dict = 2
while True:
A_ : List[Any] = 0
A_ : str = fa + fa
A_ : str = fa, f
index += 1
for _ in str(lowerCamelCase__ ):
i += 1
if i == n:
break
return index
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 356
|
'''simple docstring'''
from __future__ import annotations
from PIL import Image
# Define glider example
snake_case__ = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
snake_case__ = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def snake_case__ ( lowerCamelCase__ : list[list[int]] ) -> list[list[int]]:
A_ : str = []
for i in range(len(lowerCamelCase__ ) ):
A_ : Optional[Any] = []
for j in range(len(cells[i] ) ):
# Get the number of live neighbours
A_ : Optional[int] = 0
if i > 0 and j > 0:
neighbour_count += cells[i - 1][j - 1]
if i > 0:
neighbour_count += cells[i - 1][j]
if i > 0 and j < len(cells[i] ) - 1:
neighbour_count += cells[i - 1][j + 1]
if j > 0:
neighbour_count += cells[i][j - 1]
if j < len(cells[i] ) - 1:
neighbour_count += cells[i][j + 1]
if i < len(lowerCamelCase__ ) - 1 and j > 0:
neighbour_count += cells[i + 1][j - 1]
if i < len(lowerCamelCase__ ) - 1:
neighbour_count += cells[i + 1][j]
if i < len(lowerCamelCase__ ) - 1 and j < len(cells[i] ) - 1:
neighbour_count += cells[i + 1][j + 1]
# Rules of the game of life (excerpt from Wikipedia):
# 1. Any live cell with two or three live neighbours survives.
# 2. Any dead cell with three live neighbours becomes a live cell.
# 3. All other live cells die in the next generation.
# Similarly, all other dead cells stay dead.
A_ : List[str] = cells[i][j] == 1
if (
(alive and 2 <= neighbour_count <= 3)
or not alive
and neighbour_count == 3
):
next_generation_row.append(1 )
else:
next_generation_row.append(0 )
next_generation.append(lowerCamelCase__ )
return next_generation
def snake_case__ ( lowerCamelCase__ : list[list[int]] , lowerCamelCase__ : int ) -> list[Image.Image]:
A_ : List[Any] = []
for _ in range(lowerCamelCase__ ):
# Create output image
A_ : Optional[int] = Image.new('''RGB''' , (len(cells[0] ), len(lowerCamelCase__ )) )
A_ : int = img.load()
# Save cells to image
for x in range(len(lowerCamelCase__ ) ):
for y in range(len(cells[0] ) ):
A_ : Optional[Any] = 2_5_5 - cells[y][x] * 2_5_5
A_ : str = (colour, colour, colour)
# Save image
images.append(lowerCamelCase__ )
A_ : Optional[int] = new_generation(lowerCamelCase__ )
return images
if __name__ == "__main__":
snake_case__ = generate_images(GLIDER, 16)
images[0].save("""out.gif""", save_all=True, append_images=images[1:])
| 4
| 0
|
"""simple docstring"""
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class lowercase_ :
'''simple docstring'''
def __init__( self : str , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[str]=99 , _UpperCAmelCase : Tuple=13 , _UpperCAmelCase : Union[str, Any]=16 , _UpperCAmelCase : Optional[Any]=7 , _UpperCAmelCase : str=True , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : Tuple=True , _UpperCAmelCase : Dict=False , _UpperCAmelCase : Dict=True , _UpperCAmelCase : Dict=2 , _UpperCAmelCase : int=32 , _UpperCAmelCase : Union[str, Any]=4 , _UpperCAmelCase : int=4 , _UpperCAmelCase : str=30 , _UpperCAmelCase : Optional[Any]=0 , _UpperCAmelCase : List[Any]=1 , _UpperCAmelCase : Union[str, Any]=2 , _UpperCAmelCase : List[Any]=None , ):
_A = parent
_A = batch_size
_A = decoder_seq_length
# For common tests
_A = self.decoder_seq_length
_A = is_training
_A = use_attention_mask
_A = use_labels
_A = vocab_size
_A = d_model
_A = d_model
_A = decoder_layers
_A = decoder_layers
_A = decoder_ffn_dim
_A = decoder_attention_heads
_A = decoder_attention_heads
_A = eos_token_id
_A = bos_token_id
_A = pad_token_id
_A = decoder_start_token_id
_A = use_cache
_A = max_position_embeddings
_A = None
_A = decoder_seq_length
_A = 2
_A = 1
def lowerCAmelCase_ ( self : str ):
_A = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
_A = None
if self.use_attention_mask:
_A = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
_A = None
if self.use_labels:
_A = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
_A = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def lowerCAmelCase_ ( self : List[str] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple , ):
_A = True
_A = TrOCRDecoder(config=_UpperCAmelCase ).to(_UpperCAmelCase ).eval()
_A = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
_A = model(_UpperCAmelCase , use_cache=_UpperCAmelCase )
_A = model(_UpperCAmelCase )
_A = model(_UpperCAmelCase , use_cache=_UpperCAmelCase )
self.parent.assertTrue(len(_UpperCAmelCase ) == len(_UpperCAmelCase ) )
self.parent.assertTrue(len(_UpperCAmelCase ) == len(_UpperCAmelCase ) + 1 )
_A = outputs['past_key_values']
# create hypothetical next token and extent to next_input_ids
_A = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
# append to next input_ids and
_A = torch.cat([input_ids, next_tokens] , dim=-1 )
_A = model(_UpperCAmelCase )['last_hidden_state']
_A = model(_UpperCAmelCase , past_key_values=_UpperCAmelCase )['last_hidden_state']
# select random slice
_A = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_A = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
_A = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-3 )
def lowerCAmelCase_ ( self : Dict ):
_A = self.prepare_config_and_inputs()
_A , _A , _A , _A = config_and_inputs
_A = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_torch
class lowercase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase : Tuple = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
UpperCAmelCase : Union[str, Any] = (TrOCRForCausalLM,) if is_torch_available() else ()
UpperCAmelCase : Any = {'''text-generation''': TrOCRForCausalLM} if is_torch_available() else {}
UpperCAmelCase : Dict = True
UpperCAmelCase : Tuple = False
def lowerCAmelCase_ ( self : Tuple ):
_A = TrOCRStandaloneDecoderModelTester(self , is_training=_UpperCAmelCase )
_A = ConfigTester(self , config_class=_UpperCAmelCase )
def lowerCAmelCase_ ( self : Tuple ):
pass
def lowerCAmelCase_ ( self : List[Any] ):
pass
def lowerCAmelCase_ ( self : List[str] ):
pass
def lowerCAmelCase_ ( self : List[str] ):
self.config_tester.run_common_tests()
def lowerCAmelCase_ ( self : Dict ):
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*_UpperCAmelCase )
def lowerCAmelCase_ ( self : str ):
return
@unittest.skip('The model doesn\'t support left padding' ) # and it's not used enough to be worth fixing :)
def lowerCAmelCase_ ( self : Optional[Any] ):
pass
| 315
|
"""simple docstring"""
import numpy
class lowercase_ :
'''simple docstring'''
def __init__( self : Dict , _UpperCAmelCase : numpy.ndarray , _UpperCAmelCase : numpy.ndarray ):
_A = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
_A = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
_A = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
_A = numpy.random.rand(3 , 1 )
# Real output values provided.
_A = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
_A = numpy.zeros(output_array.shape )
def lowerCAmelCase_ ( self : List[str] ):
_A = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
_A = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
_A = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def lowerCAmelCase_ ( self : Optional[int] ):
_A = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
_A = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
_A = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def lowerCAmelCase_ ( self : Any , _UpperCAmelCase : numpy.ndarray , _UpperCAmelCase : int , _UpperCAmelCase : bool ):
for iteration in range(1 , iterations + 1 ):
_A = self.feedforward()
self.back_propagation()
if give_loss:
_A = numpy.mean(numpy.square(output - self.feedforward() ) )
print(F'''Iteration {iteration} Loss: {loss}''' )
def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : numpy.ndarray ):
_A = input_arr
_A = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
_A = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
_A = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def _snake_case ( _snake_case : numpy.ndarray ) -> numpy.ndarray:
'''simple docstring'''
return 1 / (1 + numpy.exp(-value ))
def _snake_case ( _snake_case : numpy.ndarray ) -> numpy.ndarray:
'''simple docstring'''
return (value) * (1 - (value))
def _snake_case ( ) -> int:
'''simple docstring'''
_A = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
_A = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa )
# Calling neural network class.
_A = TwoHiddenLayerNeuralNetwork(
input_array=_snake_case , output_array=_snake_case )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=_snake_case , iterations=10 , give_loss=_snake_case )
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) )
if __name__ == "__main__":
example()
| 315
| 1
|
'''simple docstring'''
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class UpperCAmelCase_ ( _A ):
'''simple docstring'''
def _lowercase ( self , _lowercase ):
"""simple docstring"""
with open(__SCREAMING_SNAKE_CASE , encoding="""utf-8""" ) as input_file:
_lowerCAmelCase = re.compile(R"""(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)""" )
_lowerCAmelCase = input_file.read()
_lowerCAmelCase = regexp.search(__SCREAMING_SNAKE_CASE )
return match
def _lowercase ( self , _lowercase ):
"""simple docstring"""
with open(__SCREAMING_SNAKE_CASE , encoding="""utf-8""" ) as input_file:
_lowerCAmelCase = re.compile(R"""#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()""" , re.DOTALL )
_lowerCAmelCase = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
_lowerCAmelCase = regexp.finditer(__SCREAMING_SNAKE_CASE )
_lowerCAmelCase = [match for match in matches if match is not None and match.group(1 ) is not None]
return matches[0] if matches else None
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = Path("""./datasets""" )
_lowerCAmelCase = list(dataset_paths.absolute().glob("""**/*.py""" ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(__SCREAMING_SNAKE_CASE ) ):
raise AssertionError(F'open(...) must use utf-8 encoding in {dataset}' )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = Path("""./datasets""" )
_lowerCAmelCase = list(dataset_paths.absolute().glob("""**/*.py""" ) )
for dataset in dataset_files:
if self._no_print_statements(str(__SCREAMING_SNAKE_CASE ) ):
raise AssertionError(F'print statement found in {dataset}. Use datasets.logger/logging instead.' )
| 363
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json""",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowercase : List[Any] = '''convbert'''
def __init__( self , _lowercase=30_522 , _lowercase=768 , _lowercase=12 , _lowercase=12 , _lowercase=3_072 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=512 , _lowercase=2 , _lowercase=0.02 , _lowercase=1e-12 , _lowercase=1 , _lowercase=0 , _lowercase=2 , _lowercase=768 , _lowercase=2 , _lowercase=9 , _lowercase=1 , _lowercase=None , **_lowercase , ):
"""simple docstring"""
super().__init__(
pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase , )
_lowerCAmelCase = vocab_size
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_act
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = max_position_embeddings
_lowerCAmelCase = type_vocab_size
_lowerCAmelCase = initializer_range
_lowerCAmelCase = layer_norm_eps
_lowerCAmelCase = embedding_size
_lowerCAmelCase = head_ratio
_lowerCAmelCase = conv_kernel_size
_lowerCAmelCase = num_groups
_lowerCAmelCase = classifier_dropout
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def _lowercase ( self ):
"""simple docstring"""
if self.task == "multiple-choice":
_lowerCAmelCase = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
_lowerCAmelCase = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
| 229
| 0
|
'''simple docstring'''
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = FlaxMTaForConditionalGeneration.from_pretrained("""google/mt5-small""" )
__SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("""google/mt5-small""" )
__SCREAMING_SNAKE_CASE = tokenizer("""Hello there""" , return_tensors="""np""" ).input_ids
__SCREAMING_SNAKE_CASE = tokenizer("""Hi I am""" , return_tensors="""np""" ).input_ids
__SCREAMING_SNAKE_CASE = shift_tokens_right(__SCREAMING_SNAKE_CASE , model.config.pad_token_id , model.config.decoder_start_token_id )
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , decoder_input_ids=__SCREAMING_SNAKE_CASE ).logits
__SCREAMING_SNAKE_CASE = optax.softmax_cross_entropy(__SCREAMING_SNAKE_CASE , onehot(__SCREAMING_SNAKE_CASE , logits.shape[-1] ) ).mean()
__SCREAMING_SNAKE_CASE = -(labels.shape[-1] * loss.item())
__SCREAMING_SNAKE_CASE = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
| 267
|
'''simple docstring'''
class lowerCAmelCase__ :
"""simple docstring"""
def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = name
__SCREAMING_SNAKE_CASE = value
__SCREAMING_SNAKE_CASE = weight
def __repr__( self : str ) -> Union[str, Any]:
"""simple docstring"""
return f'{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'
def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
return self.value
def UpperCAmelCase__ ( self : Any ) -> str:
"""simple docstring"""
return self.name
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.weight
def UpperCAmelCase__ ( self : int ) -> Tuple:
"""simple docstring"""
return self.value / self.weight
def a__ ( a__ , a__ , a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = []
for i in range(len(a__ ) ):
menu.append(Things(name[i] , value[i] , weight[i] ) )
return menu
def a__ ( a__ , a__ , a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = sorted(a__ , key=a__ , reverse=a__ )
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 0.0, 0.0
for i in range(len(a__ ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def a__ ( ):
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 267
| 1
|
from __future__ import annotations
snake_case_ = 'Muhammad Umer Farooq'
snake_case_ = 'MIT'
snake_case_ = '1.0.0'
snake_case_ = 'Muhammad Umer Farooq'
snake_case_ = 'contact@muhammadumerfarooq.me'
snake_case_ = 'Alpha'
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
def __init__(self : Dict , a__ : str ):
"""simple docstring"""
super().__init__()
__snake_case = []
__snake_case = domain
def a (self : Tuple , a__ : str , a__ : list[tuple[str, str | None]] ):
"""simple docstring"""
if tag == "a":
# Check the list of defined attributes.
for name, value in attrs:
# If href is defined, and not empty nor # print it.
if name == "href" and value != "#" and value != "":
# If not already in urls.
if value not in self.urls:
__snake_case = parse.urljoin(self.domain , a__ )
self.urls.append(a__ )
def lowerCamelCase__ ( snake_case_ : str ) -> str:
return ".".join(get_sub_domain_name(snake_case_ ).split('''.''' )[-2:] )
def lowerCamelCase__ ( snake_case_ : str ) -> str:
return parse.urlparse(snake_case_ ).netloc
def lowerCamelCase__ ( snake_case_ : str = "https://github.com" ) -> list[str]:
__snake_case = get_domain_name(snake_case_ )
# Initialize the parser
__snake_case = Parser(snake_case_ )
try:
# Open URL
__snake_case = requests.get(snake_case_ )
# pass the raw HTML to the parser to get links
parser.feed(r.text )
# Get links and loop through
__snake_case = set()
for link in parser.urls:
# open URL.
# read = requests.get(link)
try:
__snake_case = requests.get(snake_case_ )
# Get the valid email.
__snake_case = re.findall('''[a-zA-Z0-9]+@''' + domain , read.text )
# If not in list then append it.
for email in emails:
valid_emails.add(snake_case_ )
except ValueError:
pass
except ValueError:
raise SystemExit(1 )
# Finally return a sorted list of email addresses with no duplicates.
return sorted(snake_case_ )
if __name__ == "__main__":
snake_case_ = emails_from_url('https://github.com')
print(F'{len(emails)} emails found:')
print('\n'.join(sorted(emails)))
| 238
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
snake_case_ = {
'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = ['VivitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = [
'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'VivitModel',
'VivitPreTrainedModel',
'VivitForVideoClassification',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
snake_case_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 238
| 1
|
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
a_ : List[str] = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
a_ : Tuple = direct_transformers_import(PATH_TO_TRANSFORMERS)
a_ : List[Any] = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
a_ : List[str] = re.compile(R'\[(.+?)\]\((https://huggingface\.co/.+?)\)')
a_ : Any = {
'DecisionTransformerConfig',
'EncoderDecoderConfig',
'MusicgenConfig',
'RagConfig',
'SpeechEncoderDecoderConfig',
'TimmBackboneConfig',
'VisionEncoderDecoderConfig',
'VisionTextDualEncoderConfig',
'LlamaConfig',
}
def lowerCamelCase__ (_UpperCAmelCase):
SCREAMING_SNAKE_CASE = None
# source code of `config_class`
SCREAMING_SNAKE_CASE = inspect.getsource(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = _re_checkpoint.findall(_UpperCAmelCase)
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith('/'):
SCREAMING_SNAKE_CASE = ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
SCREAMING_SNAKE_CASE = F'''https://huggingface.co/{ckpt_name}'''
if ckpt_link == ckpt_link_from_name:
SCREAMING_SNAKE_CASE = ckpt_name
break
return checkpoint
def lowerCamelCase__ ():
SCREAMING_SNAKE_CASE = []
for config_class in list(CONFIG_MAPPING.values()):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
SCREAMING_SNAKE_CASE = get_checkpoint_from_config_class(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(_UpperCAmelCase)
if len(_UpperCAmelCase) > 0:
SCREAMING_SNAKE_CASE = '\n'.join(sorted(_UpperCAmelCase))
raise ValueError(F'''The following configurations don\'t contain any valid checkpoint:\n{message}''')
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 137
|
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class _snake_case ( A__ ):
_lowercase : int = '''Speech2TextFeatureExtractor'''
_lowercase : List[Any] = '''Speech2TextTokenizer'''
def __init__( self , a , a) -> str:
super().__init__(a , a)
SCREAMING_SNAKE_CASE = self.feature_extractor
SCREAMING_SNAKE_CASE = False
def __call__( self , *a , **a) -> Union[str, Any]:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*a , **a)
if "raw_speech" in kwargs:
warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.')
SCREAMING_SNAKE_CASE = kwargs.pop('raw_speech')
else:
SCREAMING_SNAKE_CASE = kwargs.pop('audio' , a)
SCREAMING_SNAKE_CASE = kwargs.pop('sampling_rate' , a)
SCREAMING_SNAKE_CASE = kwargs.pop('text' , a)
if len(a) > 0:
SCREAMING_SNAKE_CASE = args[0]
SCREAMING_SNAKE_CASE = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.')
if audio is not None:
SCREAMING_SNAKE_CASE = self.feature_extractor(a , *a , sampling_rate=a , **a)
if text is not None:
SCREAMING_SNAKE_CASE = self.tokenizer(a , **a)
if text is None:
return inputs
elif audio is None:
return encodings
else:
SCREAMING_SNAKE_CASE = encodings['input_ids']
return inputs
def SCREAMING_SNAKE_CASE__ ( self , *a , **a) -> Union[str, Any]:
return self.tokenizer.batch_decode(*a , **a)
def SCREAMING_SNAKE_CASE__ ( self , *a , **a) -> List[str]:
return self.tokenizer.decode(*a , **a)
@contextmanager
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your audio inputs, or in a separate call.')
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = self.tokenizer
yield
SCREAMING_SNAKE_CASE = self.feature_extractor
SCREAMING_SNAKE_CASE = False
| 137
| 1
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=lowerCamelCase__ )
class SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ):
"""simple docstring"""
lowercase : str = field(default='text-classification' , metadata={'include_in_asdict_even_if_is_default': True} )
lowercase : ClassVar[Features] = Features({'text': Value('string' )} )
lowercase : ClassVar[Features] = Features({'labels': ClassLabel} )
lowercase : str = "text"
lowercase : str = "labels"
def __lowerCamelCase ( self , __UpperCamelCase ) -> List[str]:
'''simple docstring'''
if self.label_column not in features:
raise ValueError(f'''Column {self.label_column} is not present in features.''' )
if not isinstance(features[self.label_column] , __UpperCamelCase ):
raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
__UpperCamelCase : int = copy.deepcopy(self )
__UpperCamelCase : List[Any] = self.label_schema.copy()
__UpperCamelCase : Union[str, Any] = features[self.label_column]
__UpperCamelCase : Optional[Any] = label_schema
return task_template
@property
def __lowerCamelCase ( self ) -> Dict[str, str]:
'''simple docstring'''
return {
self.text_column: "text",
self.label_column: "labels",
}
| 365
|
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
lowercase : List[Any] = logging.get_logger(__name__)
lowercase : Optional[Any] = {
"Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json",
# See all Marian models at https://huggingface.co/models?filter=marian
}
class SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ):
"""simple docstring"""
lowercase : List[str] = 'marian'
lowercase : int = ['past_key_values']
lowercase : Optional[Any] = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self , __UpperCamelCase=5_81_01 , __UpperCamelCase=None , __UpperCamelCase=10_24 , __UpperCamelCase=12 , __UpperCamelCase=40_96 , __UpperCamelCase=16 , __UpperCamelCase=12 , __UpperCamelCase=40_96 , __UpperCamelCase=16 , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase="gelu" , __UpperCamelCase=10_24 , __UpperCamelCase=0.1 , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=0.02 , __UpperCamelCase=5_81_00 , __UpperCamelCase=False , __UpperCamelCase=5_81_00 , __UpperCamelCase=0 , __UpperCamelCase=0 , __UpperCamelCase=True , **__UpperCamelCase , ) -> List[Any]:
'''simple docstring'''
__UpperCamelCase : Any = vocab_size
__UpperCamelCase : str = decoder_vocab_size or vocab_size
__UpperCamelCase : Any = max_position_embeddings
__UpperCamelCase : List[Any] = d_model
__UpperCamelCase : Optional[int] = encoder_ffn_dim
__UpperCamelCase : Union[str, Any] = encoder_layers
__UpperCamelCase : Tuple = encoder_attention_heads
__UpperCamelCase : Dict = decoder_ffn_dim
__UpperCamelCase : Optional[Any] = decoder_layers
__UpperCamelCase : Optional[int] = decoder_attention_heads
__UpperCamelCase : Union[str, Any] = dropout
__UpperCamelCase : List[str] = attention_dropout
__UpperCamelCase : int = activation_dropout
__UpperCamelCase : Tuple = activation_function
__UpperCamelCase : List[str] = init_std
__UpperCamelCase : int = encoder_layerdrop
__UpperCamelCase : List[Any] = decoder_layerdrop
__UpperCamelCase : Dict = use_cache
__UpperCamelCase : str = encoder_layers
__UpperCamelCase : Dict = scale_embedding # scale factor will be sqrt(d_model) if True
__UpperCamelCase : List[str] = share_encoder_decoder_embeddings
super().__init__(
pad_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , is_encoder_decoder=__UpperCamelCase , decoder_start_token_id=__UpperCamelCase , forced_eos_token_id=__UpperCamelCase , **__UpperCamelCase , )
class SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ):
"""simple docstring"""
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def __lowerCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
__UpperCamelCase : Union[str, Any] = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
__UpperCamelCase : str = {0: "batch"}
__UpperCamelCase : Optional[int] = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
__UpperCamelCase : Optional[Any] = {0: "batch", 1: "decoder_sequence"}
__UpperCamelCase : List[Any] = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(__UpperCamelCase , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
__UpperCamelCase : str = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
__UpperCamelCase , __UpperCamelCase : Any = self.num_layers
for i in range(__UpperCamelCase ):
__UpperCamelCase : Any = {0: "batch", 2: "past_sequence + sequence"}
__UpperCamelCase : List[Any] = {0: "batch", 2: "past_sequence + sequence"}
else:
__UpperCamelCase : int = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def __lowerCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
__UpperCamelCase : List[Any] = super().outputs
else:
__UpperCamelCase : Optional[Any] = super(__UpperCamelCase , self ).outputs
if self.use_past:
__UpperCamelCase , __UpperCamelCase : int = self.num_layers
for i in range(__UpperCamelCase ):
__UpperCamelCase : List[str] = {0: "batch", 2: "past_sequence + sequence"}
__UpperCamelCase : str = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = -1 , __UpperCamelCase = -1 , __UpperCamelCase = False , __UpperCamelCase = None , ) -> Mapping[str, Any]:
'''simple docstring'''
__UpperCamelCase : str = self._generate_dummy_inputs_for_encoder_and_decoder(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Generate decoder inputs
__UpperCamelCase : Any = seq_length if not self.use_past else 1
__UpperCamelCase : int = self._generate_dummy_inputs_for_encoder_and_decoder(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
__UpperCamelCase : Any = {f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
__UpperCamelCase : List[Any] = dict(**__UpperCamelCase , **__UpperCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
__UpperCamelCase , __UpperCamelCase : Dict = common_inputs["input_ids"].shape
__UpperCamelCase : Dict = common_inputs["decoder_input_ids"].shape[1]
__UpperCamelCase , __UpperCamelCase : Any = self.num_attention_heads
__UpperCamelCase : str = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__UpperCamelCase : List[str] = decoder_seq_length + 3
__UpperCamelCase : Optional[Any] = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__UpperCamelCase : List[str] = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(__UpperCamelCase , __UpperCamelCase )] , dim=1 )
__UpperCamelCase : int = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
__UpperCamelCase , __UpperCamelCase : List[str] = self.num_layers
__UpperCamelCase : Optional[int] = min(__UpperCamelCase , __UpperCamelCase )
__UpperCamelCase : Optional[int] = max(__UpperCamelCase , __UpperCamelCase ) - min_num_layers
__UpperCamelCase : Dict = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(__UpperCamelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(__UpperCamelCase ),
torch.zeros(__UpperCamelCase ),
torch.zeros(__UpperCamelCase ),
torch.zeros(__UpperCamelCase ),
) )
# TODO: test this.
__UpperCamelCase : Any = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(__UpperCamelCase , __UpperCamelCase ):
common_inputs["past_key_values"].append((torch.zeros(__UpperCamelCase ), torch.zeros(__UpperCamelCase )) )
return common_inputs
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = -1 , __UpperCamelCase = -1 , __UpperCamelCase = False , __UpperCamelCase = None , ) -> Mapping[str, Any]:
'''simple docstring'''
__UpperCamelCase : int = self._generate_dummy_inputs_for_encoder_and_decoder(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
__UpperCamelCase , __UpperCamelCase : str = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
__UpperCamelCase : int = seqlen + 2
__UpperCamelCase , __UpperCamelCase : str = self.num_layers
__UpperCamelCase , __UpperCamelCase : List[str] = self.num_attention_heads
__UpperCamelCase : str = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__UpperCamelCase : Any = common_inputs["attention_mask"].dtype
__UpperCamelCase : Optional[Any] = torch.cat(
[common_inputs["attention_mask"], torch.ones(__UpperCamelCase , __UpperCamelCase , dtype=__UpperCamelCase )] , dim=1 )
__UpperCamelCase : int = [
(torch.zeros(__UpperCamelCase ), torch.zeros(__UpperCamelCase )) for _ in range(__UpperCamelCase )
]
return common_inputs
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = -1 , __UpperCamelCase = -1 , __UpperCamelCase = False , __UpperCamelCase = None , ) -> Mapping[str, Any]:
'''simple docstring'''
__UpperCamelCase : Any = compute_effective_axis_dimension(
__UpperCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__UpperCamelCase : List[Any] = tokenizer.num_special_tokens_to_add(__UpperCamelCase )
__UpperCamelCase : Union[str, Any] = compute_effective_axis_dimension(
__UpperCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__UpperCamelCase )
# Generate dummy inputs according to compute batch and sequence
__UpperCamelCase : Tuple = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
__UpperCamelCase : Tuple = dict(tokenizer(__UpperCamelCase , return_tensors=__UpperCamelCase ) )
return common_inputs
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = -1 , __UpperCamelCase = -1 , __UpperCamelCase = False , __UpperCamelCase = None , ) -> Mapping[str, Any]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
__UpperCamelCase : int = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__UpperCamelCase , batch_size=__UpperCamelCase , seq_length=__UpperCamelCase , is_pair=__UpperCamelCase , framework=__UpperCamelCase )
else:
__UpperCamelCase : int = self._generate_dummy_inputs_for_causal_lm(
__UpperCamelCase , batch_size=__UpperCamelCase , seq_length=__UpperCamelCase , is_pair=__UpperCamelCase , framework=__UpperCamelCase )
return common_inputs
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Any:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
__UpperCamelCase : List[Any] = super()._flatten_past_key_values_(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
else:
__UpperCamelCase : str = super(__UpperCamelCase , self )._flatten_past_key_values_(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
@property
def __lowerCamelCase ( self ) -> float:
'''simple docstring'''
return 1E-4
| 171
| 0
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
__A : Any = logging.get_logger(__name__)
__A : List[Any] = {
"Intel/dpt-large": "https://huggingface.co/Intel/dpt-large/resolve/main/config.json",
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class _a ( __lowercase):
"""simple docstring"""
UpperCamelCase__ = '''dpt'''
def __init__( self : int , __UpperCamelCase : List[Any]=7_6_8 , __UpperCamelCase : Optional[Any]=1_2 , __UpperCamelCase : str=1_2 , __UpperCamelCase : List[str]=3_0_7_2 , __UpperCamelCase : Dict="gelu" , __UpperCamelCase : Optional[Any]=0.0 , __UpperCamelCase : Dict=0.0 , __UpperCamelCase : int=0.0_2 , __UpperCamelCase : List[Any]=1e-12 , __UpperCamelCase : List[str]=3_8_4 , __UpperCamelCase : int=1_6 , __UpperCamelCase : Optional[int]=3 , __UpperCamelCase : List[str]=False , __UpperCamelCase : str=True , __UpperCamelCase : str=[2, 5, 8, 1_1] , __UpperCamelCase : Union[str, Any]="project" , __UpperCamelCase : List[Any]=[4, 2, 1, 0.5] , __UpperCamelCase : List[Any]=[9_6, 1_9_2, 3_8_4, 7_6_8] , __UpperCamelCase : Any=2_5_6 , __UpperCamelCase : Optional[Any]=-1 , __UpperCamelCase : List[str]=False , __UpperCamelCase : Optional[int]=True , __UpperCamelCase : Dict=0.4 , __UpperCamelCase : Any=2_5_5 , __UpperCamelCase : List[str]=0.1 , __UpperCamelCase : Dict=[1, 1_0_2_4, 2_4, 2_4] , __UpperCamelCase : Any=[0, 1] , __UpperCamelCase : Dict=None , **__UpperCamelCase : str , )->int:
super().__init__(**UpperCAmelCase__ )
_UpperCAmelCase = hidden_size
_UpperCAmelCase = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info('''Initializing the config with a `BiT` backbone.''' )
_UpperCAmelCase = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
}
_UpperCAmelCase = BitConfig(**UpperCAmelCase__ )
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
logger.info('''Initializing the config with a `BiT` backbone.''' )
_UpperCAmelCase = BitConfig(**UpperCAmelCase__ )
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
_UpperCAmelCase = backbone_config
else:
raise ValueError(
F'backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.' )
_UpperCAmelCase = backbone_featmap_shape
_UpperCAmelCase = neck_ignore_stages
if readout_type != "project":
raise ValueError('''Readout type must be \'project\' when using `DPT-hybrid` mode.''' )
else:
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = []
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = qkv_bias
_UpperCAmelCase = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError('''Readout_type must be one of [\'ignore\', \'add\', \'project\']''' )
_UpperCAmelCase = readout_type
_UpperCAmelCase = reassemble_factors
_UpperCAmelCase = neck_hidden_sizes
_UpperCAmelCase = fusion_hidden_size
_UpperCAmelCase = head_in_index
_UpperCAmelCase = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
_UpperCAmelCase = use_auxiliary_head
_UpperCAmelCase = auxiliary_loss_weight
_UpperCAmelCase = semantic_loss_ignore_index
_UpperCAmelCase = semantic_classifier_dropout
def lowercase__ ( self : str )->Dict:
_UpperCAmelCase = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
_UpperCAmelCase = self.backbone_config.to_dict()
_UpperCAmelCase = self.__class__.model_type
return output
| 260
|
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
__snake_case =random.Random()
if is_torch_available():
import torch
def a_ ( lowerCamelCase : Dict , lowerCamelCase : Dict=1.0 , lowerCamelCase : List[Any]=None , lowerCamelCase : Union[str, Any]=None ):
if rng is None:
lowerCAmelCase = global_rng
lowerCAmelCase = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class UpperCAmelCase_ ( unittest.TestCase ):
def __init__( self : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[str]=7 , UpperCAmelCase__ : int=4_0_0 , UpperCAmelCase__ : int=2_0_0_0 , UpperCAmelCase__ : List[str]=1 , UpperCAmelCase__ : Tuple=0.0 , UpperCAmelCase__ : Tuple=1_6_0_0_0 , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Union[str, Any]=True , ) -> Any:
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = min_seq_length
lowerCAmelCase = max_seq_length
lowerCAmelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
lowerCAmelCase = feature_size
lowerCAmelCase = padding_value
lowerCAmelCase = sampling_rate
lowerCAmelCase = return_attention_mask
lowerCAmelCase = do_normalize
def __UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def __UpperCAmelCase ( self : str , UpperCAmelCase__ : Union[str, Any]=False , UpperCAmelCase__ : Union[str, Any]=False ) -> Optional[Any]:
def _flatten(UpperCAmelCase__ : int ):
return list(itertools.chain(*UpperCAmelCase__ ) )
if equal_length:
lowerCAmelCase = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
lowerCAmelCase = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowerCAmelCase = [np.asarray(UpperCAmelCase__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
lowerCamelCase : Dict = ASTFeatureExtractor
def __UpperCAmelCase ( self : str ) -> Optional[int]:
lowerCAmelCase = ASTFeatureExtractionTester(self )
def __UpperCAmelCase ( self : Optional[int] ) -> Optional[int]:
# Tests that all call wrap to encode_plus and batch_encode_plus
lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowerCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
lowerCAmelCase = [np.asarray(UpperCAmelCase__ ) for speech_input in speech_inputs]
# Test not batched input
lowerCAmelCase = feat_extract(speech_inputs[0] , return_tensors='np' ).input_values
lowerCAmelCase = feat_extract(np_speech_inputs[0] , return_tensors='np' ).input_values
self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) )
# Test batched
lowerCAmelCase = feat_extract(UpperCAmelCase__ , padding=UpperCAmelCase__ , return_tensors='np' ).input_values
lowerCAmelCase = feat_extract(UpperCAmelCase__ , padding=UpperCAmelCase__ , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
lowerCAmelCase = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
lowerCAmelCase = np.asarray(UpperCAmelCase__ )
lowerCAmelCase = feat_extract(UpperCAmelCase__ , return_tensors='np' ).input_values
lowerCAmelCase = feat_extract(UpperCAmelCase__ , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) )
@require_torch
def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
import torch
lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase = np.random.rand(1_0_0 ).astype(np.floataa )
lowerCAmelCase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
lowerCAmelCase = feature_extractor.pad([{'input_values': inputs}] , return_tensors='np' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
lowerCAmelCase = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def __UpperCAmelCase ( self : int , UpperCAmelCase__ : str ) -> Tuple:
from datasets import load_dataset
lowerCAmelCase = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
lowerCAmelCase = ds.sort('id' ).select(range(UpperCAmelCase__ ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
@require_torch
def __UpperCAmelCase ( self : str ) -> Optional[Any]:
# fmt: off
lowerCAmelCase = torch.tensor(
[-0.9_894, -1.2_776, -0.9_066, -1.2_776, -0.9_349, -1.2_609, -1.0_386, -1.2_776,
-1.1_561, -1.2_776, -1.2_052, -1.2_723, -1.2_190, -1.2_132, -1.2_776, -1.1_133,
-1.1_953, -1.1_343, -1.1_584, -1.2_203, -1.1_770, -1.2_474, -1.2_381, -1.1_936,
-0.9_270, -0.8_317, -0.8_049, -0.7_706, -0.7_565, -0.7_869] )
# fmt: on
lowerCAmelCase = self._load_datasamples(1 )
lowerCAmelCase = ASTFeatureExtractor()
lowerCAmelCase = feature_extractor(UpperCAmelCase__ , return_tensors='pt' ).input_values
self.assertEquals(input_values.shape , (1, 1_0_2_4, 1_2_8) )
self.assertTrue(torch.allclose(input_values[0, 0, :3_0] , UpperCAmelCase__ , atol=1E-4 ) )
| 4
| 0
|
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCAmelCase_ : Dict = 'hf-internal-testing/tiny-random-t5'
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(_UpperCamelCase )
UpperCAmelCase_ : Any = AutoModelForSeqaSeqLM.from_pretrained(_UpperCamelCase )
UpperCAmelCase_ : int = tokenizer('This is me' , return_tensors='pt' )
UpperCAmelCase_ : Union[str, Any] = model.to_bettertransformer()
self.assertTrue(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
UpperCAmelCase_ : Tuple = model.generate(**_UpperCamelCase )
UpperCAmelCase_ : List[Any] = model.reverse_bettertransformer()
self.assertFalse(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_UpperCamelCase )
UpperCAmelCase_ : List[Any] = AutoModelForSeqaSeqLM.from_pretrained(_UpperCamelCase )
self.assertFalse(
any('BetterTransformer' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
UpperCAmelCase_ : Any = model_reloaded.generate(**_UpperCamelCase )
self.assertTrue(torch.allclose(_UpperCamelCase , _UpperCamelCase ) )
def __UpperCAmelCase ( self ) -> str:
UpperCAmelCase_ : List[Any] = 'hf-internal-testing/tiny-random-t5'
UpperCAmelCase_ : Any = AutoModelForSeqaSeqLM.from_pretrained(_UpperCamelCase )
UpperCAmelCase_ : str = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(_UpperCamelCase ):
model.save_pretrained(_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = model.reverse_bettertransformer()
model.save_pretrained(_UpperCamelCase )
| 350
|
from __future__ import annotations
def lowercase__ ( __snake_case : list[int] , __snake_case : int ):
'''simple docstring'''
if len(__snake_case ) < k or k < 0:
raise ValueError('Invalid Input' )
UpperCAmelCase_ : int = sum(array[:k] )
for i in range(len(__snake_case ) - k ):
UpperCAmelCase_ : List[Any] = current_sum - array[i] + array[i + k]
UpperCAmelCase_ : List[Any] = max(__snake_case , __snake_case )
return max_sum
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
__UpperCAmelCase = [randint(-1000, 1000) for i in range(100)]
__UpperCAmelCase = randint(0, 110)
print(F'The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}')
| 145
| 0
|
import sys
from collections import defaultdict
class A__ :
def __init__( self ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = []
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
return self.node_position[vertex]
def __UpperCamelCase( self , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = pos
def __UpperCamelCase( self , A_ , A_ , A_ , A_ ):
'''simple docstring'''
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
UpperCamelCase : List[Any] = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
UpperCamelCase : Any = 2 * start + 1
else:
UpperCamelCase : str = 2 * start + 2
if heap[smallest_child] < heap[start]:
UpperCamelCase , UpperCamelCase : str = heap[smallest_child], positions[smallest_child]
UpperCamelCase , UpperCamelCase : Union[str, Any] = (
heap[start],
positions[start],
)
UpperCamelCase , UpperCamelCase : Optional[Any] = temp, tempa
UpperCamelCase : Any = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , A_ )
self.top_to_bottom(A_ , A_ , A_ , A_ )
def __UpperCamelCase( self , A_ , A_ , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : Any = position[index]
while index != 0:
UpperCamelCase : Optional[Any] = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
UpperCamelCase : Dict = heap[parent]
UpperCamelCase : int = position[parent]
self.set_position(position[parent] , A_ )
else:
UpperCamelCase : Optional[int] = val
UpperCamelCase : Tuple = temp
self.set_position(A_ , A_ )
break
UpperCamelCase : List[Any] = parent
else:
UpperCamelCase : Dict = val
UpperCamelCase : Tuple = temp
self.set_position(A_ , 0 )
def __UpperCamelCase( self , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : Tuple = len(A_ ) // 2 - 1
for i in range(A_ , -1 , -1 ):
self.top_to_bottom(A_ , A_ , len(A_ ) , A_ )
def __UpperCamelCase( self , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = positions[0]
UpperCamelCase : Any = sys.maxsize
self.top_to_bottom(A_ , 0 , len(A_ ) , A_ )
return temp
def A_ ( _lowerCAmelCase ) -> Any:
UpperCamelCase : List[Any] = Heap()
UpperCamelCase : Union[str, Any] = [0] * len(_lowerCAmelCase )
UpperCamelCase : Union[str, Any] = [-1] * len(_lowerCAmelCase ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
UpperCamelCase : Tuple = [] # Heap of Distance of vertices from their neighboring vertex
UpperCamelCase : Optional[Any] = []
for vertex in range(len(_lowerCAmelCase ) ):
distance_tv.append(sys.maxsize )
positions.append(_lowerCAmelCase )
heap.node_position.append(_lowerCAmelCase )
UpperCamelCase : int = []
UpperCamelCase : Any = 1
UpperCamelCase : List[Any] = sys.maxsize
for neighbor, distance in adjacency_list[0]:
UpperCamelCase : List[Any] = 0
UpperCamelCase : int = distance
heap.heapify(_lowerCAmelCase , _lowerCAmelCase )
for _ in range(1 , len(_lowerCAmelCase ) ):
UpperCamelCase : str = heap.delete_minimum(_lowerCAmelCase , _lowerCAmelCase )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
UpperCamelCase : Tuple = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(_lowerCAmelCase )]
):
UpperCamelCase : Any = distance
heap.bottom_to_top(
_lowerCAmelCase , heap.get_position(_lowerCAmelCase ) , _lowerCAmelCase , _lowerCAmelCase )
UpperCamelCase : Union[str, Any] = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
__lowerCamelCase : Any = int(input("""Enter number of edges: """).strip())
__lowerCamelCase : Optional[Any] = defaultdict(list)
for _ in range(edges_number):
__lowerCamelCase : Optional[int] = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 52
|
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def UpperCamelCase_ ( snake_case_ : Any ) -> Optional[Any]:
'''simple docstring'''
__lowerCAmelCase = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""decoder.output_projection.weight""",
"""_float_tensor""",
"""encoder.embed_positions._float_tensor""",
"""decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
state_dict.pop(snake_case_ , snake_case_ )
def UpperCamelCase_ ( snake_case_ : Optional[Any] ) -> List[Any]:
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase = emb.weight.shape
__lowerCAmelCase = nn.Linear(snake_case_ , snake_case_ , bias=snake_case_ )
__lowerCAmelCase = emb.weight.data
return lin_layer
def UpperCamelCase_ ( snake_case_ : Any ) -> Any:
'''simple docstring'''
__lowerCAmelCase = torch.load(snake_case_ , map_location="""cpu""" )
__lowerCAmelCase = mam_aaa["""args"""] or mam_aaa["""cfg"""]["""model"""]
__lowerCAmelCase = mam_aaa["""model"""]
remove_ignore_keys_(snake_case_ )
__lowerCAmelCase = state_dict["""encoder.embed_tokens.weight"""].shape[0]
__lowerCAmelCase = MaMaaaConfig(
vocab_size=snake_case_ , max_position_embeddings=10_24 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="""relu""" , )
__lowerCAmelCase = state_dict["""decoder.embed_tokens.weight"""]
__lowerCAmelCase = MaMaaaForConditionalGeneration(snake_case_ )
model.model.load_state_dict(snake_case_ , strict=snake_case_ )
__lowerCAmelCase = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
_A : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
_A : str = parser.parse_args()
_A : Optional[int] = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_pathß)
model.save_pretrained(args.pytorch_dump_folder_path)
| 229
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : Any =logging.get_logger(__name__)
A__ : Optional[Any] ={
'''google/pegasus-large''': '''https://huggingface.co/google/pegasus-large/resolve/main/config.json''',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class UpperCAmelCase ( snake_case_ ):
_lowercase: Tuple = '''pegasus'''
_lowercase: Optional[Any] = ['''past_key_values''']
_lowercase: Optional[int] = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : Any , __snake_case : Dict=5_02_65 , __snake_case : Dict=10_24 , __snake_case : str=12 , __snake_case : Tuple=40_96 , __snake_case : str=16 , __snake_case : int=12 , __snake_case : Optional[int]=40_96 , __snake_case : Tuple=16 , __snake_case : str=0.0 , __snake_case : Tuple=0.0 , __snake_case : List[str]=True , __snake_case : int=True , __snake_case : Optional[int]="gelu" , __snake_case : int=10_24 , __snake_case : str=0.1 , __snake_case : Union[str, Any]=0.0 , __snake_case : Optional[int]=0.0 , __snake_case : List[str]=0.02 , __snake_case : str=0 , __snake_case : int=False , __snake_case : Optional[Any]=0 , __snake_case : Tuple=1 , __snake_case : str=1 , **__snake_case : List[str] , ) -> List[Any]:
_lowerCAmelCase = vocab_size
_lowerCAmelCase = max_position_embeddings
_lowerCAmelCase = d_model
_lowerCAmelCase = encoder_ffn_dim
_lowerCAmelCase = encoder_layers
_lowerCAmelCase = encoder_attention_heads
_lowerCAmelCase = decoder_ffn_dim
_lowerCAmelCase = decoder_layers
_lowerCAmelCase = decoder_attention_heads
_lowerCAmelCase = dropout
_lowerCAmelCase = attention_dropout
_lowerCAmelCase = activation_dropout
_lowerCAmelCase = activation_function
_lowerCAmelCase = init_std
_lowerCAmelCase = encoder_layerdrop
_lowerCAmelCase = decoder_layerdrop
_lowerCAmelCase = use_cache
_lowerCAmelCase = encoder_layers
_lowerCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=__snake_case , eos_token_id=__snake_case , is_encoder_decoder=__snake_case , decoder_start_token_id=__snake_case , forced_eos_token_id=__snake_case , **__snake_case , )
@property
def lowercase__ ( self : List[str] ) -> int:
return self.encoder_attention_heads
@property
def lowercase__ ( self : Dict ) -> int:
return self.d_model
| 220
|
'''simple docstring'''
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(snake_case_ ) , '''Tatoeba directory does not exist.''' )
class UpperCAmelCase ( unittest.TestCase ):
@cached_property
def lowercase__ ( self : int ) -> Any:
_lowerCAmelCase = tempfile.mkdtemp()
return TatoebaConverter(save_dir=__snake_case )
@slow
def lowercase__ ( self : Dict ) -> int:
self.resolver.convert_models(["""heb-eng"""] )
@slow
def lowercase__ ( self : Optional[int] ) -> Tuple:
_lowerCAmelCase , _lowerCAmelCase = self.resolver.write_model_card("""opus-mt-he-en""" , dry_run=__snake_case )
assert mmeta["long_pair"] == "heb-eng"
| 220
| 1
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
_lowercase : List[Any] = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
'''simple docstring'''
def __init__( self : Any, *lowerCamelCase : List[str], **lowerCamelCase : str )-> None:
warnings.warn(
'''The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use DeiTImageProcessor instead.''', lowerCamelCase, )
super().__init__(*lowerCamelCase, **lowerCamelCase )
| 238
|
"""simple docstring"""
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
'''simple docstring'''
def __get__( self : Tuple, lowerCamelCase : List[str], lowerCamelCase : Optional[int]=None )-> List[str]:
# See docs.python.org/3/howto/descriptor.html#properties
if obj is None:
return self
if self.fget is None:
raise AttributeError('''unreadable attribute''' )
lowerCamelCase__ : List[str] ='''__cached_''' + self.fget.__name__
lowerCamelCase__ : List[Any] =getattr(lowerCamelCase, lowerCamelCase, lowerCamelCase )
if cached is None:
lowerCamelCase__ : Optional[int] =self.fget(lowerCamelCase )
setattr(lowerCamelCase, lowerCamelCase, lowerCamelCase )
return cached
def snake_case__ ( __lowerCamelCase : str ):
"""simple docstring"""
lowerCamelCase__ : Optional[Any] =val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(f'''invalid truth value {val!r}''' )
def snake_case__ ( __lowerCamelCase : List[Any] ):
"""simple docstring"""
if is_torch_fx_proxy(__lowerCamelCase ):
return True
if is_torch_available():
import torch
if isinstance(__lowerCamelCase , torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(__lowerCamelCase , tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(__lowerCamelCase , (jnp.ndarray, Tracer) ):
return True
return isinstance(__lowerCamelCase , np.ndarray )
def snake_case__ ( __lowerCamelCase : List[Any] ):
"""simple docstring"""
return isinstance(__lowerCamelCase , np.ndarray )
def snake_case__ ( __lowerCamelCase : Union[str, Any] ):
"""simple docstring"""
return _is_numpy(__lowerCamelCase )
def snake_case__ ( __lowerCamelCase : Any ):
"""simple docstring"""
import torch
return isinstance(__lowerCamelCase , torch.Tensor )
def snake_case__ ( __lowerCamelCase : Optional[Any] ):
"""simple docstring"""
return False if not is_torch_available() else _is_torch(__lowerCamelCase )
def snake_case__ ( __lowerCamelCase : List[str] ):
"""simple docstring"""
import torch
return isinstance(__lowerCamelCase , torch.device )
def snake_case__ ( __lowerCamelCase : Optional[Any] ):
"""simple docstring"""
return False if not is_torch_available() else _is_torch_device(__lowerCamelCase )
def snake_case__ ( __lowerCamelCase : Tuple ):
"""simple docstring"""
import torch
if isinstance(__lowerCamelCase , __lowerCamelCase ):
if hasattr(__lowerCamelCase , __lowerCamelCase ):
lowerCamelCase__ : Tuple =getattr(__lowerCamelCase , __lowerCamelCase )
else:
return False
return isinstance(__lowerCamelCase , torch.dtype )
def snake_case__ ( __lowerCamelCase : List[Any] ):
"""simple docstring"""
return False if not is_torch_available() else _is_torch_dtype(__lowerCamelCase )
def snake_case__ ( __lowerCamelCase : Optional[Any] ):
"""simple docstring"""
import tensorflow as tf
return isinstance(__lowerCamelCase , tf.Tensor )
def snake_case__ ( __lowerCamelCase : int ):
"""simple docstring"""
return False if not is_tf_available() else _is_tensorflow(__lowerCamelCase )
def snake_case__ ( __lowerCamelCase : str ):
"""simple docstring"""
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(__lowerCamelCase , '''is_symbolic_tensor''' ):
return tf.is_symbolic_tensor(__lowerCamelCase )
return type(__lowerCamelCase ) == tf.Tensor
def snake_case__ ( __lowerCamelCase : Optional[Any] ):
"""simple docstring"""
return False if not is_tf_available() else _is_tf_symbolic_tensor(__lowerCamelCase )
def snake_case__ ( __lowerCamelCase : str ):
"""simple docstring"""
import jax.numpy as jnp # noqa: F811
return isinstance(__lowerCamelCase , jnp.ndarray )
def snake_case__ ( __lowerCamelCase : Tuple ):
"""simple docstring"""
return False if not is_flax_available() else _is_jax(__lowerCamelCase )
def snake_case__ ( __lowerCamelCase : List[str] ):
"""simple docstring"""
if isinstance(__lowerCamelCase , (dict, UserDict) ):
return {k: to_py_obj(__lowerCamelCase ) for k, v in obj.items()}
elif isinstance(__lowerCamelCase , (list, tuple) ):
return [to_py_obj(__lowerCamelCase ) for o in obj]
elif is_tf_tensor(__lowerCamelCase ):
return obj.numpy().tolist()
elif is_torch_tensor(__lowerCamelCase ):
return obj.detach().cpu().tolist()
elif is_jax_tensor(__lowerCamelCase ):
return np.asarray(__lowerCamelCase ).tolist()
elif isinstance(__lowerCamelCase , (np.ndarray, np.number) ): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
def snake_case__ ( __lowerCamelCase : List[Any] ):
"""simple docstring"""
if isinstance(__lowerCamelCase , (dict, UserDict) ):
return {k: to_numpy(__lowerCamelCase ) for k, v in obj.items()}
elif isinstance(__lowerCamelCase , (list, tuple) ):
return np.array(__lowerCamelCase )
elif is_tf_tensor(__lowerCamelCase ):
return obj.numpy()
elif is_torch_tensor(__lowerCamelCase ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(__lowerCamelCase ):
return np.asarray(__lowerCamelCase )
else:
return obj
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
'''simple docstring'''
def snake_case ( self : int )-> Optional[int]:
lowerCamelCase__ : Union[str, Any] =fields(self )
# Safety and consistency checks
if not len(lowerCamelCase ):
raise ValueError(F'''{self.__class__.__name__} has no fields.''' )
if not all(field.default is None for field in class_fields[1:] ):
raise ValueError(F'''{self.__class__.__name__} should not have more than one required field.''' )
lowerCamelCase__ : List[Any] =getattr(self, class_fields[0].name )
lowerCamelCase__ : Union[str, Any] =all(getattr(self, field.name ) is None for field in class_fields[1:] )
if other_fields_are_none and not is_tensor(lowerCamelCase ):
if isinstance(lowerCamelCase, lowerCamelCase ):
lowerCamelCase__ : Optional[int] =first_field.items()
lowerCamelCase__ : Union[str, Any] =True
else:
try:
lowerCamelCase__ : int =iter(lowerCamelCase )
lowerCamelCase__ : List[Any] =True
except TypeError:
lowerCamelCase__ : List[Any] =False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(lowerCamelCase ):
if (
not isinstance(lowerCamelCase, (list, tuple) )
or not len(lowerCamelCase ) == 2
or not isinstance(element[0], lowerCamelCase )
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
lowerCamelCase__ : Optional[int] =first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
F'''Cannot set key/value for {element}. It needs to be a tuple (key, value).''' )
break
setattr(self, element[0], element[1] )
if element[1] is not None:
lowerCamelCase__ : str =element[1]
elif first_field is not None:
lowerCamelCase__ : Dict =first_field
else:
for field in class_fields:
lowerCamelCase__ : Union[str, Any] =getattr(self, field.name )
if v is not None:
lowerCamelCase__ : Optional[int] =v
def __delitem__( self : int, *lowerCamelCase : List[str], **lowerCamelCase : Optional[int] )-> str:
raise Exception(F'''You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.''' )
def snake_case ( self : Optional[int], *lowerCamelCase : int, **lowerCamelCase : List[str] )-> Optional[Any]:
raise Exception(F'''You cannot use ``setdefault`` on a {self.__class__.__name__} instance.''' )
def snake_case ( self : Dict, *lowerCamelCase : Optional[int], **lowerCamelCase : Optional[Any] )-> int:
raise Exception(F'''You cannot use ``pop`` on a {self.__class__.__name__} instance.''' )
def snake_case ( self : List[Any], *lowerCamelCase : Tuple, **lowerCamelCase : List[Any] )-> Optional[int]:
raise Exception(F'''You cannot use ``update`` on a {self.__class__.__name__} instance.''' )
def __getitem__( self : Optional[Any], lowerCamelCase : Optional[int] )-> List[Any]:
if isinstance(lowerCamelCase, lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] =dict(self.items() )
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__( self : Union[str, Any], lowerCamelCase : List[str], lowerCamelCase : List[str] )-> Dict:
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(lowerCamelCase, lowerCamelCase )
super().__setattr__(lowerCamelCase, lowerCamelCase )
def __setitem__( self : Optional[Any], lowerCamelCase : Union[str, Any], lowerCamelCase : int )-> List[Any]:
# Will raise a KeyException if needed
super().__setitem__(lowerCamelCase, lowerCamelCase )
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(lowerCamelCase, lowerCamelCase )
def snake_case ( self : str )-> Tuple[Any]:
return tuple(self[k] for k in self.keys() )
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
@classmethod
def snake_case ( cls : Optional[Any], lowerCamelCase : int )-> str:
raise ValueError(
F'''{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}''' )
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
'''simple docstring'''
_a = 'longest'
_a = 'max_length'
_a = 'do_not_pad'
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
'''simple docstring'''
_a = 'pt'
_a = 'tf'
_a = 'np'
_a = 'jax'
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Optional[int], lowerCamelCase : List[ContextManager] )-> str:
lowerCamelCase__ : List[str] =context_managers
lowerCamelCase__ : int =ExitStack()
def __enter__( self : List[str] )-> Union[str, Any]:
for context_manager in self.context_managers:
self.stack.enter_context(lowerCamelCase )
def __exit__( self : Tuple, *lowerCamelCase : Union[str, Any], **lowerCamelCase : Tuple )-> List[Any]:
self.stack.__exit__(*lowerCamelCase, **lowerCamelCase )
def snake_case__ ( __lowerCamelCase : int ):
"""simple docstring"""
lowerCamelCase__ : Tuple =infer_framework(__lowerCamelCase )
if framework == "tf":
lowerCamelCase__ : Any =inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
lowerCamelCase__ : Tuple =inspect.signature(model_class.forward ) # PyTorch models
else:
lowerCamelCase__ : List[str] =inspect.signature(model_class.__call__ ) # Flax models
for p in signature.parameters:
if p == "return_loss" and signature.parameters[p].default is True:
return True
return False
def snake_case__ ( __lowerCamelCase : str ):
"""simple docstring"""
lowerCamelCase__ : Optional[Any] =model_class.__name__
lowerCamelCase__ : Tuple =infer_framework(__lowerCamelCase )
if framework == "tf":
lowerCamelCase__ : int =inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
lowerCamelCase__ : Any =inspect.signature(model_class.forward ) # PyTorch models
else:
lowerCamelCase__ : Union[str, Any] =inspect.signature(model_class.__call__ ) # Flax models
if "QuestionAnswering" in model_name:
return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
else:
return [p for p in signature.parameters if "label" in p]
def snake_case__ ( __lowerCamelCase : MutableMapping , __lowerCamelCase : str = "" , __lowerCamelCase : str = "." ):
"""simple docstring"""
def _flatten_dict(__lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int]="" , __lowerCamelCase : str="." ):
for k, v in d.items():
lowerCamelCase__ : List[str] =str(__lowerCamelCase ) + delimiter + str(__lowerCamelCase ) if parent_key else k
if v and isinstance(__lowerCamelCase , __lowerCamelCase ):
yield from flatten_dict(__lowerCamelCase , __lowerCamelCase , delimiter=__lowerCamelCase ).items()
else:
yield key, v
return dict(_flatten_dict(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) )
@contextmanager
def snake_case__ ( __lowerCamelCase : Tuple , __lowerCamelCase : bool = False ):
"""simple docstring"""
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
def snake_case__ ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict=None ):
"""simple docstring"""
if is_numpy_array(__lowerCamelCase ):
return np.transpose(__lowerCamelCase , axes=__lowerCamelCase )
elif is_torch_tensor(__lowerCamelCase ):
return array.T if axes is None else array.permute(*__lowerCamelCase )
elif is_tf_tensor(__lowerCamelCase ):
import tensorflow as tf
return tf.transpose(__lowerCamelCase , perm=__lowerCamelCase )
elif is_jax_tensor(__lowerCamelCase ):
return jnp.transpose(__lowerCamelCase , axes=__lowerCamelCase )
else:
raise ValueError(f'''Type not supported for transpose: {type(__lowerCamelCase )}.''' )
def snake_case__ ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[int] ):
"""simple docstring"""
if is_numpy_array(__lowerCamelCase ):
return np.reshape(__lowerCamelCase , __lowerCamelCase )
elif is_torch_tensor(__lowerCamelCase ):
return array.reshape(*__lowerCamelCase )
elif is_tf_tensor(__lowerCamelCase ):
import tensorflow as tf
return tf.reshape(__lowerCamelCase , __lowerCamelCase )
elif is_jax_tensor(__lowerCamelCase ):
return jnp.reshape(__lowerCamelCase , __lowerCamelCase )
else:
raise ValueError(f'''Type not supported for reshape: {type(__lowerCamelCase )}.''' )
def snake_case__ ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[str]=None ):
"""simple docstring"""
if is_numpy_array(__lowerCamelCase ):
return np.squeeze(__lowerCamelCase , axis=__lowerCamelCase )
elif is_torch_tensor(__lowerCamelCase ):
return array.squeeze() if axis is None else array.squeeze(dim=__lowerCamelCase )
elif is_tf_tensor(__lowerCamelCase ):
import tensorflow as tf
return tf.squeeze(__lowerCamelCase , axis=__lowerCamelCase )
elif is_jax_tensor(__lowerCamelCase ):
return jnp.squeeze(__lowerCamelCase , axis=__lowerCamelCase )
else:
raise ValueError(f'''Type not supported for squeeze: {type(__lowerCamelCase )}.''' )
def snake_case__ ( __lowerCamelCase : Dict , __lowerCamelCase : Optional[int] ):
"""simple docstring"""
if is_numpy_array(__lowerCamelCase ):
return np.expand_dims(__lowerCamelCase , __lowerCamelCase )
elif is_torch_tensor(__lowerCamelCase ):
return array.unsqueeze(dim=__lowerCamelCase )
elif is_tf_tensor(__lowerCamelCase ):
import tensorflow as tf
return tf.expand_dims(__lowerCamelCase , axis=__lowerCamelCase )
elif is_jax_tensor(__lowerCamelCase ):
return jnp.expand_dims(__lowerCamelCase , axis=__lowerCamelCase )
else:
raise ValueError(f'''Type not supported for expand_dims: {type(__lowerCamelCase )}.''' )
def snake_case__ ( __lowerCamelCase : List[Any] ):
"""simple docstring"""
if is_numpy_array(__lowerCamelCase ):
return np.size(__lowerCamelCase )
elif is_torch_tensor(__lowerCamelCase ):
return array.numel()
elif is_tf_tensor(__lowerCamelCase ):
import tensorflow as tf
return tf.size(__lowerCamelCase )
elif is_jax_tensor(__lowerCamelCase ):
return array.size
else:
raise ValueError(f'''Type not supported for expand_dims: {type(__lowerCamelCase )}.''' )
def snake_case__ ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Tuple ):
"""simple docstring"""
for key, value in auto_map.items():
if isinstance(__lowerCamelCase , (tuple, list) ):
lowerCamelCase__ : Optional[int] =[f'''{repo_id}--{v}''' if (v is not None and '''--''' not in v) else v for v in value]
elif value is not None and "--" not in value:
lowerCamelCase__ : Tuple =f'''{repo_id}--{value}'''
return auto_map
def snake_case__ ( __lowerCamelCase : Optional[int] ):
"""simple docstring"""
for base_class in inspect.getmro(__lowerCamelCase ):
lowerCamelCase__ : Tuple =base_class.__module__
lowerCamelCase__ : Tuple =base_class.__name__
if module.startswith('''tensorflow''' ) or module.startswith('''keras''' ) or name == "TFPreTrainedModel":
return "tf"
elif module.startswith('''torch''' ) or name == "PreTrainedModel":
return "pt"
elif module.startswith('''flax''' ) or module.startswith('''jax''' ) or name == "FlaxPreTrainedModel":
return "flax"
else:
raise TypeError(f'''Could not infer framework from class {model_class}.''' )
| 238
| 1
|
'''simple docstring'''
import logging
from transformers import PretrainedConfig
lowerCAmelCase: str = logging.getLogger(__name__)
lowerCAmelCase: Optional[Any] = {
'bertabs-finetuned-cnndm': 'https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json',
}
class a__( lowerCamelCase__ ):
lowercase__ = """bertabs"""
def __init__( self : List[str] , __snake_case : Any=3_05_22 , __snake_case : Any=5_12 , __snake_case : Optional[Any]=6 , __snake_case : Union[str, Any]=5_12 , __snake_case : int=8 , __snake_case : Any=5_12 , __snake_case : Any=0.2 , __snake_case : List[Any]=6 , __snake_case : Optional[int]=7_68 , __snake_case : int=8 , __snake_case : Union[str, Any]=20_48 , __snake_case : Dict=0.2 , **__snake_case : Any , ):
super().__init__(**__snake_case )
a : List[str] = vocab_size
a : List[str] = max_pos
a : str = enc_layers
a : Optional[int] = enc_hidden_size
a : Optional[int] = enc_heads
a : List[str] = enc_ff_size
a : Optional[Any] = enc_dropout
a : Dict = dec_layers
a : List[str] = dec_hidden_size
a : str = dec_heads
a : str = dec_ff_size
a : Any = dec_dropout
| 96
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase: List[Any] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase: Optional[int] = ['NllbTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase: Dict = ['NllbTokenizerFast']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
lowerCAmelCase: Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 96
| 1
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A_ (lowerCAmelCase__ ,unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = KandinskyVaaInpaintPipeline
SCREAMING_SNAKE_CASE__ : int = ["""image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
SCREAMING_SNAKE_CASE__ : Dict = [
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
"""mask_image""",
]
SCREAMING_SNAKE_CASE__ : Tuple = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
SCREAMING_SNAKE_CASE__ : Tuple = False
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return 32
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return 32
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.time_input_dim
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return 100
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase_ : str = {
"""in_channels""": 9,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
UpperCAmelCase_ : Optional[Any] = UNetaDConditionModel(**_lowerCamelCase )
return model
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase_ : str = VQModel(**self.dummy_movq_kwargs )
return model
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : str = self.dummy_unet
UpperCAmelCase_ : Optional[int] = self.dummy_movq
UpperCAmelCase_ : Tuple = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=_lowerCamelCase , set_alpha_to_one=_lowerCamelCase , steps_offset=1 , prediction_type="epsilon" , thresholding=_lowerCamelCase , )
UpperCAmelCase_ : List[str] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def UpperCamelCase__ ( self , lowercase_ , lowercase_=0 ):
"""simple docstring"""
UpperCAmelCase_ : Dict = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
UpperCAmelCase_ : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_lowerCamelCase )
# create init_image
UpperCAmelCase_ : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
UpperCAmelCase_ : str = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase_ : Any = Image.fromarray(np.uinta(_lowerCamelCase ) ).convert("RGB" ).resize((256, 256) )
# create mask
UpperCAmelCase_ : Optional[Any] = np.ones((64, 64) , dtype=np.floataa )
UpperCAmelCase_ : Dict = 0
if str(_lowerCamelCase ).startswith("mps" ):
UpperCAmelCase_ : int = torch.manual_seed(_lowerCamelCase )
else:
UpperCAmelCase_ : int = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
UpperCAmelCase_ : str = {
"""image""": init_image,
"""mask_image""": mask,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 2,
"""guidance_scale""": 4.0,
"""output_type""": """np""",
}
return inputs
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = """cpu"""
UpperCAmelCase_ : str = self.get_dummy_components()
UpperCAmelCase_ : Optional[int] = self.pipeline_class(**_lowerCamelCase )
UpperCAmelCase_ : str = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
UpperCAmelCase_ : Optional[int] = pipe(**self.get_dummy_inputs(_lowerCamelCase ) )
UpperCAmelCase_ : Optional[Any] = output.images
UpperCAmelCase_ : Optional[Any] = pipe(
**self.get_dummy_inputs(_lowerCamelCase ) , return_dict=_lowerCamelCase , )[0]
UpperCAmelCase_ : str = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Any = image_from_tuple[0, -3:, -3:, -1]
print(F"""image.shape {image.shape}""" )
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ : str = np.array(
[0.50_77_59_03, 0.49_52_71_95, 0.48_82_45_43, 0.50_19_22_37, 0.48_64_49_06, 0.49_37_38_14, 0.4_78_05_98, 0.47_23_48_27, 0.48_32_78_48] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
def UpperCamelCase__ ( self ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class A_ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : int = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy" )
UpperCAmelCase_ : str = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
UpperCAmelCase_ : Dict = np.ones((768, 768) , dtype=np.floataa )
UpperCAmelCase_ : Dict = 0
UpperCAmelCase_ : str = """a hat"""
UpperCAmelCase_ : str = KandinskyVaaPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa )
pipe_prior.to(_lowerCamelCase )
UpperCAmelCase_ : Union[str, Any] = KandinskyVaaInpaintPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-decoder-inpaint" , torch_dtype=torch.floataa )
UpperCAmelCase_ : Union[str, Any] = pipeline.to(_lowerCamelCase )
pipeline.set_progress_bar_config(disable=_lowerCamelCase )
UpperCAmelCase_ : str = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase_ : int = pipe_prior(
_lowerCamelCase , generator=_lowerCamelCase , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
UpperCAmelCase_ : str = pipeline(
image=_lowerCamelCase , mask_image=_lowerCamelCase , image_embeds=_lowerCamelCase , negative_image_embeds=_lowerCamelCase , generator=_lowerCamelCase , num_inference_steps=100 , height=768 , width=768 , output_type="np" , )
UpperCAmelCase_ : Union[str, Any] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_lowerCamelCase , _lowerCamelCase )
| 61
|
"""simple docstring"""
import numpy as np
def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = 1E-12 , lowerCAmelCase = 1_00 , ) -> tuple[float, np.ndarray]:
assert np.shape(lowerCAmelCase )[0] == np.shape(lowerCAmelCase )[1]
# Ensure proper dimensionality.
assert np.shape(lowerCAmelCase )[0] == np.shape(lowerCAmelCase )[0]
# Ensure inputs are either both complex or both real
assert np.iscomplexobj(lowerCAmelCase ) == np.iscomplexobj(lowerCAmelCase )
UpperCAmelCase__ : Optional[Any] = np.iscomplexobj(lowerCAmelCase )
if is_complex:
# Ensure complex input_matrix is Hermitian
assert np.array_equal(lowerCAmelCase , input_matrix.conj().T )
# Set convergence to False. Will define convergence when we exceed max_iterations
# or when we have small changes from one iteration to next.
UpperCAmelCase__ : Optional[Any] = False
UpperCAmelCase__ : Any = 0
UpperCAmelCase__ : Tuple = 0
UpperCAmelCase__ : Optional[int] = 1E12
while not convergence:
# Multiple matrix by the vector.
UpperCAmelCase__ : int = np.dot(lowerCAmelCase , lowerCAmelCase )
# Normalize the resulting output vector.
UpperCAmelCase__ : Optional[Any] = w / np.linalg.norm(lowerCAmelCase )
# Find rayleigh quotient
# (faster than usual b/c we know vector is normalized already)
UpperCAmelCase__ : List[Any] = vector.conj().T if is_complex else vector.T
UpperCAmelCase__ : Optional[Any] = np.dot(lowerCAmelCase , np.dot(lowerCAmelCase , lowerCAmelCase ) )
# Check convergence.
UpperCAmelCase__ : Union[str, Any] = np.abs(lambda_ - lambda_previous ) / lambda_
iterations += 1
if error <= error_tol or iterations >= max_iterations:
UpperCAmelCase__ : Optional[int] = True
UpperCAmelCase__ : List[Any] = lambda_
if is_complex:
UpperCAmelCase__ : Any = np.real(lambda_ )
return lambda_, vector
def a__ ( ) -> None:
UpperCAmelCase__ : Tuple = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
UpperCAmelCase__ : int = np.array([41, 4, 20] )
UpperCAmelCase__ : str = real_input_matrix.astype(np.complexaaa )
UpperCAmelCase__ : Any = np.triu(1j * complex_input_matrix , 1 )
complex_input_matrix += imag_matrix
complex_input_matrix += -1 * imag_matrix.T
UpperCAmelCase__ : Dict = np.array([41, 4, 20] ).astype(np.complexaaa )
for problem_type in ["real", "complex"]:
if problem_type == "real":
UpperCAmelCase__ : List[str] = real_input_matrix
UpperCAmelCase__ : Any = real_vector
elif problem_type == "complex":
UpperCAmelCase__ : List[Any] = complex_input_matrix
UpperCAmelCase__ : int = complex_vector
# Our implementation.
UpperCAmelCase__ , UpperCAmelCase__ : int = power_iteration(lowerCAmelCase , lowerCAmelCase )
# Numpy implementation.
# Get eigenvalues and eigenvectors using built-in numpy
# eigh (eigh used for symmetric or hermetian matrices).
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = np.linalg.eigh(lowerCAmelCase )
# Last eigenvalue is the maximum one.
UpperCAmelCase__ : str = eigen_values[-1]
# Last column in this matrix is eigenvector corresponding to largest eigenvalue.
UpperCAmelCase__ : List[Any] = eigen_vectors[:, -1]
# Check our implementation and numpy gives close answers.
assert np.abs(eigen_value - eigen_value_max ) <= 1E-6
# Take absolute values element wise of each eigenvector.
# as they are only unique to a minus sign.
assert np.linalg.norm(np.abs(lowerCAmelCase ) - np.abs(lowerCAmelCase ) ) <= 1E-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 171
| 0
|
import numpy
class lowercase__ :
def __init__( self : Optional[int] , UpperCamelCase__ : numpy.ndarray , UpperCamelCase__ : numpy.ndarray ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
SCREAMING_SNAKE_CASE : Dict = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
SCREAMING_SNAKE_CASE : Optional[Any] = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
SCREAMING_SNAKE_CASE : Union[str, Any] = numpy.random.rand(3 , 1 )
# Real output values provided.
SCREAMING_SNAKE_CASE : Tuple = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
SCREAMING_SNAKE_CASE : Optional[Any] = numpy.zeros(output_array.shape )
def __A ( self : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
SCREAMING_SNAKE_CASE : Optional[int] = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
SCREAMING_SNAKE_CASE : Union[str, Any] = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def __A ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
SCREAMING_SNAKE_CASE : List[str] = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
SCREAMING_SNAKE_CASE : int = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def __A ( self : Union[str, Any] , UpperCamelCase__ : numpy.ndarray , UpperCamelCase__ : int , UpperCamelCase__ : bool ):
'''simple docstring'''
for iteration in range(1 , iterations + 1 ):
SCREAMING_SNAKE_CASE : int = self.feedforward()
self.back_propagation()
if give_loss:
SCREAMING_SNAKE_CASE : Dict = numpy.mean(numpy.square(output - self.feedforward() ) )
print(f"""Iteration {iteration} Loss: {loss}""" )
def __A ( self : str , UpperCamelCase__ : numpy.ndarray ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = input_arr
SCREAMING_SNAKE_CASE : int = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
SCREAMING_SNAKE_CASE : Optional[int] = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def A ( _lowercase ):
return 1 / (1 + numpy.exp(-value ))
def A ( _lowercase ):
return (value) * (1 - (value))
def A ( ):
SCREAMING_SNAKE_CASE : int = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
SCREAMING_SNAKE_CASE : Optional[int] = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa )
# Calling neural network class.
SCREAMING_SNAKE_CASE : List[Any] = TwoHiddenLayerNeuralNetwork(
input_array=_lowercase , output_array=_lowercase )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=_lowercase , iterations=10 , give_loss=_lowercase )
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) )
if __name__ == "__main__":
example()
| 258
|
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class lowercase__ :
def __init__( self : Any , UpperCamelCase__ : Any , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : List[Any]="resnet50" , UpperCamelCase__ : int=3 , UpperCamelCase__ : Optional[Any]=32 , UpperCamelCase__ : List[Any]=3 , UpperCamelCase__ : Any=True , UpperCamelCase__ : int=True , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = parent
SCREAMING_SNAKE_CASE : Union[str, Any] = out_indices if out_indices is not None else [4]
SCREAMING_SNAKE_CASE : List[Any] = stage_names
SCREAMING_SNAKE_CASE : int = out_features
SCREAMING_SNAKE_CASE : Optional[int] = backbone
SCREAMING_SNAKE_CASE : Union[str, Any] = batch_size
SCREAMING_SNAKE_CASE : Dict = image_size
SCREAMING_SNAKE_CASE : Optional[int] = num_channels
SCREAMING_SNAKE_CASE : List[Any] = use_pretrained_backbone
SCREAMING_SNAKE_CASE : Dict = is_training
def __A ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : Tuple = self.get_config()
return config, pixel_values
def __A ( self : List[Any] ):
'''simple docstring'''
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def __A ( self : List[str] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = TimmBackbone(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Dict = model(UpperCamelCase__ )
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def __A ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = config_and_inputs
SCREAMING_SNAKE_CASE : Tuple = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class lowercase__ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase):
UpperCamelCase_ = (TimmBackbone,) if is_torch_available() else ()
UpperCamelCase_ = {"""feature-extraction""": TimmBackbone} if is_torch_available() else {}
UpperCamelCase_ = False
UpperCamelCase_ = False
UpperCamelCase_ = False
UpperCamelCase_ = False
def __A ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = TimmBackboneModelTester(self )
SCREAMING_SNAKE_CASE : Tuple = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ )
def __A ( self : List[Any] ):
'''simple docstring'''
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __A ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = '''resnet18'''
SCREAMING_SNAKE_CASE : str = '''microsoft/resnet-18'''
SCREAMING_SNAKE_CASE : Dict = AutoBackbone.from_pretrained(UpperCamelCase__ , use_timm_backbone=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = AutoBackbone.from_pretrained(UpperCamelCase__ )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
SCREAMING_SNAKE_CASE : List[str] = AutoBackbone.from_pretrained(UpperCamelCase__ , use_timm_backbone=UpperCamelCase__ , out_indices=[1, 2, 3] )
SCREAMING_SNAKE_CASE : Optional[Any] = AutoBackbone.from_pretrained(UpperCamelCase__ , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip('''TimmBackbone doesn\'t support feed forward chunking''' )
def __A ( self : Optional[int] ):
'''simple docstring'''
pass
@unittest.skip('''TimmBackbone doesn\'t have num_hidden_layers attribute''' )
def __A ( self : int ):
'''simple docstring'''
pass
@unittest.skip('''TimmBackbone initialization is managed on the timm side''' )
def __A ( self : Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def __A ( self : List[Any] ):
'''simple docstring'''
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def __A ( self : Any ):
'''simple docstring'''
pass
@unittest.skip('''TimmBackbone model cannot be created without specifying a backbone checkpoint''' )
def __A ( self : Optional[int] ):
'''simple docstring'''
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def __A ( self : int ):
'''simple docstring'''
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def __A ( self : Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def __A ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip('''TimmBackbone doesn\'t have hidden size info in its configuration.''' )
def __A ( self : Optional[int] ):
'''simple docstring'''
pass
@unittest.skip('''TimmBackbone doesn\'t support output_attentions.''' )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip('''Safetensors is not supported by timm.''' )
def __A ( self : List[Any] ):
'''simple docstring'''
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __A ( self : int ):
'''simple docstring'''
pass
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Dict = model_class(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : str = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : Optional[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def __A ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : int = True
SCREAMING_SNAKE_CASE : Any = self.has_attentions
# no need to test all models as different heads yield the same functionality
SCREAMING_SNAKE_CASE : Any = self.all_model_classes[0]
SCREAMING_SNAKE_CASE : List[str] = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = model(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = outputs[0][-1]
# Encoder-/Decoder-only models
SCREAMING_SNAKE_CASE : List[Any] = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
SCREAMING_SNAKE_CASE : Any = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=UpperCamelCase__ )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Tuple = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
SCREAMING_SNAKE_CASE : Optional[Any] = model(**UpperCamelCase__ )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
SCREAMING_SNAKE_CASE : List[str] = copy.deepcopy(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = None
SCREAMING_SNAKE_CASE : str = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = model(**UpperCamelCase__ )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
SCREAMING_SNAKE_CASE : Optional[Any] = copy.deepcopy(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = False
SCREAMING_SNAKE_CASE : str = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
SCREAMING_SNAKE_CASE : int = model(**UpperCamelCase__ )
| 258
| 1
|
'''simple docstring'''
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
lowerCAmelCase_ : List[str] = HfApi()
lowerCAmelCase_ : str = {}
# fmt: off
lowerCAmelCase_ : Any = torch.tensor([
-0.7_515, -1.6_883, 0.2_420, 0.0_300, 0.6_347, 1.3_433, -1.1_743, -3.7_467,
1.2_342, -2.2_485, 0.4_636, 0.8_076, -0.7_991, 0.3_969, 0.8_498, 0.9_189,
-1.8_887, -3.3_522, 0.7_639, 0.2_040, 0.6_271, -2.7_148, -1.6_316, 3.0_839,
0.3_186, 0.2_721, -0.9_759, -1.2_461, 2.6_257, 1.3_557
])
lowerCAmelCase_ : List[Any] = torch.tensor([
-2.3_639, -2.5_344, 0.0_054, -0.6_674, 1.5_990, 1.0_158, 0.3_124, -2.1_436,
1.8_795, -2.5_429, -0.1_566, -0.3_973, 1.2_490, 2.6_447, 1.2_283, -0.5_208,
-2.8_154, -3.5_119, 2.3_838, 1.2_033, 1.7_201, -2.1_256, -1.4_576, 2.7_948,
2.4_204, -0.9_752, -1.2_546, 0.8_027, 3.2_758, 3.1_365
])
lowerCAmelCase_ : int = torch.tensor([
-0.6_531, -0.6_891, -0.3_172, -0.5_375, -0.9_140, -0.5_367, -0.1_175, -0.7_869,
-0.3_808, -0.4_513, -0.2_098, -0.0_083, 0.3_183, 0.5_140, 0.2_247, -0.1_304,
-0.1_302, -0.2_802, -0.2_084, -0.2_025, -0.4_967, -0.4_873, -0.0_861, 0.6_925,
0.0_250, 0.1_290, -0.1_543, 0.6_316, 1.0_460, 1.4_943
])
lowerCAmelCase_ : List[Any] = torch.tensor([
0.0_911, 0.1_107, 0.0_182, 0.0_435, -0.0_805, -0.0_608, 0.0_381, 0.2_172,
-0.0_280, 0.1_327, -0.0_299, -0.0_255, -0.0_050, -0.1_170, -0.1_046, 0.0_309,
0.1_367, 0.1_728, -0.0_533, -0.0_748, -0.0_534, 0.1_624, 0.0_384, -0.1_805,
-0.0_707, 0.0_642, 0.0_220, -0.0_134, -0.1_333, -0.1_505
])
lowerCAmelCase_ : Tuple = torch.tensor([
0.1_321, 0.1_337, 0.0_440, 0.0_622, -0.0_591, -0.0_370, 0.0_503, 0.2_133,
-0.0_177, 0.1_415, -0.0_116, -0.0_112, 0.0_044, -0.0_980, -0.0_789, 0.0_395,
0.1_502, 0.1_785, -0.0_488, -0.0_514, -0.0_404, 0.1_539, 0.0_454, -0.1_559,
-0.0_665, 0.0_659, 0.0_383, -0.0_005, -0.1_266, -0.1_386
])
lowerCAmelCase_ : List[str] = torch.tensor([
0.1_154, 0.1_218, 0.0_307, 0.0_526, -0.0_711, -0.0_541, 0.0_366, 0.2_078,
-0.0_267, 0.1_317, -0.0_226, -0.0_193, -0.0_014, -0.1_055, -0.0_902, 0.0_330,
0.1_391, 0.1_709, -0.0_562, -0.0_693, -0.0_560, 0.1_482, 0.0_381, -0.1_683,
-0.0_681, 0.0_661, 0.0_331, -0.0_046, -0.1_268, -0.1_431
])
lowerCAmelCase_ : Optional[Any] = torch.tensor([
0.1_192, 0.1_240, 0.0_414, 0.0_606, -0.0_557, -0.0_412, 0.0_430, 0.2_042,
-0.0_200, 0.1_385, -0.0_115, -0.0_132, 0.0_017, -0.0_965, -0.0_802, 0.0_398,
0.1_433, 0.1_747, -0.0_458, -0.0_533, -0.0_407, 0.1_545, 0.0_419, -0.1_574,
-0.0_645, 0.0_626, 0.0_341, -0.0_010, -0.1_199, -0.1_390
])
lowerCAmelCase_ : Optional[Any] = torch.tensor([
0.1_075, 0.1_074, 0.0_205, 0.0_431, -0.0_774, -0.0_607, 0.0_298, 0.2_042,
-0.0_320, 0.1_267, -0.0_281, -0.0_250, -0.0_064, -0.1_091, -0.0_946, 0.0_290,
0.1_328, 0.1_650, -0.0_580, -0.0_738, -0.0_586, 0.1_440, 0.0_337, -0.1_746,
-0.0_712, 0.0_605, 0.0_250, -0.0_099, -0.1_316, -0.1_473
])
lowerCAmelCase_ : Optional[int] = torch.tensor([
-1.4_572, -2.0_481, -0.0_414, -0.6_005, 1.4_136, 0.5_848, 0.4_028, -2.7_330,
1.2_212, -2.1_228, 0.2_155, 0.4_039, 0.7_662, 2.0_535, 0.7_477, -0.3_243,
-2.1_758, -2.7_648, 1.6_947, 0.7_026, 1.2_338, -1.6_078, -0.8_682, 2.2_810,
1.8_574, -0.5_718, -0.5_586, -0.0_186, 2.3_415, 2.1_251])
lowerCAmelCase_ : List[Any] = torch.tensor([
-1.3_690, -1.9_720, -0.4_090, -0.6_966, 1.4_660, 0.9_938, -0.1_385, -2.7_324,
0.7_736, -1.8_917, 0.2_923, 0.4_293, 0.1_693, 1.4_112, 1.1_887, -0.3_181,
-2.2_160, -2.6_381, 1.3_170, 0.8_163, 0.9_240, -1.6_544, -0.6_099, 2.5_259,
1.6_430, -0.9_090, -0.9_392, -0.0_126, 2.4_268, 2.3_266
])
lowerCAmelCase_ : Tuple = torch.tensor([
-1.3_525, -1.9_628, -0.3_956, -0.6_860, 1.4_664, 1.0_014, -0.1_259, -2.7_212,
0.7_772, -1.8_811, 0.2_996, 0.4_388, 0.1_704, 1.4_029, 1.1_701, -0.3_027,
-2.2_053, -2.6_287, 1.3_350, 0.8_131, 0.9_274, -1.6_292, -0.6_098, 2.5_131,
1.6_505, -0.8_958, -0.9_298, -0.0_151, 2.4_257, 2.3_355
])
lowerCAmelCase_ : str = torch.tensor([
-2.0_585, -2.7_897, -0.2_850, -0.8_940, 1.9_052, 0.5_702, 0.6_345, -3.8_959,
1.5_932, -3.2_319, 0.1_974, 0.0_287, 1.7_566, 2.6_543, 0.8_387, -0.5_351,
-3.2_736, -4.3_375, 2.9_029, 1.6_390, 1.4_640, -2.1_701, -1.9_013, 2.9_341,
3.4_981, -0.6_255, -1.1_644, -0.1_591, 3.7_097, 3.2_066
])
lowerCAmelCase_ : int = torch.tensor([
-2.3_139, -2.5_594, -0.0_197, -0.6_785, 1.7_001, 1.1_606, 0.3_075, -2.1_740,
1.8_071, -2.5_630, -0.0_926, -0.3_811, 1.2_116, 2.6_246, 1.2_731, -0.5_398,
-2.8_153, -3.6_140, 2.3_893, 1.3_262, 1.6_258, -2.1_856, -1.3_267, 2.8_395,
2.3_779, -1.0_623, -1.2_468, 0.8_959, 3.3_367, 3.2_243
])
lowerCAmelCase_ : Union[str, Any] = torch.tensor([
-2.0_628, -2.7_667, -0.2_089, -0.8_263, 2.0_539, 0.5_992, 0.6_495, -3.8_336,
1.6_025, -3.2_817, 0.1_721, -0.0_633, 1.7_516, 2.7_039, 0.8_100, -0.5_908,
-3.2_113, -4.4_343, 2.9_257, 1.3_632, 1.5_562, -2.1_489, -1.9_894, 3.0_560,
3.3_396, -0.7_328, -1.0_417, 0.0_383, 3.7_093, 3.2_343
])
lowerCAmelCase_ : List[str] = torch.tensor([
-1.4_574, -2.0_569, -0.0_473, -0.6_117, 1.4_018, 0.5_769, 0.4_129, -2.7_344,
1.2_241, -2.1_397, 0.2_000, 0.3_937, 0.7_616, 2.0_453, 0.7_324, -0.3_391,
-2.1_746, -2.7_744, 1.6_963, 0.6_921, 1.2_187, -1.6_172, -0.8_877, 2.2_439,
1.8_471, -0.5_839, -0.5_605, -0.0_464, 2.3_250, 2.1_219
])
# fmt: on
lowerCAmelCase_ : Any = api.list_models(filter='diffusers')
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
lowerCAmelCase_ : Union[str, Any] = '/home/patrick/google_checkpoints/' + mod.modelId.split('/')[-1]
print(f"""Started running {mod.modelId}!!!""")
if mod.modelId.startswith('CompVis'):
lowerCAmelCase_ : Dict = UNetaDModel.from_pretrained(local_checkpoint, subfolder='unet')
else:
lowerCAmelCase_ : int = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
lowerCAmelCase_ : Optional[int] = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
lowerCAmelCase_ : str = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
lowerCAmelCase_ : Optional[Any] = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results['_'.join('_'.join(mod.modelId.split('/')).split('-'))], atol=1e-3
)
print(f"""{mod.modelId} has passed successfully!!!""")
| 63
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
'BridgeTower/bridgetower-base': 'https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json',
'BridgeTower/bridgetower-base-itm-mlm': (
'https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json'
),
}
class A__ ( UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ : Optional[Any] = '''bridgetower_vision_model'''
def __init__( self : int , lowerCAmelCase__ : List[Any]=7_6_8 , lowerCAmelCase__ : Dict=1_2 , lowerCAmelCase__ : Union[str, Any]=3 , lowerCAmelCase__ : Dict=1_6 , lowerCAmelCase__ : int=2_8_8 , lowerCAmelCase__ : Dict=1 , lowerCAmelCase__ : int=1e-05 , lowerCAmelCase__ : Optional[Any]=False , lowerCAmelCase__ : int=True , lowerCAmelCase__ : int=False , **lowerCAmelCase__ : Union[str, Any] , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(**lowerCAmelCase__ )
_UpperCAmelCase : Optional[int] = hidden_size
_UpperCAmelCase : Dict = num_hidden_layers
_UpperCAmelCase : Dict = num_channels
_UpperCAmelCase : Tuple = patch_size
_UpperCAmelCase : str = image_size
_UpperCAmelCase : List[Any] = initializer_factor
_UpperCAmelCase : Any = layer_norm_eps
_UpperCAmelCase : Optional[Any] = stop_gradient
_UpperCAmelCase : List[str] = share_layernorm
_UpperCAmelCase : List[str] = remove_last_layer
@classmethod
def _lowerCAmelCase ( cls : Optional[Any] , lowerCAmelCase__ : Union[str, os.PathLike] , **lowerCAmelCase__ : Any ) -> "PretrainedConfig":
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase : Any = cls.get_config_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
if config_dict.get("model_type" ) == "bridgetower":
_UpperCAmelCase : Optional[Any] = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
class A__ ( UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ : Optional[int] = '''bridgetower_text_model'''
def __init__( self : int , lowerCAmelCase__ : Optional[int]=5_0_2_6_5 , lowerCAmelCase__ : Tuple=7_6_8 , lowerCAmelCase__ : List[str]=1_2 , lowerCAmelCase__ : int=1_2 , lowerCAmelCase__ : str=1 , lowerCAmelCase__ : Optional[Any]=3_0_7_2 , lowerCAmelCase__ : List[Any]="gelu" , lowerCAmelCase__ : str=0.1 , lowerCAmelCase__ : Optional[Any]=0.1 , lowerCAmelCase__ : int=5_1_4 , lowerCAmelCase__ : List[Any]=1 , lowerCAmelCase__ : Any=1e-05 , lowerCAmelCase__ : Optional[Any]=1 , lowerCAmelCase__ : List[Any]=0 , lowerCAmelCase__ : Dict=2 , lowerCAmelCase__ : List[Any]="absolute" , lowerCAmelCase__ : Optional[Any]=True , **lowerCAmelCase__ : Any , ) -> List[Any]:
"""simple docstring"""
super().__init__(**lowerCAmelCase__ )
_UpperCAmelCase : List[str] = vocab_size
_UpperCAmelCase : Optional[Any] = hidden_size
_UpperCAmelCase : Tuple = num_hidden_layers
_UpperCAmelCase : Dict = num_attention_heads
_UpperCAmelCase : Dict = hidden_act
_UpperCAmelCase : int = initializer_factor
_UpperCAmelCase : List[str] = intermediate_size
_UpperCAmelCase : Optional[Any] = hidden_dropout_prob
_UpperCAmelCase : Any = attention_probs_dropout_prob
_UpperCAmelCase : Tuple = max_position_embeddings
_UpperCAmelCase : Any = type_vocab_size
_UpperCAmelCase : Union[str, Any] = layer_norm_eps
_UpperCAmelCase : Optional[Any] = position_embedding_type
_UpperCAmelCase : Optional[int] = use_cache
_UpperCAmelCase : Optional[Any] = pad_token_id
_UpperCAmelCase : Union[str, Any] = bos_token_id
_UpperCAmelCase : int = eos_token_id
@classmethod
def _lowerCAmelCase ( cls : Tuple , lowerCAmelCase__ : Union[str, os.PathLike] , **lowerCAmelCase__ : Dict ) -> "PretrainedConfig":
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase : str = cls.get_config_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
if config_dict.get("model_type" ) == "bridgetower":
_UpperCAmelCase : int = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
class A__ ( UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ : Any = '''bridgetower'''
def __init__( self : List[str] , lowerCAmelCase__ : int=True , lowerCAmelCase__ : Any="gelu" , lowerCAmelCase__ : Dict=7_6_8 , lowerCAmelCase__ : Any=1 , lowerCAmelCase__ : List[str]=1e-05 , lowerCAmelCase__ : int=False , lowerCAmelCase__ : str="add" , lowerCAmelCase__ : Tuple=1_2 , lowerCAmelCase__ : Optional[int]=6 , lowerCAmelCase__ : Optional[Any]=False , lowerCAmelCase__ : Union[str, Any]=False , lowerCAmelCase__ : Tuple=None , lowerCAmelCase__ : List[Any]=None , **lowerCAmelCase__ : Optional[Any] , ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase : Optional[int] = kwargs.pop("text_config_dict" , lowerCAmelCase__ )
_UpperCAmelCase : int = kwargs.pop("vision_config_dict" , lowerCAmelCase__ )
super().__init__(**lowerCAmelCase__ )
_UpperCAmelCase : Optional[int] = share_cross_modal_transformer_layers
_UpperCAmelCase : int = hidden_act
_UpperCAmelCase : str = hidden_size
_UpperCAmelCase : Tuple = initializer_factor
_UpperCAmelCase : Union[str, Any] = layer_norm_eps
_UpperCAmelCase : Tuple = share_link_tower_layers
_UpperCAmelCase : List[str] = link_tower_type
_UpperCAmelCase : int = num_attention_heads
_UpperCAmelCase : Any = num_hidden_layers
_UpperCAmelCase : Optional[int] = tie_word_embeddings
_UpperCAmelCase : int = init_layernorm_from_vision_encoder
if text_config is None:
_UpperCAmelCase : str = {}
logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values." )
if vision_config is None:
_UpperCAmelCase : Union[str, Any] = {}
logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values." )
_UpperCAmelCase : str = BridgeTowerTextConfig(**lowerCAmelCase__ )
_UpperCAmelCase : Union[str, Any] = BridgeTowerVisionConfig(**lowerCAmelCase__ )
@classmethod
def _lowerCAmelCase ( cls : Union[str, Any] , lowerCAmelCase__ : BridgeTowerTextConfig , lowerCAmelCase__ : BridgeTowerVisionConfig , **lowerCAmelCase__ : Union[str, Any] ) -> Any:
"""simple docstring"""
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowerCAmelCase__ )
def _lowerCAmelCase ( self : str ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase : str = copy.deepcopy(self.__dict__ )
_UpperCAmelCase : Union[str, Any] = self.text_config.to_dict()
_UpperCAmelCase : Union[str, Any] = self.vision_config.to_dict()
_UpperCAmelCase : List[str] = self.__class__.model_type
return output
| 145
| 0
|
'''simple docstring'''
from collections.abc import Callable
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , __lowerCAmelCase = None ) -> None:
# Stores actual heap items.
lowercase__ : list = []
# Stores indexes of each item for supporting updates and deletion.
lowercase__ : dict = {}
# Stores current size of heap.
lowercase__ : int = 0
# Stores function used to evaluate the score of an item on which basis ordering
# will be done.
lowercase__ : Tuple = key or (lambda __lowerCAmelCase : x)
def _lowerCAmelCase( self , __lowerCAmelCase ) -> int | None:
return int((i - 1) / 2 ) if i > 0 else None
def _lowerCAmelCase( self , __lowerCAmelCase ) -> int | None:
lowercase__ : Tuple = int(2 * i + 1 )
return left if 0 < left < self.size else None
def _lowerCAmelCase( self , __lowerCAmelCase ) -> int | None:
lowercase__ : Dict = int(2 * i + 2 )
return right if 0 < right < self.size else None
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase ) -> None:
lowercase__ : int = (
self.pos_map[self.arr[j][0]],
self.pos_map[self.arr[i][0]],
)
# Then swap the items in the list.
lowercase__ : Optional[int] = self.arr[j], self.arr[i]
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase ) -> bool:
return self.arr[i][1] < self.arr[j][1]
def _lowerCAmelCase( self , __lowerCAmelCase ) -> int:
lowercase__ : int = self._left(__lowerCAmelCase )
lowercase__ : List[str] = self._right(__lowerCAmelCase )
lowercase__ : List[Any] = i
if left is not None and not self._cmp(__lowerCAmelCase , __lowerCAmelCase ):
lowercase__ : str = left
if right is not None and not self._cmp(__lowerCAmelCase , __lowerCAmelCase ):
lowercase__ : Dict = right
return valid_parent
def _lowerCAmelCase( self , __lowerCAmelCase ) -> None:
lowercase__ : Tuple = self._parent(__lowerCAmelCase )
while parent is not None and not self._cmp(__lowerCAmelCase , __lowerCAmelCase ):
self._swap(__lowerCAmelCase , __lowerCAmelCase )
lowercase__ : Dict = parent, self._parent(__lowerCAmelCase )
def _lowerCAmelCase( self , __lowerCAmelCase ) -> None:
lowercase__ : List[Any] = self._get_valid_parent(__lowerCAmelCase )
while valid_parent != index:
self._swap(__lowerCAmelCase , __lowerCAmelCase )
lowercase__ : str = valid_parent, self._get_valid_parent(__lowerCAmelCase )
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase ) -> None:
if item not in self.pos_map:
return
lowercase__ : List[Any] = self.pos_map[item]
lowercase__ : Tuple = [item, self.key(__lowerCAmelCase )]
# Make sure heap is right in both up and down direction.
# Ideally only one of them will make any change.
self._heapify_up(__lowerCAmelCase )
self._heapify_down(__lowerCAmelCase )
def _lowerCAmelCase( self , __lowerCAmelCase ) -> None:
if item not in self.pos_map:
return
lowercase__ : Optional[int] = self.pos_map[item]
del self.pos_map[item]
lowercase__ : Dict = self.arr[self.size - 1]
lowercase__ : str = index
self.size -= 1
# Make sure heap is right in both up and down direction. Ideally only one
# of them will make any change- so no performance loss in calling both.
if self.size > index:
self._heapify_up(__lowerCAmelCase )
self._heapify_down(__lowerCAmelCase )
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase ) -> None:
lowercase__ : int = len(self.arr )
if arr_len == self.size:
self.arr.append([item, self.key(__lowerCAmelCase )] )
else:
lowercase__ : str = [item, self.key(__lowerCAmelCase )]
lowercase__ : List[str] = self.size
self.size += 1
self._heapify_up(self.size - 1 )
def _lowerCAmelCase( self ) -> tuple | None:
return self.arr[0] if self.size else None
def _lowerCAmelCase( self ) -> tuple | None:
lowercase__ : Optional[int] = self.get_top()
if top_item_tuple:
self.delete_item(top_item_tuple[0] )
return top_item_tuple
def __UpperCamelCase ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 363
|
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=2 , __lowerCAmelCase=32 , __lowerCAmelCase=16 , __lowerCAmelCase=3 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=32 , __lowerCAmelCase=4 , __lowerCAmelCase=[0, 1, 2, 3] , __lowerCAmelCase=4 , __lowerCAmelCase=37 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.0_2 , __lowerCAmelCase=3 , __lowerCAmelCase=[1, 384, 24, 24] , __lowerCAmelCase=True , __lowerCAmelCase=None , ) -> Dict:
lowercase__ : str = parent
lowercase__ : List[Any] = batch_size
lowercase__ : Dict = image_size
lowercase__ : Tuple = patch_size
lowercase__ : str = num_channels
lowercase__ : Dict = is_training
lowercase__ : Optional[int] = use_labels
lowercase__ : List[Any] = hidden_size
lowercase__ : int = num_hidden_layers
lowercase__ : int = backbone_out_indices
lowercase__ : List[str] = num_attention_heads
lowercase__ : Union[str, Any] = intermediate_size
lowercase__ : List[Any] = hidden_act
lowercase__ : str = hidden_dropout_prob
lowercase__ : Tuple = attention_probs_dropout_prob
lowercase__ : List[Any] = initializer_range
lowercase__ : Optional[int] = num_labels
lowercase__ : Optional[int] = backbone_featmap_shape
lowercase__ : int = scope
lowercase__ : List[str] = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
lowercase__ : List[str] = (image_size // patch_size) ** 2
lowercase__ : Tuple = num_patches + 1
def _lowerCAmelCase( self ) -> List[str]:
lowercase__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : int = None
if self.use_labels:
lowercase__ : Tuple = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
lowercase__ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def _lowerCAmelCase( self ) -> Union[str, Any]:
lowercase__ : Optional[Any] = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [96, 192, 384, 768],
'''num_groups''': 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=__lowerCAmelCase , backbone_featmap_shape=self.backbone_featmap_shape , )
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Union[str, Any]:
lowercase__ : Optional[int] = DPTModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
lowercase__ : Dict = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> str:
lowercase__ : Union[str, Any] = self.num_labels
lowercase__ : str = DPTForDepthEstimation(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
lowercase__ : List[str] = model(__lowerCAmelCase )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> List[Any]:
lowercase__ : str = self.num_labels
lowercase__ : Tuple = DPTForSemanticSegmentation(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
lowercase__ : Dict = model(__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def _lowerCAmelCase( self ) -> Optional[int]:
lowercase__ : List[str] = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Dict = config_and_inputs
lowercase__ : Tuple = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( a__ , a__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
SCREAMING_SNAKE_CASE = (
{
"depth-estimation": DPTForDepthEstimation,
"feature-extraction": DPTModel,
"image-segmentation": DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
def _lowerCAmelCase( self ) -> Union[str, Any]:
lowercase__ : str = DPTModelTester(self )
lowercase__ : int = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase , hidden_size=37 )
def _lowerCAmelCase( self ) -> List[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason='''DPT does not use inputs_embeds''' )
def _lowerCAmelCase( self ) -> Tuple:
pass
def _lowerCAmelCase( self ) -> Optional[Any]:
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : str = model_class(__lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__ : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCAmelCase , nn.Linear ) )
def _lowerCAmelCase( self ) -> List[str]:
lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Dict = model_class(__lowerCAmelCase )
lowercase__ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Tuple = [*signature.parameters.keys()]
lowercase__ : int = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __lowerCAmelCase )
def _lowerCAmelCase( self ) -> List[str]:
lowercase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def _lowerCAmelCase( self ) -> Union[str, Any]:
lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*__lowerCAmelCase )
def _lowerCAmelCase( self ) -> Dict:
lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__lowerCAmelCase )
def _lowerCAmelCase( self ) -> Union[str, Any]:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Dict = True
if model_class in get_values(__lowerCAmelCase ):
continue
lowercase__ : List[str] = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.train()
lowercase__ : Tuple = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
lowercase__ : str = model(**__lowerCAmelCase ).loss
loss.backward()
def _lowerCAmelCase( self ) -> Tuple:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Any = False
lowercase__ : str = True
if model_class in get_values(__lowerCAmelCase ) or not model_class.supports_gradient_checkpointing:
continue
lowercase__ : List[str] = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.gradient_checkpointing_enable()
model.train()
lowercase__ : str = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
lowercase__ : List[Any] = model(**__lowerCAmelCase ).loss
loss.backward()
def _lowerCAmelCase( self ) -> List[Any]:
lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Union[str, Any] = _config_zero_init(__lowerCAmelCase )
for model_class in self.all_model_classes:
lowercase__ : Dict = model_class(config=__lowerCAmelCase )
# Skip the check for the backbone
lowercase__ : Union[str, Any] = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
lowercase__ : List[Any] = [F"""{name}.{key}""" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def _lowerCAmelCase( self ) -> List[str]:
pass
@slow
def _lowerCAmelCase( self ) -> List[Any]:
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
lowercase__ : Dict = DPTModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def _lowerCAmelCase( self ) -> str:
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : List[str] = '''add'''
with self.assertRaises(__lowerCAmelCase ):
lowercase__ : Tuple = DPTForDepthEstimation(__lowerCAmelCase )
def __UpperCamelCase ( ):
lowercase__ : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
@slow
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase( self ) -> Any:
lowercase__ : Optional[int] = DPTImageProcessor.from_pretrained('''Intel/dpt-hybrid-midas''' )
lowercase__ : List[Any] = DPTForDepthEstimation.from_pretrained('''Intel/dpt-hybrid-midas''' ).to(__lowerCAmelCase )
lowercase__ : Optional[Any] = prepare_img()
lowercase__ : Optional[Any] = image_processor(images=__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase )
# forward pass
with torch.no_grad():
lowercase__ : Optional[Any] = model(**__lowerCAmelCase )
lowercase__ : str = outputs.predicted_depth
# verify the predicted depth
lowercase__ : Optional[Any] = torch.Size((1, 384, 384) )
self.assertEqual(predicted_depth.shape , __lowerCAmelCase )
lowercase__ : str = torch.tensor(
[[[5.6_4_3_7, 5.6_1_4_6, 5.6_5_1_1], [5.4_3_7_1, 5.5_6_4_9, 5.5_9_5_8], [5.5_2_1_5, 5.5_1_8_4, 5.5_2_9_3]]] ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , __lowerCAmelCase , atol=1E-4 ) )
| 214
| 0
|
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class a :
def __init__( self , _lowerCamelCase , _lowerCamelCase=1_3 , _lowerCamelCase=7 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase=True , _lowerCamelCase=9_9 , _lowerCamelCase=3_2 , _lowerCamelCase=5 , _lowerCamelCase=4 , _lowerCamelCase=3_7 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=5_1_2 , _lowerCamelCase=1_6 , _lowerCamelCase=2 , _lowerCamelCase=0.0_2 , _lowerCamelCase=3 , _lowerCamelCase=4 , _lowerCamelCase=None , ):
lowercase = parent
lowercase = batch_size
lowercase = seq_length
lowercase = is_training
lowercase = use_input_mask
lowercase = use_token_type_ids
lowercase = use_labels
lowercase = vocab_size
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = intermediate_size
lowercase = hidden_act
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = max_position_embeddings
lowercase = type_vocab_size
lowercase = type_sequence_label_size
lowercase = initializer_range
lowercase = num_labels
lowercase = num_choices
lowercase = scope
def UpperCamelCase_ ( self ):
lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase = None
if self.use_input_mask:
lowercase = random_attention_mask([self.batch_size, self.seq_length] )
lowercase = None
if self.use_token_type_ids:
lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase = None
lowercase = None
lowercase = None
if self.use_labels:
lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase = ids_tensor([self.batch_size] , self.num_choices )
lowercase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase_ ( self ):
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , use_stable_embedding=_lowerCamelCase , )
def UpperCamelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowercase = OpenLlamaModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
lowercase = model(_lowerCamelCase , attention_mask=_lowerCamelCase )
lowercase = model(_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
lowercase = True
lowercase = OpenLlamaModel(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
lowercase = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , encoder_attention_mask=_lowerCamelCase , )
lowercase = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , )
lowercase = model(_lowerCamelCase , attention_mask=_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
lowercase = OpenLlamaForCausalLM(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
lowercase = model(_lowerCamelCase , attention_mask=_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
lowercase = True
lowercase = True
lowercase = OpenLlamaForCausalLM(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
# first forward pass
lowercase = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , encoder_attention_mask=_lowerCamelCase , use_cache=_lowerCamelCase , )
lowercase = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowercase = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowercase = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
lowercase = torch.cat([input_ids, next_tokens] , dim=-1 )
lowercase = torch.cat([input_mask, next_mask] , dim=-1 )
lowercase = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , encoder_attention_mask=_lowerCamelCase , output_hidden_states=_lowerCamelCase , )['hidden_states'][0]
lowercase = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , encoder_attention_mask=_lowerCamelCase , past_key_values=_lowerCamelCase , output_hidden_states=_lowerCamelCase , )['hidden_states'][0]
# select random slice
lowercase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowercase = output_from_no_past[:, -3:, random_slice_idx].detach()
lowercase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-3 ) )
def UpperCamelCase_ ( self ):
lowercase = self.prepare_config_and_inputs()
(
(
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) ,
) = config_and_inputs
lowercase = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class a ( a_, a_, a_, unittest.TestCase ):
UpperCAmelCase_ : Optional[Any] =(
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
UpperCAmelCase_ : Optional[int] =(OpenLlamaForCausalLM,) if is_torch_available() else ()
UpperCAmelCase_ : Optional[int] =(
{
"feature-extraction": OpenLlamaModel,
"text-classification": OpenLlamaForSequenceClassification,
"text-generation": OpenLlamaForCausalLM,
"zero-shot": OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase_ : List[str] =False
UpperCAmelCase_ : Optional[int] =False
def UpperCamelCase_ ( self ):
lowercase = OpenLlamaModelTester(self )
lowercase = ConfigTester(self , config_class=_lowerCamelCase , hidden_size=3_7 )
def UpperCamelCase_ ( self ):
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self ):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def UpperCamelCase_ ( self ):
lowercase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowercase = type
self.model_tester.create_and_check_model(*_lowerCamelCase )
def UpperCamelCase_ ( self ):
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
lowercase = 3
lowercase = input_dict['input_ids']
lowercase = input_ids.ne(1 ).to(_lowerCamelCase )
lowercase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowercase = OpenLlamaForSequenceClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
lowercase = model(_lowerCamelCase , attention_mask=_lowerCamelCase , labels=_lowerCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCamelCase_ ( self ):
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
lowercase = 3
lowercase = 'single_label_classification'
lowercase = input_dict['input_ids']
lowercase = input_ids.ne(1 ).to(_lowerCamelCase )
lowercase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowercase = OpenLlamaForSequenceClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
lowercase = model(_lowerCamelCase , attention_mask=_lowerCamelCase , labels=_lowerCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCamelCase_ ( self ):
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
lowercase = 3
lowercase = 'multi_label_classification'
lowercase = input_dict['input_ids']
lowercase = input_ids.ne(1 ).to(_lowerCamelCase )
lowercase = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowercase = OpenLlamaForSequenceClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
lowercase = model(_lowerCamelCase , attention_mask=_lowerCamelCase , labels=_lowerCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('Open-Llama buffers include complex numbers, which breaks this test' )
def UpperCamelCase_ ( self ):
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def UpperCamelCase_ ( self , _lowerCamelCase ):
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
lowercase = ids_tensor([1, 1_0] , config.vocab_size )
lowercase = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
lowercase = OpenLlamaModel(_lowerCamelCase )
original_model.to(_lowerCamelCase )
original_model.eval()
lowercase = original_model(_lowerCamelCase ).last_hidden_state
lowercase = original_model(_lowerCamelCase ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
lowercase = {'type': scaling_type, 'factor': 1_0.0}
lowercase = OpenLlamaModel(_lowerCamelCase )
scaled_model.to(_lowerCamelCase )
scaled_model.eval()
lowercase = scaled_model(_lowerCamelCase ).last_hidden_state
lowercase = scaled_model(_lowerCamelCase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-5 ) )
| 220
|
"""simple docstring"""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class a ( a_ ):
UpperCAmelCase_ : List[Any] =["image_processor", "tokenizer"]
UpperCAmelCase_ : str ="AutoImageProcessor"
UpperCAmelCase_ : Any ="AutoTokenizer"
def __init__( self , _lowerCamelCase=None , _lowerCamelCase=None , **_lowerCamelCase ):
lowercase = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , _lowerCamelCase , )
lowercase = kwargs.pop('feature_extractor' )
lowercase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(_lowerCamelCase , _lowerCamelCase )
lowercase = self.image_processor
lowercase = False
def __call__( self , *_lowerCamelCase , **_lowerCamelCase ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*_lowerCamelCase , **_lowerCamelCase )
lowercase = kwargs.pop('images' , _lowerCamelCase )
lowercase = kwargs.pop('text' , _lowerCamelCase )
if len(_lowerCamelCase ) > 0:
lowercase = args[0]
lowercase = args[1:]
if images is None and text is None:
raise ValueError('You need to specify either an `images` or `text` input to process.' )
if images is not None:
lowercase = self.image_processor(_lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase )
if text is not None:
lowercase = self.tokenizer(_lowerCamelCase , **_lowerCamelCase )
if text is None:
return inputs
elif images is None:
return encodings
else:
lowercase = encodings['input_ids']
return inputs
def UpperCamelCase_ ( self , *_lowerCamelCase , **_lowerCamelCase ):
return self.tokenizer.batch_decode(*_lowerCamelCase , **_lowerCamelCase )
def UpperCamelCase_ ( self , *_lowerCamelCase , **_lowerCamelCase ):
return self.tokenizer.decode(*_lowerCamelCase , **_lowerCamelCase )
@contextmanager
def UpperCamelCase_ ( self ):
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your images inputs, or in a separate call.' )
lowercase = True
lowercase = self.tokenizer
yield
lowercase = self.image_processor
lowercase = False
def UpperCamelCase_ ( self , _lowerCamelCase , _lowerCamelCase=False , _lowerCamelCase=None ):
if added_vocab is None:
lowercase = self.tokenizer.get_added_vocab()
lowercase = {}
while tokens:
lowercase = re.search(R'<s_(.*?)>' , _lowerCamelCase , re.IGNORECASE )
if start_token is None:
break
lowercase = start_token.group(1 )
lowercase = re.search(RF'</s_{key}>' , _lowerCamelCase , re.IGNORECASE )
lowercase = start_token.group()
if end_token is None:
lowercase = tokens.replace(_lowerCamelCase , '' )
else:
lowercase = end_token.group()
lowercase = re.escape(_lowerCamelCase )
lowercase = re.escape(_lowerCamelCase )
lowercase = re.search(F'{start_token_escaped}(.*?){end_token_escaped}' , _lowerCamelCase , re.IGNORECASE )
if content is not None:
lowercase = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
lowercase = self.tokenajson(_lowerCamelCase , is_inner_value=_lowerCamelCase , added_vocab=_lowerCamelCase )
if value:
if len(_lowerCamelCase ) == 1:
lowercase = value[0]
lowercase = value
else: # leaf nodes
lowercase = []
for leaf in content.split(R'<sep/>' ):
lowercase = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
lowercase = leaf[1:-2] # for categorical special tokens
output[key].append(_lowerCamelCase )
if len(output[key] ) == 1:
lowercase = output[key][0]
lowercase = tokens[tokens.find(_lowerCamelCase ) + len(_lowerCamelCase ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] , is_inner_value=_lowerCamelCase , added_vocab=_lowerCamelCase )
if len(_lowerCamelCase ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def UpperCamelCase_ ( self ):
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , _lowerCamelCase , )
return self.image_processor_class
@property
def UpperCamelCase_ ( self ):
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , _lowerCamelCase , )
return self.image_processor
| 220
| 1
|
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
a =[
"""cross_validation.py""",
"""gradient_accumulation.py""",
"""local_sgd.py""",
"""multi_process_metrics.py""",
"""memory.py""",
"""automatic_gradient_accumulation.py""",
"""fsdp_with_peak_mem_tracking.py""",
"""deepspeed_with_config_support.py""",
"""megatron_lm_gpt_pretraining.py""",
]
class A_ ( unittest.TestCase ):
def lowerCAmelCase ( self : str ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : bool ,SCREAMING_SNAKE_CASE__ : str = None ,SCREAMING_SNAKE_CASE__ : list = None):
__lowerCamelCase : List[Any] = None
__lowerCamelCase : Optional[Any] = os.path.abspath(os.path.join('examples' ,'by_feature'))
__lowerCamelCase : Tuple = os.path.abspath('examples')
for item in os.listdir(SCREAMING_SNAKE_CASE__):
if item not in EXCLUDE_EXAMPLES:
__lowerCamelCase : List[Any] = os.path.join(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
if os.path.isfile(SCREAMING_SNAKE_CASE__) and ".py" in item_path:
with self.subTest(
tested_script=SCREAMING_SNAKE_CASE__ ,feature_script=SCREAMING_SNAKE_CASE__ ,tested_section='main()' if parser_only else 'training_function()' ,):
__lowerCamelCase : Dict = compare_against_test(
os.path.join(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : str = '\n'.join(SCREAMING_SNAKE_CASE__)
if special_strings is not None:
for string in special_strings:
__lowerCamelCase : Optional[int] = diff.replace(SCREAMING_SNAKE_CASE__ ,'')
self.assertEqual(SCREAMING_SNAKE_CASE__ ,'')
def lowerCAmelCase ( self : Any):
self.one_complete_example('complete_nlp_example.py' ,SCREAMING_SNAKE_CASE__)
self.one_complete_example('complete_nlp_example.py' ,SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Dict):
__lowerCamelCase : Dict = os.path.abspath(os.path.join('examples' ,'cv_example.py'))
__lowerCamelCase : Optional[int] = [
' ' * 1_6 + '{\n\n',
' ' * 2_0 + '"accuracy": eval_metric["accuracy"],\n\n',
' ' * 2_0 + '"f1": eval_metric["f1"],\n\n',
' ' * 2_0 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
' ' * 2_0 + '"epoch": epoch,\n\n',
' ' * 1_6 + '},\n\n',
' ' * 1_6 + 'step=epoch,\n',
' ' * 1_2,
' ' * 8 + 'for step, batch in enumerate(active_dataloader):\n',
]
self.one_complete_example('complete_cv_example.py' ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
self.one_complete_example('complete_cv_example.py' ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
@mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''1'''} )
class A_ ( SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : List[Any] = False
@classmethod
def lowerCAmelCase ( cls : Any):
super().setUpClass()
__lowerCamelCase : str = tempfile.mkdtemp()
__lowerCamelCase : Dict = os.path.join(cls._tmpdir ,'default_config.yml')
write_basic_config(save_location=cls.configPath)
__lowerCamelCase : List[str] = ['accelerate', 'launch', '--config_file', cls.configPath]
@classmethod
def lowerCAmelCase ( cls : Optional[Any]):
super().tearDownClass()
shutil.rmtree(cls._tmpdir)
def lowerCAmelCase ( self : Tuple):
__lowerCamelCase : List[Any] = F"\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n ".split()
run_command(self._launch_args + testargs)
self.assertTrue(os.path.exists(os.path.join(self.tmpdir ,'epoch_0')))
def lowerCAmelCase ( self : Any):
__lowerCamelCase : int = F"\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n ".split()
__lowerCamelCase : int = run_command(self._launch_args + testargs)
self.assertTrue(os.path.exists(os.path.join(self.tmpdir ,'step_2')))
def lowerCAmelCase ( self : Tuple):
__lowerCamelCase : Dict = F"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir ,'epoch_0')}\n ".split()
__lowerCamelCase : Any = run_command(self._launch_args + testargs ,return_stdout=SCREAMING_SNAKE_CASE__)
self.assertNotIn('epoch 0:' ,SCREAMING_SNAKE_CASE__)
self.assertIn('epoch 1:' ,SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : List[Any]):
__lowerCamelCase : Any = F"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir ,'step_2')}\n ".split()
__lowerCamelCase : Optional[int] = run_command(self._launch_args + testargs ,return_stdout=SCREAMING_SNAKE_CASE__)
if torch.cuda.is_available():
__lowerCamelCase : Dict = torch.cuda.device_count()
else:
__lowerCamelCase : List[Any] = 1
if num_processes > 1:
self.assertNotIn('epoch 0:' ,SCREAMING_SNAKE_CASE__)
self.assertIn('epoch 1:' ,SCREAMING_SNAKE_CASE__)
else:
self.assertIn('epoch 0:' ,SCREAMING_SNAKE_CASE__)
self.assertIn('epoch 1:' ,SCREAMING_SNAKE_CASE__)
@slow
def lowerCAmelCase ( self : int):
__lowerCamelCase : Any = '\n examples/by_feature/cross_validation.py\n --num_folds 2\n '.split()
with mock.patch.dict(os.environ ,{'TESTING_MOCKED_DATALOADERS': '0'}):
__lowerCamelCase : Optional[int] = run_command(self._launch_args + testargs ,return_stdout=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : str = re.findall('({.+})' ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[int] = [r for r in results if 'accuracy' in r][-1]
__lowerCamelCase : List[str] = ast.literal_eval(SCREAMING_SNAKE_CASE__)
self.assertGreaterEqual(results['accuracy'] ,0.75)
def lowerCAmelCase ( self : Optional[Any]):
__lowerCamelCase : str = ['examples/by_feature/multi_process_metrics.py']
run_command(self._launch_args + testargs)
@require_trackers
@mock.patch.dict(os.environ ,{'WANDB_MODE': 'offline'})
def lowerCAmelCase ( self : str):
with tempfile.TemporaryDirectory() as tmpdir:
__lowerCamelCase : Any = F"\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n ".split()
run_command(self._launch_args + testargs)
self.assertTrue(os.path.exists(os.path.join(SCREAMING_SNAKE_CASE__ ,'tracking')))
def lowerCAmelCase ( self : str):
__lowerCamelCase : Dict = ['examples/by_feature/gradient_accumulation.py']
run_command(self._launch_args + testargs)
def lowerCAmelCase ( self : List[str]):
__lowerCamelCase : str = ['examples/by_feature/local_sgd.py']
run_command(self._launch_args + testargs)
| 113
|
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class A_ ( unittest.TestCase ):
def lowerCAmelCase ( self : Optional[Any]):
# A mock response for an HTTP head request to emulate server down
__lowerCamelCase : List[Any] = mock.Mock()
__lowerCamelCase : Tuple = 5_0_0
__lowerCamelCase : Dict = {}
__lowerCamelCase : List[Any] = HTTPError
__lowerCamelCase : int = {}
# Download this model to make sure it's in the cache.
__lowerCamelCase : Optional[int] = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert')
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' ,return_value=SCREAMING_SNAKE_CASE__) as mock_head:
__lowerCamelCase : Tuple = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert')
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def lowerCAmelCase ( self : Any):
# A mock response for an HTTP head request to emulate server down
__lowerCamelCase : int = mock.Mock()
__lowerCamelCase : List[str] = 5_0_0
__lowerCamelCase : Tuple = {}
__lowerCamelCase : List[str] = HTTPError
__lowerCamelCase : int = {}
# Download this model to make sure it's in the cache.
__lowerCamelCase : Dict = GPTaTokenizerFast.from_pretrained('gpt2')
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' ,return_value=SCREAMING_SNAKE_CASE__) as mock_head:
__lowerCamelCase : Optional[int] = GPTaTokenizerFast.from_pretrained('gpt2')
# This check we did call the fake head request
mock_head.assert_called()
def lowerCAmelCase ( self : str):
# This test is for deprecated behavior and can be removed in v5
try:
__lowerCamelCase : Tuple = tempfile.mktemp()
with open(SCREAMING_SNAKE_CASE__ ,'wb') as f:
http_get('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Dict = AlbertTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__)
finally:
os.remove(SCREAMING_SNAKE_CASE__)
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile('tokenizer.json'):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open('tokenizer.json' ,'wb') as f:
http_get('https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json' ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Any = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2')
# The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
self.assertEqual(tokenizer.vocab_size ,1_0_0_0)
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove('tokenizer.json')
def lowerCAmelCase ( self : Optional[Any]):
# This test is for deprecated behavior and can be removed in v5
__lowerCamelCase : str = AlbertTokenizer.from_pretrained('https://huggingface.co/albert-base-v1/resolve/main/spiece.model')
@is_staging_test
class A_ ( unittest.TestCase ):
_UpperCAmelCase : Any = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']
@classmethod
def lowerCAmelCase ( cls : Optional[int]):
__lowerCamelCase : Optional[int] = TOKEN
HfFolder.save_token(SCREAMING_SNAKE_CASE__)
@classmethod
def lowerCAmelCase ( cls : str):
try:
delete_repo(token=cls._token ,repo_id='test-tokenizer')
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='valid_org/test-tokenizer-org')
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='test-dynamic-tokenizer')
except HTTPError:
pass
def lowerCAmelCase ( self : List[str]):
with tempfile.TemporaryDirectory() as tmp_dir:
__lowerCamelCase : Dict = os.path.join(SCREAMING_SNAKE_CASE__ ,'vocab.txt')
with open(SCREAMING_SNAKE_CASE__ ,'w' ,encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens]))
__lowerCamelCase : Union[str, Any] = BertTokenizer(SCREAMING_SNAKE_CASE__)
tokenizer.push_to_hub('test-tokenizer' ,use_auth_token=self._token)
__lowerCamelCase : List[str] = BertTokenizer.from_pretrained(F"{USER}/test-tokenizer")
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab)
# Reset repo
delete_repo(token=self._token ,repo_id='test-tokenizer')
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ ,repo_id='test-tokenizer' ,push_to_hub=SCREAMING_SNAKE_CASE__ ,use_auth_token=self._token)
__lowerCamelCase : str = BertTokenizer.from_pretrained(F"{USER}/test-tokenizer")
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab)
def lowerCAmelCase ( self : Any):
with tempfile.TemporaryDirectory() as tmp_dir:
__lowerCamelCase : Dict = os.path.join(SCREAMING_SNAKE_CASE__ ,'vocab.txt')
with open(SCREAMING_SNAKE_CASE__ ,'w' ,encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens]))
__lowerCamelCase : Optional[Any] = BertTokenizer(SCREAMING_SNAKE_CASE__)
tokenizer.push_to_hub('valid_org/test-tokenizer-org' ,use_auth_token=self._token)
__lowerCamelCase : List[Any] = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org')
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab)
# Reset repo
delete_repo(token=self._token ,repo_id='valid_org/test-tokenizer-org')
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
SCREAMING_SNAKE_CASE__ ,repo_id='valid_org/test-tokenizer-org' ,push_to_hub=SCREAMING_SNAKE_CASE__ ,use_auth_token=self._token)
__lowerCamelCase : List[Any] = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org')
self.assertDictEqual(new_tokenizer.vocab ,tokenizer.vocab)
@require_tokenizers
def lowerCAmelCase ( self : int):
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
__lowerCamelCase : Dict = os.path.join(SCREAMING_SNAKE_CASE__ ,'vocab.txt')
with open(SCREAMING_SNAKE_CASE__ ,'w' ,encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens]))
__lowerCamelCase : Any = CustomTokenizer(SCREAMING_SNAKE_CASE__)
# No fast custom tokenizer
tokenizer.push_to_hub('test-dynamic-tokenizer' ,use_auth_token=self._token)
__lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained(F"{USER}/test-dynamic-tokenizer" ,trust_remote_code=SCREAMING_SNAKE_CASE__)
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,'CustomTokenizer')
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
__lowerCamelCase : List[str] = os.path.join(SCREAMING_SNAKE_CASE__ ,'vocab.txt')
with open(SCREAMING_SNAKE_CASE__ ,'w' ,encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens]))
__lowerCamelCase : Dict = BertTokenizerFast.from_pretrained(SCREAMING_SNAKE_CASE__)
bert_tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[Any] = CustomTokenizerFast.from_pretrained(SCREAMING_SNAKE_CASE__)
tokenizer.push_to_hub('test-dynamic-tokenizer' ,use_auth_token=self._token)
__lowerCamelCase : Optional[Any] = AutoTokenizer.from_pretrained(F"{USER}/test-dynamic-tokenizer" ,trust_remote_code=SCREAMING_SNAKE_CASE__)
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,'CustomTokenizerFast')
__lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained(
F"{USER}/test-dynamic-tokenizer" ,use_fast=SCREAMING_SNAKE_CASE__ ,trust_remote_code=SCREAMING_SNAKE_CASE__)
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ ,'CustomTokenizer')
class A_ ( unittest.TestCase ):
def lowerCAmelCase ( self : List[Any]):
__lowerCamelCase : List[str] = Trie()
trie.add('Hello 友達')
self.assertEqual(trie.data ,{'H': {'e': {'l': {'l': {'o': {' ': {'友': {'達': {'': 1}}}}}}}}})
trie.add('Hello')
trie.data
self.assertEqual(trie.data ,{'H': {'e': {'l': {'l': {'o': {'': 1, ' ': {'友': {'達': {'': 1}}}}}}}}})
def lowerCAmelCase ( self : Tuple):
__lowerCamelCase : str = Trie()
self.assertEqual(trie.split('[CLS] This is a extra_id_100') ,['[CLS] This is a extra_id_100'])
trie.add('[CLS]')
trie.add('extra_id_1')
trie.add('extra_id_100')
self.assertEqual(trie.split('[CLS] This is a extra_id_100') ,['[CLS]', ' This is a ', 'extra_id_100'])
def lowerCAmelCase ( self : Optional[Any]):
__lowerCamelCase : Dict = Trie()
trie.add('A')
self.assertEqual(trie.split('ABC') ,['A', 'BC'])
self.assertEqual(trie.split('BCA') ,['BC', 'A'])
def lowerCAmelCase ( self : Dict):
__lowerCamelCase : Any = Trie()
trie.add('TOKEN]')
trie.add('[SPECIAL_TOKEN]')
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]') ,['This is something ', '[SPECIAL_TOKEN]'])
def lowerCAmelCase ( self : int):
__lowerCamelCase : List[Any] = Trie()
trie.add('A')
trie.add('P')
trie.add('[SPECIAL_TOKEN]')
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]') ,['This is something ', '[SPECIAL_TOKEN]'])
def lowerCAmelCase ( self : Optional[int]):
__lowerCamelCase : Any = Trie()
trie.add('AB')
trie.add('B')
trie.add('C')
self.assertEqual(trie.split('ABC') ,['AB', 'C'])
def lowerCAmelCase ( self : Optional[int]):
__lowerCamelCase : Dict = Trie()
trie.add('ABC')
trie.add('B')
trie.add('CD')
self.assertEqual(trie.split('ABCD') ,['ABC', 'D'])
def lowerCAmelCase ( self : List[Any]):
# Even if the offsets are wrong, we necessarily output correct string
# parts.
__lowerCamelCase : List[Any] = Trie()
__lowerCamelCase : Any = trie.cut_text('ABC' ,[0, 0, 2, 1, 2, 3])
self.assertEqual(SCREAMING_SNAKE_CASE__ ,['AB', 'C'])
| 113
| 1
|
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=lowercase )
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = field(default="""text-classification""", metadata={"""include_in_asdict_even_if_is_default""": True} )
lowerCamelCase__ = Features({"""text""": Value("""string""" )} )
lowerCamelCase__ = Features({"""labels""": ClassLabel} )
lowerCamelCase__ = "text"
lowerCamelCase__ = "labels"
def A_ ( self , lowercase ):
if self.label_column not in features:
raise ValueError(F'''Column {self.label_column} is not present in features.''' )
if not isinstance(features[self.label_column] , lowercase ):
raise ValueError(F'''Column {self.label_column} is not a ClassLabel.''' )
_lowerCamelCase : Optional[Any] = copy.deepcopy(self )
_lowerCamelCase : List[str] = self.label_schema.copy()
_lowerCamelCase : Optional[int] = features[self.label_column]
_lowerCamelCase : Optional[int] = label_schema
return task_template
@property
def A_ ( self ):
return {
self.text_column: "text",
self.label_column: "labels",
}
| 96
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase__ = {"""configuration_ibert""": ["""IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """IBertConfig""", """IBertOnnxConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
"""IBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""IBertForMaskedLM""",
"""IBertForMultipleChoice""",
"""IBertForQuestionAnswering""",
"""IBertForSequenceClassification""",
"""IBertForTokenClassification""",
"""IBertModel""",
"""IBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
lowercase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 96
| 1
|
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class a ( _SCREAMING_SNAKE_CASE ):
def __init__( self , __magic_name__ , __magic_name__ , __magic_name__ ) -> Any:
super().__init__()
self.register_modules(vqvae=__magic_name__ , unet=__magic_name__ , scheduler=__magic_name__ )
@torch.no_grad()
def __call__( self , __magic_name__ = 1 , __magic_name__ = None , __magic_name__ = 0.0 , __magic_name__ = 50 , __magic_name__ = "pil" , __magic_name__ = True , **__magic_name__ , ) -> Union[Tuple, ImagePipelineOutput]:
_a = randn_tensor(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=__magic_name__ , )
_a = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
_a = latents * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(__magic_name__ )
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
_a = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_a = {}
if accepts_eta:
_a = eta
for t in self.progress_bar(self.scheduler.timesteps ):
_a = self.scheduler.scale_model_input(__magic_name__ , __magic_name__ )
# predict the noise residual
_a = self.unet(__magic_name__ , __magic_name__ ).sample
# compute the previous noisy sample x_t -> x_t-1
_a = self.scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample
# decode the image latents with the VAE
_a = self.vqvae.decode(__magic_name__ ).sample
_a = (image / 2 + 0.5).clamp(0 , 1 )
_a = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_a = self.numpy_to_pil(__magic_name__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__magic_name__ )
| 354
|
'''simple docstring'''
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def _A (lowerCAmelCase__ :int , lowerCAmelCase__ :Any , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Dict ) -> Optional[int]:
'''simple docstring'''
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
_a = np.full((len(lowerCAmelCase__ ), sequence_length, 2) , lowerCAmelCase__ )
else:
_a = np.full((len(lowerCAmelCase__ ), sequence_length) , lowerCAmelCase__ )
for i, tensor in enumerate(lowerCAmelCase__ ):
if padding_side == "right":
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
_a = tensor[:sequence_length]
else:
_a = tensor[:sequence_length]
else:
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
_a = tensor[:sequence_length]
else:
_a = tensor[:sequence_length]
return out_tensor.tolist()
def _A (lowerCAmelCase__ :Any ) -> Union[str, Any]:
'''simple docstring'''
_a = ord(lowerCAmelCase__ )
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 1_23 and cp <= 1_26):
return True
_a = unicodedata.category(lowerCAmelCase__ )
if cat.startswith('P' ):
return True
return False
@dataclass
class a ( _SCREAMING_SNAKE_CASE ):
_lowerCAmelCase = 42
_lowerCAmelCase = True
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = -1_0_0
_lowerCAmelCase = "pt"
def __UpperCAmelCase ( self , __magic_name__ ) -> Any:
import torch
_a = 'label' if 'label' in features[0].keys() else 'labels'
_a = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
_a = self.tokenizer.pad(
__magic_name__ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' if labels is None else None , )
if labels is None:
return batch
_a = torch.tensor(batch['entity_ids'] ).shape[1]
_a = self.tokenizer.padding_side
if padding_side == "right":
_a = [
list(__magic_name__ ) + [self.label_pad_token_id] * (sequence_length - len(__magic_name__ )) for label in labels
]
else:
_a = [
[self.label_pad_token_id] * (sequence_length - len(__magic_name__ )) + list(__magic_name__ ) for label in labels
]
_a = [feature['ner_tags'] for feature in features]
_a = padding_tensor(__magic_name__ , -1 , __magic_name__ , __magic_name__ )
_a = [feature['original_entity_spans'] for feature in features]
_a = padding_tensor(__magic_name__ , (-1, -1) , __magic_name__ , __magic_name__ )
_a = {k: torch.tensor(__magic_name__ , dtype=torch.intaa ) for k, v in batch.items()}
return batch
| 104
| 0
|
'''simple docstring'''
from __future__ import annotations
def __a ( UpperCAmelCase , UpperCAmelCase ) ->list[str]:
"""simple docstring"""
if partitions <= 0:
raise ValueError("""partitions must be a positive number!""" )
if partitions > number_of_bytes:
raise ValueError("""partitions can not > number_of_bytes!""" )
A = number_of_bytes // partitions
A = []
for i in range(UpperCAmelCase ):
A = i * bytes_per_partition + 1
A = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(f"""{start_bytes}-{end_bytes}""" )
return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
| 258
|
'''simple docstring'''
def __a ( UpperCAmelCase , UpperCAmelCase ) ->int:
"""simple docstring"""
return int((input_a, input_a).count(1 ) != 0 )
def __a ( ) ->None:
"""simple docstring"""
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 258
| 1
|
import os
import time
import numpy as np
import onnxruntime as ort
snake_case : int = "1"
snake_case : int = "0"
snake_case : int = "1"
snake_case : Tuple = ort.SessionOptions()
snake_case : Optional[Any] = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print("Create inference session...")
snake_case : Dict = ["TensorrtExecutionProvider", "CUDAExecutionProvider"]
snake_case : Union[str, Any] = ort.InferenceSession("model.onnx", sess_options=sess_opt, providers=execution_provider)
snake_case : Optional[Any] = ort.RunOptions()
snake_case : List[str] = 128
snake_case : List[str] = 1
snake_case : List[str] = np.ones((batch, sequence), dtype=np.intaa)
snake_case : List[str] = np.ones((batch, sequence), dtype=np.intaa)
snake_case : Union[str, Any] = np.ones((batch, sequence), dtype=np.intaa)
print("Warm up phase...")
sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print("Start inference...")
snake_case : str = time.time()
snake_case : Tuple = 2_000
snake_case : Dict = {}
for iter in range(max_iters):
snake_case : Tuple = sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print("Average Inference Time = {:.3f} ms".format((time.time() - start_time) * 1_000 / max_iters))
| 41
|
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class _snake_case ( snake_case ):
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Union[str, Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_a , "tf_padding" ) )
self.parent.assertTrue(hasattr(_a , "depth_multiplier" ) )
class _snake_case :
def __init__( self , _a , _a=13 , _a=3 , _a=32 , _a=0.25 , _a=8 , _a=True , _a=1_024 , _a=32 , _a="relu6" , _a=0.1 , _a=0.02 , _a=True , _a=True , _a=10 , _a=None , ):
__magic_name__ : Optional[int] = parent
__magic_name__ : Union[str, Any] = batch_size
__magic_name__ : Tuple = num_channels
__magic_name__ : Tuple = image_size
__magic_name__ : Any = depth_multiplier
__magic_name__ : Any = min_depth
__magic_name__ : Any = tf_padding
__magic_name__ : int = int(last_hidden_size * depth_multiplier )
__magic_name__ : Any = output_stride
__magic_name__ : Tuple = hidden_act
__magic_name__ : Optional[int] = classifier_dropout_prob
__magic_name__ : Any = use_labels
__magic_name__ : str = is_training
__magic_name__ : List[str] = num_labels
__magic_name__ : str = initializer_range
__magic_name__ : Union[str, Any] = scope
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__magic_name__ : Any = None
__magic_name__ : Optional[Any] = None
if self.use_labels:
__magic_name__ : int = ids_tensor([self.batch_size] , self.num_labels )
__magic_name__ : Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__magic_name__ : List[Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def SCREAMING_SNAKE_CASE ( self ):
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE ( self , _a , _a , _a , _a ):
__magic_name__ : List[str] = MobileNetVaModel(config=_a )
model.to(_a )
model.eval()
__magic_name__ : Optional[int] = model(_a )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def SCREAMING_SNAKE_CASE ( self , _a , _a , _a , _a ):
__magic_name__ : Any = self.num_labels
__magic_name__ : Dict = MobileNetVaForImageClassification(_a )
model.to(_a )
model.eval()
__magic_name__ : Any = model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Any = self.prepare_config_and_inputs()
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : Optional[int] = config_and_inputs
__magic_name__ : Union[str, Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _snake_case ( snake_case , snake_case , unittest.TestCase ):
UpperCamelCase__ = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
UpperCamelCase__ = (
{'feature-extraction': MobileNetVaModel, 'image-classification': MobileNetVaForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : List[Any] = MobileNetVaModelTester(self )
__magic_name__ : List[str] = MobileNetVaConfigTester(self , config_class=_a , has_text_modality=_a )
def SCREAMING_SNAKE_CASE ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileNetV1 does not use inputs_embeds" )
def SCREAMING_SNAKE_CASE ( self ):
pass
@unittest.skip(reason="MobileNetV1 does not support input and output embeddings" )
def SCREAMING_SNAKE_CASE ( self ):
pass
@unittest.skip(reason="MobileNetV1 does not output attentions" )
def SCREAMING_SNAKE_CASE ( self ):
pass
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ , __magic_name__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ : str = model_class(_a )
__magic_name__ : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__magic_name__ : Tuple = [*signature.parameters.keys()]
__magic_name__ : List[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _a )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def SCREAMING_SNAKE_CASE ( self ):
def check_hidden_states_output(_a , _a , _a ):
__magic_name__ : Dict = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
__magic_name__ : Tuple = model(**self._prepare_for_class(_a , _a ) )
__magic_name__ : Optional[Any] = outputs.hidden_states
__magic_name__ : Optional[Any] = 26
self.assertEqual(len(_a ) , _a )
__magic_name__ , __magic_name__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ : Tuple = True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__magic_name__ : Dict = True
check_hidden_states_output(_a , _a , _a )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def SCREAMING_SNAKE_CASE ( self ):
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ : List[str] = MobileNetVaModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def lowerCAmelCase_ ( ) -> int:
'''simple docstring'''
__magic_name__ : Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _snake_case ( unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE ( self ):
return (
MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224" ) if is_vision_available() else None
)
@slow
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : str = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224" ).to(_a )
__magic_name__ : Dict = self.default_image_processor
__magic_name__ : List[str] = prepare_img()
__magic_name__ : Optional[Any] = image_processor(images=_a , return_tensors="pt" ).to(_a )
# forward pass
with torch.no_grad():
__magic_name__ : Union[str, Any] = model(**_a )
# verify the logits
__magic_name__ : List[str] = torch.Size((1, 1_001) )
self.assertEqual(outputs.logits.shape , _a )
__magic_name__ : Union[str, Any] = torch.tensor([-4.17_39, -1.12_33, 3.12_05] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) )
| 41
| 1
|
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" ,"False" ) ) is not True ,reason="Skipping test because should only be run when releasing minor transformers version" ,)
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 6_5_0, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "pytorch",
"script": "run_ddp.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 6_0_0, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "tensorflow",
"script": "run_tf_dist.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 6_0_0, "eval_accuracy": 0.6, "eval_loss": 0.7},
},
] )
class __A ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase ( self : str ) -> str:
"""simple docstring"""
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() ,encoding='''utf-8''' ,check=_snake_case ,)
assert hasattr(self ,'''env''' )
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : Any ) -> str:
"""simple docstring"""
lowercase__ : Any = f"""{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}"""
# distributed data settings
lowercase__ : List[str] = {'smdistributed': {'dataparallel': {'enabled': True}}} if self.script != 'run_ddp.py' else None
# creates estimator
return HuggingFace(
entry_point=self.script ,source_dir=self.env.test_path ,role=self.env.role ,image_uri=self.env.image_uri ,base_job_name=_snake_case ,instance_count=_snake_case ,instance_type=self.instance_type ,debugger_hook_config=_snake_case ,hyperparameters={**self.env.distributed_hyperparameters, '''model_name_or_path''': self.model_name_or_path} ,metric_definitions=self.env.metric_definitions ,distribution=_snake_case ,py_version='''py36''' ,)
def UpperCAmelCase ( self : Dict ,_snake_case : str ) -> Union[str, Any]:
"""simple docstring"""
TrainingJobAnalytics(_snake_case ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(2,)] )
def UpperCAmelCase ( self : List[Any] ,_snake_case : int ) -> int:
"""simple docstring"""
lowercase__ : Union[str, Any] = self.create_estimator(_snake_case )
# run training
estimator.fit()
# result dataframe
lowercase__ : List[Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
lowercase__ : Dict = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
lowercase__ : str = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
lowercase__ : List[str] = (
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' ,999_999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""" ,'''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} ,_snake_case )
| 16
|
def snake_case__ ( SCREAMING_SNAKE_CASE_ : str = "The quick brown fox jumps over the lazy dog" , ):
'''simple docstring'''
lowercase__ : str = set()
# Replace all the whitespace in our sentence
lowercase__ : Tuple = input_str.replace(' ' , '' )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(SCREAMING_SNAKE_CASE_ ) == 26
def snake_case__ ( SCREAMING_SNAKE_CASE_ : str = "The quick brown fox jumps over the lazy dog" , ):
'''simple docstring'''
lowercase__ : Dict = [False] * 26
for char in input_str:
if char.islower():
lowercase__ : List[Any] = True
elif char.isupper():
lowercase__ : Optional[Any] = True
return all(SCREAMING_SNAKE_CASE_ )
def snake_case__ ( SCREAMING_SNAKE_CASE_ : str = "The quick brown fox jumps over the lazy dog" , ):
'''simple docstring'''
return len({char for char in input_str.lower() if char.isalpha()} ) == 26
def snake_case__ ( ):
'''simple docstring'''
from timeit import timeit
lowercase__ : Union[str, Any] = 'from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest'
print(timeit('is_pangram()' , setup=SCREAMING_SNAKE_CASE_ ) )
print(timeit('is_pangram_faster()' , setup=SCREAMING_SNAKE_CASE_ ) )
print(timeit('is_pangram_fastest()' , setup=SCREAMING_SNAKE_CASE_ ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 214
| 0
|
'''simple docstring'''
from __future__ import annotations
def __UpperCAmelCase ( a_: list[int] ):
if not nums:
return 0
_UpperCAmelCase : int = nums[0]
_UpperCAmelCase : Dict = 0
for num in nums[1:]:
_UpperCAmelCase : Any = (
max_excluding + num,
max(a_, a_ ),
)
return max(a_, a_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 368
|
'''simple docstring'''
import baseaa
def __UpperCAmelCase ( a_: str ):
return baseaa.baaencode(string.encode("utf-8" ) )
def __UpperCAmelCase ( a_: bytes ):
return baseaa.baadecode(a_ ).decode("utf-8" )
if __name__ == "__main__":
__a = 'Hello World!'
__a = baseaa_encode(test)
print(encoded)
__a = baseaa_decode(encoded)
print(decoded)
| 17
| 0
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class lowerCAmelCase ( lowerCamelCase_ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = ShapEImgaImgPipeline
SCREAMING_SNAKE_CASE_ : List[str] = ["""image"""]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ["""image"""]
SCREAMING_SNAKE_CASE_ : Optional[int] = [
"""num_images_per_prompt""",
"""num_inference_steps""",
"""generator""",
"""latents""",
"""guidance_scale""",
"""frame_size""",
"""output_type""",
"""return_dict""",
]
SCREAMING_SNAKE_CASE_ : Dict = False
@property
def __A ( self ) -> Optional[int]:
return 32
@property
def __A ( self ) -> Union[str, Any]:
return 32
@property
def __A ( self ) -> Union[str, Any]:
return self.time_input_dim * 4
@property
def __A ( self ) -> Tuple:
return 8
@property
def __A ( self ) -> List[Any]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
SCREAMING_SNAKE_CASE = CLIPVisionModel(lowerCAmelCase__ )
return model
@property
def __A ( self ) -> str:
SCREAMING_SNAKE_CASE = CLIPImageProcessor(
crop_size=224 , do_center_crop=lowerCAmelCase__ , do_normalize=lowerCAmelCase__ , do_resize=lowerCAmelCase__ , image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , resample=3 , size=224 , )
return image_processor
@property
def __A ( self ) -> str:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'embedding_proj_norm_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
SCREAMING_SNAKE_CASE = PriorTransformer(**lowerCAmelCase__ )
return model
@property
def __A ( self ) -> Dict:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
SCREAMING_SNAKE_CASE = ShapERenderer(**lowerCAmelCase__ )
return model
def __A ( self ) -> Any:
SCREAMING_SNAKE_CASE = self.dummy_prior
SCREAMING_SNAKE_CASE = self.dummy_image_encoder
SCREAMING_SNAKE_CASE = self.dummy_image_processor
SCREAMING_SNAKE_CASE = self.dummy_renderer
SCREAMING_SNAKE_CASE = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=1_024 , prediction_type='sample' , use_karras_sigmas=lowerCAmelCase__ , clip_sample=lowerCAmelCase__ , clip_sample_range=1.0 , )
SCREAMING_SNAKE_CASE = {
'prior': prior,
'image_encoder': image_encoder,
'image_processor': image_processor,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def __A ( self , lowerCAmelCase__ , lowerCAmelCase__=0 ) -> Optional[int]:
SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
if str(lowerCAmelCase__ ).startswith('mps' ):
SCREAMING_SNAKE_CASE = torch.manual_seed(lowerCAmelCase__ )
else:
SCREAMING_SNAKE_CASE = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = {
'image': input_image,
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def __A ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE = 'cpu'
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = self.pipeline_class(**lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = pipe(**self.get_dummy_inputs(lowerCAmelCase__ ) )
SCREAMING_SNAKE_CASE = output.images[0]
SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
SCREAMING_SNAKE_CASE = np.array(
[
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self ) -> Optional[Any]:
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __A ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE = torch_device == 'cpu'
SCREAMING_SNAKE_CASE = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=lowerCAmelCase__ , relax_max_difference=lowerCAmelCase__ , )
def __A ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = self.pipeline_class(**lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(lowerCAmelCase__ )
for key in inputs.keys():
if key in self.batch_params:
SCREAMING_SNAKE_CASE = batch_size * [inputs[key]]
SCREAMING_SNAKE_CASE = pipe(**lowerCAmelCase__ , num_images_per_prompt=lowerCAmelCase__ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __A ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ) -> str:
SCREAMING_SNAKE_CASE = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/corgi.png' )
SCREAMING_SNAKE_CASE = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_img2img_out.npy' )
SCREAMING_SNAKE_CASE = ShapEImgaImgPipeline.from_pretrained('openai/shap-e-img2img' )
SCREAMING_SNAKE_CASE = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
SCREAMING_SNAKE_CASE = pipe(
lowerCAmelCase__ , generator=lowerCAmelCase__ , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ )
| 113
|
"""simple docstring"""
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=7 , lowerCAmelCase__=3 , lowerCAmelCase__=18 , lowerCAmelCase__=30 , lowerCAmelCase__=400 , lowerCAmelCase__=True , lowerCAmelCase__=None , lowerCAmelCase__=True , ) -> int:
SCREAMING_SNAKE_CASE = size if size is not None else {'height': 18, 'width': 18}
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = min_resolution
SCREAMING_SNAKE_CASE = max_resolution
SCREAMING_SNAKE_CASE = do_resize
SCREAMING_SNAKE_CASE = size
SCREAMING_SNAKE_CASE = do_normalize
def __A ( self ) -> Optional[Any]:
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.88_66_44_36_34_03_32_03, 0.66_18_82_93_69_54_49_83, 0.38_91_74_64_01_78_68_04],
[-0.60_42_55_91_46_88_11_04, -0.0_22_95_00_88_60_52_84_69, 0.54_23_79_73_69_00_32_96],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class lowerCAmelCase ( lowerCamelCase_ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ImageGPTImageProcessor if is_vision_available() else None
def __A ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE = ImageGPTImageProcessingTester(self )
@property
def __A ( self ) -> Any:
return self.image_processor_tester.prepare_image_processor_dict()
def __A ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , 'clusters' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , 'do_resize' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , 'size' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , 'do_normalize' ) )
def __A ( self ) -> Tuple:
SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def __A ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
SCREAMING_SNAKE_CASE = json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowerCAmelCase__ , obj[key] ) )
else:
self.assertEqual(obj[key] , lowerCAmelCase__ )
def __A ( self ) -> List[str]:
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE = os.path.join(lowerCAmelCase__ , 'image_processor.json' )
image_processor_first.to_json_file(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = self.image_processing_class.from_json_file(lowerCAmelCase__ ).to_dict()
SCREAMING_SNAKE_CASE = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowerCAmelCase__ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , lowerCAmelCase__ )
def __A ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = self.image_processing_class.from_pretrained(lowerCAmelCase__ ).to_dict()
SCREAMING_SNAKE_CASE = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowerCAmelCase__ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , lowerCAmelCase__ )
@unittest.skip('ImageGPT requires clusters at initialization' )
def __A ( self ) -> Optional[Any]:
pass
def lowercase () -> Union[str, Any]:
SCREAMING_SNAKE_CASE = load_dataset('hf-internal-testing/fixtures_image_utils' , split='test' )
SCREAMING_SNAKE_CASE = Image.open(dataset[4]['file'] )
SCREAMING_SNAKE_CASE = Image.open(dataset[5]['file'] )
SCREAMING_SNAKE_CASE = [imagea, imagea]
return images
@require_vision
@require_torch
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __A ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE = ImageGPTImageProcessor.from_pretrained('openai/imagegpt-small' )
SCREAMING_SNAKE_CASE = prepare_images()
# test non-batched
SCREAMING_SNAKE_CASE = image_processing(images[0] , return_tensors='pt' )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (1, 1_024) )
SCREAMING_SNAKE_CASE = [306, 191, 191]
self.assertEqual(encoding.input_ids[0, :3].tolist() , lowerCAmelCase__ )
# test batched
SCREAMING_SNAKE_CASE = image_processing(lowerCAmelCase__ , return_tensors='pt' )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (2, 1_024) )
SCREAMING_SNAKE_CASE = [303, 13, 13]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , lowerCAmelCase__ )
| 113
| 1
|
"""simple docstring"""
def a__ ( __lowercase = 400_0000 ) -> int:
_A = [0, 1]
_A = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1] )
if fib[i + 2] > n:
break
i += 1
_A = 0
for j in range(len(__lowercase ) - 1 ):
if fib[j] % 2 == 0:
total += fib[j]
return total
if __name__ == "__main__":
print(f'''{solution() = }''')
| 163
|
"""simple docstring"""
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
"t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
"t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
"t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}
class snake_case ( _UpperCamelCase):
__UpperCamelCase = 't5'
__UpperCamelCase = ['past_key_values']
__UpperCamelCase = {'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
def __init__( self : Union[str, Any] , a__ : Optional[int]=3_21_28 , a__ : Any=5_12 , a__ : Any=64 , a__ : List[str]=20_48 , a__ : Tuple=6 , a__ : Dict=None , a__ : Optional[int]=8 , a__ : int=32 , a__ : List[str]=1_28 , a__ : Optional[Any]=0.1 , a__ : Union[str, Any]=1E-6 , a__ : Dict=1.0 , a__ : Optional[int]="relu" , a__ : Tuple=True , a__ : Any=True , a__ : Tuple=0 , a__ : Optional[Any]=1 , **a__ : Tuple , ) -> int:
'''simple docstring'''
_A = vocab_size
_A = d_model
_A = d_kv
_A = d_ff
_A = num_layers
_A = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
_A = num_heads
_A = relative_attention_num_buckets
_A = relative_attention_max_distance
_A = dropout_rate
_A = layer_norm_epsilon
_A = initializer_factor
_A = feed_forward_proj
_A = use_cache
_A = self.feed_forward_proj.split("-" )
_A = act_info[-1]
_A = act_info[0] == "gated"
if len(a__ ) > 1 and act_info[0] != "gated" or len(a__ ) > 2:
raise ValueError(
F"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
"Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
"'gated-gelu' or 'relu'" )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
_A = "gelu_new"
super().__init__(
pad_token_id=a__ , eos_token_id=a__ , is_encoder_decoder=a__ , **a__ , )
class snake_case ( _UpperCamelCase):
@property
def a_ ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
_A = {
"input_ids": {0: "batch", 1: "encoder_sequence"},
"attention_mask": {0: "batch", 1: "encoder_sequence"},
}
if self.use_past:
_A = "past_encoder_sequence + sequence"
_A = {0: "batch"}
_A = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
_A = {0: "batch", 1: "decoder_sequence"}
_A = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(a__ , direction="inputs" )
return common_inputs
@property
def a_ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
return 13
| 163
| 1
|
"""simple docstring"""
from collections.abc import Iterable
from typing import Any
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self , lowerCAmelCase = None ):
"""simple docstring"""
snake_case = value
snake_case = None # Added in order to delete a node easier
snake_case = None
snake_case = None
def __repr__( self ):
"""simple docstring"""
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({F"""{self.value}""": (self.left, self.right)} , indent=1 )
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self , lowerCAmelCase = None ):
"""simple docstring"""
snake_case = root
def __str__( self ):
"""simple docstring"""
return str(self.root )
def snake_case ( self , lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
if new_children is not None: # reset its kids
snake_case = node.parent
if node.parent is not None: # reset its parent
if self.is_right(lowercase__ ): # If it is the right children
snake_case = new_children
else:
snake_case = new_children
else:
snake_case = new_children
def snake_case ( self , lowerCAmelCase ):
"""simple docstring"""
if node.parent and node.parent.right:
return node == node.parent.right
return False
def snake_case ( self ):
"""simple docstring"""
return self.root is None
def snake_case ( self , lowerCAmelCase ):
"""simple docstring"""
snake_case = Node(lowercase__ ) # create a new Node
if self.empty(): # if Tree is empty
snake_case = new_node # set its root
else: # Tree is not empty
snake_case = self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
snake_case = new_node # We insert the new node in a leaf
break
else:
snake_case = parent_node.left
else:
if parent_node.right is None:
snake_case = new_node
break
else:
snake_case = parent_node.right
snake_case = parent_node
def snake_case ( self , *lowerCAmelCase ):
"""simple docstring"""
for value in values:
self.__insert(lowercase__ )
def snake_case ( self , lowerCAmelCase ):
"""simple docstring"""
if self.empty():
raise IndexError('Warning: Tree is empty! please use another.' )
else:
snake_case = self.root
# use lazy evaluation here to avoid NoneType Attribute error
while node is not None and node.value is not value:
snake_case = node.left if value < node.value else node.right
return node
def snake_case ( self , lowerCAmelCase = None ):
"""simple docstring"""
if node is None:
if self.root is None:
return None
snake_case = self.root
if not self.empty():
while node.right is not None:
snake_case = node.right
return node
def snake_case ( self , lowerCAmelCase = None ):
"""simple docstring"""
if node is None:
snake_case = self.root
if self.root is None:
return None
if not self.empty():
snake_case = self.root
while node.left is not None:
snake_case = node.left
return node
def snake_case ( self , lowerCAmelCase ):
"""simple docstring"""
snake_case = self.search(lowercase__ ) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(lowercase__ , lowercase__ )
elif node.left is None: # Has only right children
self.__reassign_nodes(lowercase__ , node.right )
elif node.right is None: # Has only left children
self.__reassign_nodes(lowercase__ , node.left )
else:
snake_case = self.get_max(
node.left ) # Gets the max value of the left branch
self.remove(tmp_node.value ) # type: ignore
snake_case = (
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
def snake_case ( self , lowerCAmelCase ):
"""simple docstring"""
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left )
yield from self.preorder_traverse(node.right )
def snake_case ( self , lowerCAmelCase=None ):
"""simple docstring"""
if traversal_function is None:
return self.preorder_traverse(self.root )
else:
return traversal_function(self.root )
def snake_case ( self , lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
if node:
self.inorder(lowercase__ , node.left )
arr.append(node.value )
self.inorder(lowercase__ , node.right )
def snake_case ( self , lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
snake_case = []
self.inorder(lowercase__ , lowercase__ ) # append all values to list using inorder traversal
return arr[k - 1]
def lowerCAmelCase__ ( _UpperCamelCase : Tuple ) -> int:
"""simple docstring"""
snake_case = []
if curr_node is not None:
snake_case = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
return node_list
def lowerCAmelCase__ ( ) -> List[Any]:
"""simple docstring"""
snake_case = (8, 3, 6, 1, 1_0, 1_4, 1_3, 4, 7)
snake_case = BinarySearchTree()
for i in testlist:
t.insert(A__ )
# Prints all the elements of the list in order traversal
print(A__ )
if t.search(6 ) is not None:
print('The value 6 exists' )
else:
print('The value 6 doesn\'t exist' )
if t.search(-1 ) is not None:
print('The value -1 exists' )
else:
print('The value -1 doesn\'t exist' )
if not t.empty():
print('Max Value: ' , t.get_max().value ) # type: ignore
print('Min Value: ' , t.get_min().value ) # type: ignore
for i in testlist:
t.remove(A__ )
print(A__ )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 150
|
'''simple docstring'''
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
lowerCAmelCase__ = TypeVar('''KT''')
lowerCAmelCase__ = TypeVar('''VT''')
class lowercase_ (Generic[KT, VT] ):
"""simple docstring"""
def __init__( self : Optional[Any] ,lowercase__ : KT | str = "root" ,lowercase__ : VT | None = None ):
__lowercase = key
__lowercase = value
__lowercase = []
def __repr__( self : Tuple ):
return F"Node({self.key}: {self.value})"
@property
def SCREAMING_SNAKE_CASE ( self : int ):
return len(self.forward )
class lowercase_ (Generic[KT, VT] ):
"""simple docstring"""
def __init__( self : int ,lowercase__ : float = 0.5 ,lowercase__ : int = 1_6 ):
__lowercase = Node[KT, VT]()
__lowercase = 0
__lowercase = p
__lowercase = max_level
def __str__( self : List[str] ):
__lowercase = list(self )
if len(lowercase__ ) == 0:
return F"SkipList(level={self.level})"
__lowercase = max((len(str(lowercase__ ) ) for item in items) ,default=4 )
__lowercase = max(lowercase__ ,4 ) + 4
__lowercase = self.head
__lowercase = []
__lowercase = node.forward.copy()
lines.append(F"[{node.key}]".ljust(lowercase__ ,'''-''' ) + '''* ''' * len(lowercase__ ) )
lines.append(''' ''' * label_size + '''| ''' * len(lowercase__ ) )
while len(node.forward ) != 0:
__lowercase = node.forward[0]
lines.append(
F"[{node.key}]".ljust(lowercase__ ,'''-''' )
+ ''' '''.join(str(n.key ) if n.key == node.key else '''|''' for n in forwards ) )
lines.append(''' ''' * label_size + '''| ''' * len(lowercase__ ) )
__lowercase = node.forward
lines.append('''None'''.ljust(lowercase__ ) + '''* ''' * len(lowercase__ ) )
return F"SkipList(level={self.level})\n" + "\n".join(lowercase__ )
def __iter__( self : List[str] ):
__lowercase = self.head
while len(node.forward ) != 0:
yield node.forward[0].key
__lowercase = node.forward[0]
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = 1
while random() < self.p and level < self.max_level:
level += 1
return level
def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : str ):
__lowercase = []
__lowercase = self.head
for i in reversed(range(self.level ) ):
# i < node.level - When node level is lesser than `i` decrement `i`.
# node.forward[i].key < key - Jumping to node with key value higher
# or equal to searched key would result
# in skipping searched key.
while i < node.level and node.forward[i].key < key:
__lowercase = node.forward[i]
# Each leftmost node (relative to searched node) will potentially have to
# be updated.
update_vector.append(lowercase__ )
update_vector.reverse() # Note that we were inserting values in reverse order.
# len(node.forward) != 0 - If current node doesn't contain any further
# references then searched key is not present.
# node.forward[0].key == key - Next node key should be equal to search key
# if key is present.
if len(node.forward ) != 0 and node.forward[0].key == key:
return node.forward[0], update_vector
else:
return None, update_vector
def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : KT ):
__lowercase , __lowercase = self._locate_node(lowercase__ )
if node is not None:
for i, update_node in enumerate(lowercase__ ):
# Remove or replace all references to removed node.
if update_node.level > i and update_node.forward[i].key == key:
if node.level > i:
__lowercase = node.forward[i]
else:
__lowercase = update_node.forward[:i]
def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : KT ,lowercase__ : VT ):
__lowercase , __lowercase = self._locate_node(lowercase__ )
if node is not None:
__lowercase = value
else:
__lowercase = self.random_level()
if level > self.level:
# After level increase we have to add additional nodes to head.
for _ in range(self.level - 1 ,lowercase__ ):
update_vector.append(self.head )
__lowercase = level
__lowercase = Node(lowercase__ ,lowercase__ )
for i, update_node in enumerate(update_vector[:level] ):
# Change references to pass through new node.
if update_node.level > i:
new_node.forward.append(update_node.forward[i] )
if update_node.level < i + 1:
update_node.forward.append(lowercase__ )
else:
__lowercase = new_node
def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : VT ):
__lowercase , __lowercase = self._locate_node(lowercase__ )
if node is not None:
return node.value
return None
def _A ( ):
"""simple docstring"""
__lowercase = SkipList()
skip_list.insert('''Key1''' , 3 )
skip_list.insert('''Key2''' , 12 )
skip_list.insert('''Key3''' , 41 )
skip_list.insert('''Key4''' , -19 )
__lowercase = skip_list.head
__lowercase = {}
while node.level != 0:
__lowercase = node.forward[0]
__lowercase = node.value
assert len(A__ ) == 4
assert all_values["Key1"] == 3
assert all_values["Key2"] == 12
assert all_values["Key3"] == 41
assert all_values["Key4"] == -19
def _A ( ):
"""simple docstring"""
__lowercase = SkipList()
skip_list.insert('''Key1''' , 10 )
skip_list.insert('''Key1''' , 12 )
skip_list.insert('''Key5''' , 7 )
skip_list.insert('''Key7''' , 10 )
skip_list.insert('''Key10''' , 5 )
skip_list.insert('''Key7''' , 7 )
skip_list.insert('''Key5''' , 5 )
skip_list.insert('''Key10''' , 10 )
__lowercase = skip_list.head
__lowercase = {}
while node.level != 0:
__lowercase = node.forward[0]
__lowercase = node.value
if len(A__ ) != 4:
print()
assert len(A__ ) == 4
assert all_values["Key1"] == 12
assert all_values["Key7"] == 7
assert all_values["Key5"] == 5
assert all_values["Key10"] == 10
def _A ( ):
"""simple docstring"""
__lowercase = SkipList()
assert skip_list.find('''Some key''' ) is None
def _A ( ):
"""simple docstring"""
__lowercase = SkipList()
skip_list.insert('''Key2''' , 20 )
assert skip_list.find('''Key2''' ) == 20
skip_list.insert('''Some Key''' , 10 )
skip_list.insert('''Key2''' , 8 )
skip_list.insert('''V''' , 13 )
assert skip_list.find('''Y''' ) is None
assert skip_list.find('''Key2''' ) == 8
assert skip_list.find('''Some Key''' ) == 10
assert skip_list.find('''V''' ) == 13
def _A ( ):
"""simple docstring"""
__lowercase = SkipList()
skip_list.delete('''Some key''' )
assert len(skip_list.head.forward ) == 0
def _A ( ):
"""simple docstring"""
__lowercase = SkipList()
skip_list.insert('''Key1''' , 12 )
skip_list.insert('''V''' , 13 )
skip_list.insert('''X''' , 14 )
skip_list.insert('''Key2''' , 15 )
skip_list.delete('''V''' )
skip_list.delete('''Key2''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''Key2''' ) is None
def _A ( ):
"""simple docstring"""
__lowercase = SkipList()
skip_list.insert('''Key1''' , 12 )
skip_list.insert('''V''' , 13 )
skip_list.insert('''X''' , 14 )
skip_list.insert('''Key2''' , 15 )
skip_list.delete('''V''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''X''' ) == 14
assert skip_list.find('''Key1''' ) == 12
assert skip_list.find('''Key2''' ) == 15
skip_list.delete('''X''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''X''' ) is None
assert skip_list.find('''Key1''' ) == 12
assert skip_list.find('''Key2''' ) == 15
skip_list.delete('''Key1''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''X''' ) is None
assert skip_list.find('''Key1''' ) is None
assert skip_list.find('''Key2''' ) == 15
skip_list.delete('''Key2''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''X''' ) is None
assert skip_list.find('''Key1''' ) is None
assert skip_list.find('''Key2''' ) is None
def _A ( ):
"""simple docstring"""
__lowercase = SkipList()
skip_list.insert('''Key1''' , 12 )
skip_list.insert('''V''' , 13 )
skip_list.insert('''X''' , 142 )
skip_list.insert('''Key2''' , 15 )
skip_list.delete('''X''' )
def traverse_keys(A__ ):
yield node.key
for forward_node in node.forward:
yield from traverse_keys(A__ )
assert len(set(traverse_keys(skip_list.head ) ) ) == 4
def _A ( ):
"""simple docstring"""
def is_sorted(A__ ):
return all(next_item >= item for item, next_item in zip(A__ , lst[1:] ) )
__lowercase = SkipList()
for i in range(10 ):
skip_list.insert(A__ , A__ )
assert is_sorted(list(A__ ) )
skip_list.delete(5 )
skip_list.delete(8 )
skip_list.delete(2 )
assert is_sorted(list(A__ ) )
skip_list.insert(-12 , -12 )
skip_list.insert(77 , 77 )
assert is_sorted(list(A__ ) )
def _A ( ):
"""simple docstring"""
for _ in range(100 ):
# Repeat test 100 times due to the probabilistic nature of skip list
# random values == random bugs
test_insert()
test_insert_overrides_existing_value()
test_searching_empty_list_returns_none()
test_search()
test_deleting_item_from_empty_list_do_nothing()
test_deleted_items_are_not_founded_by_find_method()
test_delete_removes_only_given_key()
test_delete_doesnt_leave_dead_nodes()
test_iter_always_yields_sorted_values()
def _A ( ):
"""simple docstring"""
__lowercase = SkipList()
skip_list.insert(2 , '''2''' )
skip_list.insert(4 , '''4''' )
skip_list.insert(6 , '''4''' )
skip_list.insert(4 , '''5''' )
skip_list.insert(8 , '''4''' )
skip_list.insert(9 , '''4''' )
skip_list.delete(4 )
print(A__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 104
| 0
|
"""simple docstring"""
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = CustomTokenizer
pass
| 312
|
"""simple docstring"""
from __future__ import annotations
import math
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> int:
if depth < 0:
raise ValueError('Depth cannot be less than 0' )
if len(UpperCAmelCase ) == 0:
raise ValueError('Scores cannot be empty' )
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1 , node_index * 2 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , minimax(depth + 1 , node_index * 2 + 1 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , )
return min(
minimax(depth + 1 , node_index * 2 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , minimax(depth + 1 , node_index * 2 + 1 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , )
def UpperCAmelCase ( ) -> None:
snake_case_ = [90, 23, 6, 33, 21, 65, 123, 34423]
snake_case_ = math.log(len(UpperCAmelCase ) , 2 )
print('Optimal value : ' , end='' )
print(minimax(0 , 0 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 312
| 1
|
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
_A : List[Any] ='''pt'''
elif is_tf_available():
_A : Any ='''tf'''
else:
_A : List[str] ='''jax'''
class _lowercase ( _lowercase , unittest.TestCase ):
a = ByTaTokenizer
a = False
def lowerCamelCase_ ( self: str ):
super().setUp()
lowerCamelCase__ : str = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowerCamelCase_ ( self: Optional[int] ):
return ByTaTokenizer.from_pretrained("""google/byt5-small""" )
def lowerCamelCase_ ( self: Any , **UpperCamelCase__: Tuple ):
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase__: str , UpperCamelCase__: Any=False , UpperCamelCase__: Union[str, Any]=20 , UpperCamelCase__: Optional[int]=5 ):
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for ByT5 because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
lowerCamelCase__ : List[str] = []
for i in range(len(UpperCamelCase__ ) ):
try:
lowerCamelCase__ : Tuple = tokenizer.decode([i] , clean_up_tokenization_spaces=UpperCamelCase__ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
lowerCamelCase__ : Union[str, Any] = list(filter(lambda UpperCamelCase__ : re.match(R"""^[ a-zA-Z]+$""" , t[1] ) , UpperCamelCase__ ) )
lowerCamelCase__ : Tuple = list(filter(lambda UpperCamelCase__ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=UpperCamelCase__ ) , UpperCamelCase__ ) )
if max_length is not None and len(UpperCamelCase__ ) > max_length:
lowerCamelCase__ : Dict = toks[:max_length]
if min_length is not None and len(UpperCamelCase__ ) < min_length and len(UpperCamelCase__ ) > 0:
while len(UpperCamelCase__ ) < min_length:
lowerCamelCase__ : Optional[Any] = toks + toks
# toks_str = [t[1] for t in toks]
lowerCamelCase__ : Union[str, Any] = [t[0] for t in toks]
# Ensure consistency
lowerCamelCase__ : Union[str, Any] = tokenizer.decode(UpperCamelCase__ , clean_up_tokenization_spaces=UpperCamelCase__ )
if " " not in output_txt and len(UpperCamelCase__ ) > 1:
lowerCamelCase__ : Optional[int] = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=UpperCamelCase__ )
+ """ """
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=UpperCamelCase__ )
)
if with_prefix_space:
lowerCamelCase__ : str = """ """ + output_txt
lowerCamelCase__ : List[Any] = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
return output_txt, output_ids
def lowerCamelCase_ ( self: Tuple ):
lowerCamelCase__ : str = self.ta_base_tokenizer
lowerCamelCase__ : Union[str, Any] = tokenizer(["""hi</s>""", """I went to the gym</s>""", """</s>"""] )
lowerCamelCase__ : Optional[int] = tokenizer(["""hi""", """I went to the gym""", """"""] )
self.assertListEqual(batch_with_eos_added["""input_ids"""] , batch_without_eos_added["""input_ids"""] )
def lowerCamelCase_ ( self: Tuple ):
lowerCamelCase__ : Optional[Any] = self.ta_base_tokenizer
lowerCamelCase__ : Dict = """Unicode €."""
lowerCamelCase__ : List[Any] = tokenizer(UpperCamelCase__ )
lowerCamelCase__ : List[str] = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded["""input_ids"""] , UpperCamelCase__ )
# decoding
lowerCamelCase__ : Tuple = tokenizer.decode(UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , """Unicode €.</s>""" )
lowerCamelCase__ : List[Any] = tokenizer("""e è é ê ë""" )
lowerCamelCase__ : List[Any] = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded["""input_ids"""] , UpperCamelCase__ )
# decoding
lowerCamelCase__ : str = tokenizer.decode(UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , """e è é ê ë</s>""" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("""e è é ê ë""" ) ) , """e è é ê ë</s>""" )
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ : int = self.ta_base_tokenizer
lowerCamelCase__ : Tuple = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
# fmt: off
lowerCamelCase__ : List[str] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
lowerCamelCase__ : int = tokenizer(UpperCamelCase__ , padding=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
if FRAMEWORK != "jax":
lowerCamelCase__ : Any = list(batch.input_ids.numpy()[0] )
else:
lowerCamelCase__ : str = list(batch.input_ids.tolist()[0] )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def lowerCamelCase_ ( self: List[Any] ):
lowerCamelCase__ : List[str] = self.ta_base_tokenizer
lowerCamelCase__ : Tuple = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
lowerCamelCase__ : Tuple = tokenizer(UpperCamelCase__ , padding=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("""input_ids""" , UpperCamelCase__ )
self.assertIn("""attention_mask""" , UpperCamelCase__ )
self.assertNotIn("""decoder_input_ids""" , UpperCamelCase__ )
self.assertNotIn("""decoder_attention_mask""" , UpperCamelCase__ )
def lowerCamelCase_ ( self: Tuple ):
lowerCamelCase__ : str = self.ta_base_tokenizer
lowerCamelCase__ : List[Any] = [
"""Summary of the text.""",
"""Another summary.""",
]
lowerCamelCase__ : Union[str, Any] = tokenizer(
text_target=UpperCamelCase__ , max_length=32 , padding="""max_length""" , truncation=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
def lowerCamelCase_ ( self: str ):
lowerCamelCase__ : Tuple = self.ta_base_tokenizer
lowerCamelCase__ : str = ["""A long paragraph for summarization. </s>"""]
lowerCamelCase__ : Optional[Any] = ["""Summary of the text. </s>"""]
# fmt: off
lowerCamelCase__ : Tuple = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
lowerCamelCase__ : Any = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
lowerCamelCase__ : Any = tokenizer(UpperCamelCase__ , text_target=UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , batch["""input_ids"""][0] )
self.assertEqual(UpperCamelCase__ , batch["""labels"""][0] )
def lowerCamelCase_ ( self: Optional[int] ):
# safety check on max_len default value so we are sure the test works
lowerCamelCase__ : Any = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
lowerCamelCase__ : Any = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase__ : int = tempfile.mkdtemp()
lowerCamelCase__ : List[str] = """ He is very happy, UNwant\u00E9d,running"""
lowerCamelCase__ : List[Any] = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
tokenizer.save_pretrained(UpperCamelCase__ )
lowerCamelCase__ : List[Any] = tokenizer.__class__.from_pretrained(UpperCamelCase__ )
lowerCamelCase__ : Optional[int] = after_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
shutil.rmtree(UpperCamelCase__ )
lowerCamelCase__ : Any = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase__ : Any = tempfile.mkdtemp()
lowerCamelCase__ : Optional[Any] = """ He is very happy, UNwant\u00E9d,running"""
tokenizer.add_tokens(["""bim""", """bambam"""] )
lowerCamelCase__ : Optional[int] = tokenizer.additional_special_tokens
additional_special_tokens.append("""new_additional_special_token""" )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
lowerCamelCase__ : List[Any] = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
tokenizer.save_pretrained(UpperCamelCase__ )
lowerCamelCase__ : Union[str, Any] = tokenizer.__class__.from_pretrained(UpperCamelCase__ )
lowerCamelCase__ : int = after_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertIn("""new_additional_special_token""" , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
lowerCamelCase__ : Any = tokenizer.__class__.from_pretrained(UpperCamelCase__ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(UpperCamelCase__ )
def lowerCamelCase_ ( self: Optional[int] ):
lowerCamelCase__ : List[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__ , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file:
lowerCamelCase__ : Union[str, Any] = json.load(UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__ , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file:
lowerCamelCase__ : Optional[Any] = json.load(UpperCamelCase__ )
lowerCamelCase__ : Optional[int] = [F'''<extra_id_{i}>''' for i in range(125 )]
lowerCamelCase__ : int = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
lowerCamelCase__ : Optional[Any] = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
with open(os.path.join(UpperCamelCase__ , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__ , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowerCamelCase__ : Dict = tokenizer_class.from_pretrained(
UpperCamelCase__ , )
self.assertIn(
"""an_additional_special_token""" , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
["""an_additional_special_token"""] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["""an_additional_special_token"""] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowerCamelCase__ : Optional[Any] = added_tokens_extra_ids + [AddedToken("""a_new_additional_special_token""" , lstrip=UpperCamelCase__ )]
lowerCamelCase__ : Any = tokenizer_class.from_pretrained(
UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , )
self.assertIn("""a_new_additional_special_token""" , tokenizer.additional_special_tokens )
self.assertEqual(
["""a_new_additional_special_token"""] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["""a_new_additional_special_token"""] ) ) , )
def lowerCamelCase_ ( self: Union[str, Any] ):
lowerCamelCase__ : Dict = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCamelCase__ )
lowerCamelCase__ : str = tokenizer_class.from_pretrained(UpperCamelCase__ )
self.assertTrue(tokenizer.decode([255] ) == """""" )
def lowerCamelCase_ ( self: Optional[int] ):
pass
def lowerCamelCase_ ( self: str ):
pass
def lowerCamelCase_ ( self: List[str] ):
pass
def lowerCamelCase_ ( self: Optional[int] ):
pass
def lowerCamelCase_ ( self: int ):
# The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings
# and special added tokens as tokens
lowerCamelCase__ : Dict = self.get_tokenizers(fast=UpperCamelCase__ , do_lower_case=UpperCamelCase__ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowerCamelCase__ : Dict = ["""t""", """h""", """i""", """s""", """ """, """i""", """s""", """ """, """a""", """ """, """t""", """e""", """x""", """t""", """</s>"""]
lowerCamelCase__ : Optional[int] = tokenizer.convert_tokens_to_string(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ : str = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowerCamelCase__ : str = [
"""bos_token""",
"""eos_token""",
"""unk_token""",
"""sep_token""",
"""pad_token""",
"""cls_token""",
"""mask_token""",
]
lowerCamelCase__ : str = 0
lowerCamelCase__ : Any = tokenizer.convert_ids_to_tokens(
UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
for attr in attributes_list:
setattr(UpperCamelCase__ , attr + """_id""" , UpperCamelCase__ )
self.assertEqual(getattr(UpperCamelCase__ , UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(getattr(UpperCamelCase__ , attr + """_id""" ) , UpperCamelCase__ )
setattr(UpperCamelCase__ , attr + """_id""" , UpperCamelCase__ )
self.assertEqual(getattr(UpperCamelCase__ , UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(getattr(UpperCamelCase__ , attr + """_id""" ) , UpperCamelCase__ )
setattr(UpperCamelCase__ , """additional_special_tokens_ids""" , [] )
self.assertListEqual(getattr(UpperCamelCase__ , """additional_special_tokens""" ) , [] )
self.assertListEqual(getattr(UpperCamelCase__ , """additional_special_tokens_ids""" ) , [] )
setattr(UpperCamelCase__ , """additional_special_tokens_ids""" , [token_id_to_test_setters] )
self.assertListEqual(getattr(UpperCamelCase__ , """additional_special_tokens""" ) , [token_to_test_setters] )
self.assertListEqual(getattr(UpperCamelCase__ , """additional_special_tokens_ids""" ) , [token_id_to_test_setters] )
| 41
|
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_A : List[str] =logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
_A : Tuple =[]
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.weight', F'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.cross_attn.out_proj.weight',
F'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.cross_attn.out_proj.bias',
F'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'decoder.layers.{i}.final_layer_norm.bias'))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', F'decoder.layers.{i}.sa_qcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', F'decoder.layers.{i}.sa_kcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qpos_proj.weight', F'decoder.layers.{i}.sa_qpos_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kpos_proj.weight', F'decoder.layers.{i}.sa_kpos_proj.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.sa_v_proj.weight', F'decoder.layers.{i}.sa_v_proj.weight'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', F'decoder.layers.{i}.ca_qcontent_proj.weight')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', F'decoder.layers.{i}.ca_kcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kpos_proj.weight', F'decoder.layers.{i}.ca_kpos_proj.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.ca_v_proj.weight', F'decoder.layers.{i}.ca_v_proj.weight'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', F'decoder.layers.{i}.ca_qpos_sine_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', F'decoder.layers.{i}.sa_qcontent_proj.bias')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', F'decoder.layers.{i}.sa_kcontent_proj.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.sa_qpos_proj.bias', F'decoder.layers.{i}.sa_qpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.sa_kpos_proj.bias', F'decoder.layers.{i}.sa_kpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.sa_v_proj.bias', F'decoder.layers.{i}.sa_v_proj.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', F'decoder.layers.{i}.ca_qcontent_proj.bias')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', F'decoder.layers.{i}.ca_kcontent_proj.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.ca_kpos_proj.bias', F'decoder.layers.{i}.ca_kpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.ca_v_proj.bias', F'decoder.layers.{i}.ca_v_proj.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', F'decoder.layers.{i}.ca_qpos_sine_proj.bias')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''),
('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''),
('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''),
('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''),
('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''),
('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''),
('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''),
('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''),
('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''),
('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''),
]
)
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[str]:
lowerCamelCase__ : List[Any] = state_dict.pop(UpperCamelCase )
lowerCamelCase__ : Any = val
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Dict:
lowerCamelCase__ : Tuple = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
lowerCamelCase__ : List[str] = key.replace("""backbone.0.body""" , """backbone.conv_encoder.model""" )
lowerCamelCase__ : Optional[int] = value
else:
lowerCamelCase__ : Any = value
return new_state_dict
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase=False ) -> Dict:
lowerCamelCase__ : Optional[int] = """"""
if is_panoptic:
lowerCamelCase__ : Dict = """conditional_detr."""
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
lowerCamelCase__ : List[Any] = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
lowerCamelCase__ : Union[str, Any] = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase__ : int = in_proj_weight[:256, :]
lowerCamelCase__ : Any = in_proj_bias[:256]
lowerCamelCase__ : str = in_proj_weight[256:512, :]
lowerCamelCase__ : Optional[int] = in_proj_bias[256:512]
lowerCamelCase__ : Dict = in_proj_weight[-256:, :]
lowerCamelCase__ : str = in_proj_bias[-256:]
def SCREAMING_SNAKE_CASE_ () -> List[Any]:
lowerCamelCase__ : Optional[int] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowerCamelCase__ : Optional[Any] = Image.open(requests.get(UpperCamelCase , stream=UpperCamelCase ).raw )
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> int:
lowerCamelCase__ : Optional[Any] = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
lowerCamelCase__ : Any = """resnet101"""
if "dc5" in model_name:
lowerCamelCase__ : Optional[int] = True
lowerCamelCase__ : int = """panoptic""" in model_name
if is_panoptic:
lowerCamelCase__ : List[str] = 250
else:
lowerCamelCase__ : int = 91
lowerCamelCase__ : int = """huggingface/label-files"""
lowerCamelCase__ : List[str] = """coco-detection-id2label.json"""
lowerCamelCase__ : Optional[Any] = json.load(open(hf_hub_download(UpperCamelCase , UpperCamelCase , repo_type="""dataset""" ) , """r""" ) )
lowerCamelCase__ : Any = {int(UpperCamelCase ): v for k, v in idalabel.items()}
lowerCamelCase__ : str = idalabel
lowerCamelCase__ : List[str] = {v: k for k, v in idalabel.items()}
# load image processor
lowerCamelCase__ : Optional[int] = """coco_panoptic""" if is_panoptic else """coco_detection"""
lowerCamelCase__ : int = ConditionalDetrImageProcessor(format=UpperCamelCase )
# prepare image
lowerCamelCase__ : List[str] = prepare_img()
lowerCamelCase__ : int = image_processor(images=UpperCamelCase , return_tensors="""pt""" )
lowerCamelCase__ : Optional[Any] = encoding["""pixel_values"""]
logger.info(f'''Converting model {model_name}...''' )
# load original model from torch hub
lowerCamelCase__ : List[Any] = torch.hub.load("""DeppMeng/ConditionalDETR""" , UpperCamelCase , pretrained=UpperCamelCase ).eval()
lowerCamelCase__ : Dict = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
lowerCamelCase__ : Optional[Any] = """conditional_detr.""" + src
rename_key(UpperCamelCase , UpperCamelCase , UpperCamelCase )
lowerCamelCase__ : Dict = rename_backbone_keys(UpperCamelCase )
# query, key and value matrices need special treatment
read_in_q_k_v(UpperCamelCase , is_panoptic=UpperCamelCase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
lowerCamelCase__ : Dict = """conditional_detr.model.""" if is_panoptic else """model."""
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("""conditional_detr""" )
and not key.startswith("""class_labels_classifier""" )
and not key.startswith("""bbox_predictor""" )
):
lowerCamelCase__ : int = state_dict.pop(UpperCamelCase )
lowerCamelCase__ : Union[str, Any] = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
lowerCamelCase__ : List[str] = state_dict.pop(UpperCamelCase )
lowerCamelCase__ : Any = val
elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ):
continue
else:
lowerCamelCase__ : int = state_dict.pop(UpperCamelCase )
lowerCamelCase__ : Tuple = val
else:
if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
lowerCamelCase__ : Union[str, Any] = state_dict.pop(UpperCamelCase )
lowerCamelCase__ : Dict = val
# finally, create HuggingFace model and load state dict
lowerCamelCase__ : Tuple = ConditionalDetrForSegmentation(UpperCamelCase ) if is_panoptic else ConditionalDetrForObjectDetection(UpperCamelCase )
model.load_state_dict(UpperCamelCase )
model.eval()
model.push_to_hub(repo_id=UpperCamelCase , organization="""DepuMeng""" , commit_message="""Add model""" )
# verify our conversion
lowerCamelCase__ : Optional[Any] = conditional_detr(UpperCamelCase )
lowerCamelCase__ : Optional[Any] = model(UpperCamelCase )
assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1E-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1E-4 )
# Save model and image processor
logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase )
model.save_pretrained(UpperCamelCase )
image_processor.save_pretrained(UpperCamelCase )
if __name__ == "__main__":
_A : List[Any] =argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''conditional_detr_resnet50''',
type=str,
help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
_A : Optional[Any] =parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 41
| 1
|
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class __a :
def __init__( self : Tuple , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : int=12 , SCREAMING_SNAKE_CASE : Tuple=7 , SCREAMING_SNAKE_CASE : List[str]=True , SCREAMING_SNAKE_CASE : Union[str, Any]=True , SCREAMING_SNAKE_CASE : Any=True , SCREAMING_SNAKE_CASE : Optional[int]=99 , SCREAMING_SNAKE_CASE : Optional[Any]=32 , SCREAMING_SNAKE_CASE : Union[str, Any]=32 , SCREAMING_SNAKE_CASE : str=2 , SCREAMING_SNAKE_CASE : Union[str, Any]=4 , SCREAMING_SNAKE_CASE : str=37 , SCREAMING_SNAKE_CASE : Dict=0.1 , SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE : Optional[Any]=5_12 , SCREAMING_SNAKE_CASE : Tuple=0.0_2 , SCREAMING_SNAKE_CASE : List[str]=0 , SCREAMING_SNAKE_CASE : Tuple=None , ):
'''simple docstring'''
UpperCamelCase__ : Optional[int] = parent
UpperCamelCase__ : Optional[Any] = batch_size
UpperCamelCase__ : Any = seq_length
UpperCamelCase__ : Union[str, Any] = is_training
UpperCamelCase__ : Optional[int] = use_input_mask
UpperCamelCase__ : Dict = use_labels
UpperCamelCase__ : Dict = vocab_size
UpperCamelCase__ : Union[str, Any] = hidden_size
UpperCamelCase__ : Tuple = projection_dim
UpperCamelCase__ : List[Any] = num_hidden_layers
UpperCamelCase__ : Optional[int] = num_attention_heads
UpperCamelCase__ : Optional[Any] = intermediate_size
UpperCamelCase__ : Any = dropout
UpperCamelCase__ : Union[str, Any] = attention_dropout
UpperCamelCase__ : int = max_position_embeddings
UpperCamelCase__ : List[str] = initializer_range
UpperCamelCase__ : Union[str, Any] = scope
UpperCamelCase__ : Any = bos_token_id
def __lowercase ( self : List[Any] ):
'''simple docstring'''
UpperCamelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ : int = None
if self.use_input_mask:
UpperCamelCase__ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
UpperCamelCase__ : Any = input_mask.numpy()
UpperCamelCase__ , UpperCamelCase__ : List[str] = input_mask.shape
UpperCamelCase__ : Any = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(SCREAMING_SNAKE_CASE ):
UpperCamelCase__ : List[str] = 1
UpperCamelCase__ : Dict = 0
UpperCamelCase__ : Union[str, Any] = self.get_config()
return config, input_ids, tf.convert_to_tensor(SCREAMING_SNAKE_CASE )
def __lowercase ( self : Dict ):
'''simple docstring'''
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def __lowercase ( self : Any , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
UpperCamelCase__ : List[Any] = TFBlipTextModel(config=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[str] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , training=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[Any] = model(SCREAMING_SNAKE_CASE , training=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
UpperCamelCase__ : List[str] = self.prepare_config_and_inputs()
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : str = config_and_inputs
UpperCamelCase__ : List[str] = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class __a ( A__ , unittest.TestCase ):
_lowerCAmelCase : Any = (TFBlipTextModel,) if is_tf_available() else ()
_lowerCAmelCase : Optional[int] = False
_lowerCAmelCase : int = False
_lowerCAmelCase : List[Any] = False
def __lowercase ( self : List[Any] ):
'''simple docstring'''
UpperCamelCase__ : int = BlipTextModelTester(self )
UpperCamelCase__ : List[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , hidden_size=37 )
def __lowercase ( self : int ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowercase ( self : List[Any] ):
'''simple docstring'''
UpperCamelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
pass
def __lowercase ( self : Dict ):
'''simple docstring'''
pass
@unittest.skip(reason="Blip does not use inputs_embeds" )
def __lowercase ( self : int ):
'''simple docstring'''
pass
@unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING" )
def __lowercase ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING" )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
pass
@slow
def __lowercase ( self : List[Any] ):
'''simple docstring'''
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ : Optional[Any] = TFBlipTextModel.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
def __lowercase ( self : Dict , SCREAMING_SNAKE_CASE : Tuple=True ):
'''simple docstring'''
super().test_pt_tf_model_equivalence(allow_missing_keys=SCREAMING_SNAKE_CASE )
| 196
|
lowerCamelCase : Optional[int] ={
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> list[str]:
UpperCamelCase__ : Optional[Any] = set()
# keep track of all the paths to be checked
UpperCamelCase__ : Optional[Any] = [[start]]
# return path if start is goal
if start == goal:
return [start]
# keeps looping until all possible paths have been checked
while queue:
# pop the first path from the queue
UpperCamelCase__ : int = queue.pop(0 )
# get the last node from the path
UpperCamelCase__ : Dict = path[-1]
if node not in explored:
UpperCamelCase__ : Tuple = graph[node]
# go through all neighbour nodes, construct a new path and
# push it into the queue
for neighbour in neighbours:
UpperCamelCase__ : List[str] = list(__lowerCAmelCase )
new_path.append(__lowerCAmelCase )
queue.append(__lowerCAmelCase )
# return path if neighbour is goal
if neighbour == goal:
return new_path
# mark node as explored
explored.add(__lowerCAmelCase )
# in case there's no path between the 2 nodes
return []
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> int:
if not graph or start not in graph or target not in graph:
return -1
if start == target:
return 0
UpperCamelCase__ : Tuple = [start]
UpperCamelCase__ : Optional[int] = set(__lowerCAmelCase )
# Keep tab on distances from `start` node.
UpperCamelCase__ : str = {start: 0, target: -1}
while queue:
UpperCamelCase__ : Any = queue.pop(0 )
if node == target:
UpperCamelCase__ : Union[str, Any] = (
dist[node] if dist[target] == -1 else min(dist[target] , dist[node] )
)
for adjacent in graph[node]:
if adjacent not in visited:
visited.add(__lowerCAmelCase )
queue.append(__lowerCAmelCase )
UpperCamelCase__ : List[Any] = dist[node] + 1
return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
| 196
| 1
|
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class _A ( unittest.TestCase ):
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : List[str] = tempfile.mkdtemp()
__UpperCAmelCase : Tuple = BlipImageProcessor()
__UpperCAmelCase : Any = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-BertModel""" )
__UpperCAmelCase : Optional[int] = BlipProcessor(UpperCAmelCase__ , UpperCAmelCase__ )
processor.save_pretrained(self.tmpdirname )
def __A ( self , **__UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase__ ).tokenizer
def __A ( self , **__UpperCAmelCase ) -> Any:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase__ ).image_processor
def __A ( self ) -> Any:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def __A ( self ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase : Dict = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
__UpperCAmelCase : Dict = [Image.fromarray(np.moveaxis(UpperCAmelCase__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __A ( self ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase : int = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__UpperCAmelCase : Union[str, Any] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
__UpperCAmelCase : Dict = self.get_image_processor(do_normalize=UpperCAmelCase__ , padding_value=1.0 )
__UpperCAmelCase : Union[str, Any] = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=UpperCAmelCase__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCAmelCase__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase__ )
def __A ( self ) -> str:
'''simple docstring'''
__UpperCAmelCase : Tuple = self.get_image_processor()
__UpperCAmelCase : List[Any] = self.get_tokenizer()
__UpperCAmelCase : Union[str, Any] = BlipProcessor(tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ )
__UpperCAmelCase : Optional[int] = self.prepare_image_inputs()
__UpperCAmelCase : Dict = image_processor(UpperCAmelCase__ , return_tensors="""np""" )
__UpperCAmelCase : Any = processor(images=UpperCAmelCase__ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __A ( self ) -> Any:
'''simple docstring'''
__UpperCAmelCase : int = self.get_image_processor()
__UpperCAmelCase : Dict = self.get_tokenizer()
__UpperCAmelCase : str = BlipProcessor(tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ )
__UpperCAmelCase : Any = """lower newer"""
__UpperCAmelCase : Optional[int] = processor(text=UpperCAmelCase__ )
__UpperCAmelCase : Any = tokenizer(UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __A ( self ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = self.get_image_processor()
__UpperCAmelCase : Any = self.get_tokenizer()
__UpperCAmelCase : Any = BlipProcessor(tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ )
__UpperCAmelCase : Optional[int] = """lower newer"""
__UpperCAmelCase : int = self.prepare_image_inputs()
__UpperCAmelCase : List[str] = processor(text=UpperCAmelCase__ , images=UpperCAmelCase__ )
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase__ ):
processor()
def __A ( self ) -> Dict:
'''simple docstring'''
__UpperCAmelCase : str = self.get_image_processor()
__UpperCAmelCase : List[Any] = self.get_tokenizer()
__UpperCAmelCase : Any = BlipProcessor(tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ )
__UpperCAmelCase : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__UpperCAmelCase : List[Any] = processor.batch_decode(UpperCAmelCase__ )
__UpperCAmelCase : str = tokenizer.batch_decode(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def __A ( self ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase : List[Any] = self.get_image_processor()
__UpperCAmelCase : Union[str, Any] = self.get_tokenizer()
__UpperCAmelCase : List[str] = BlipProcessor(tokenizer=UpperCAmelCase__ , image_processor=UpperCAmelCase__ )
__UpperCAmelCase : Tuple = """lower newer"""
__UpperCAmelCase : Optional[int] = self.prepare_image_inputs()
__UpperCAmelCase : str = processor(text=UpperCAmelCase__ , images=UpperCAmelCase__ )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
| 254
|
"""simple docstring"""
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
_a = '__DUMMY_TRANSFORMERS_USER__'
_a = 'Dummy User'
_a = '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'
_a = 'https://hub-ci.huggingface.co'
_a = CI_HUB_ENDPOINT + '/datasets/{repo_id}/resolve/{revision}/{path}'
_a = CI_HUB_ENDPOINT + '/{repo_id}/resolve/{revision}/{filename}'
_a = Path('~/.huggingface/hub_ci_token').expanduser()
@pytest.fixture
def _A ( UpperCamelCase_ : List[Any]) -> Tuple:
'''simple docstring'''
monkeypatch.setattr(
"huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", UpperCamelCase_)
@pytest.fixture
def _A ( UpperCamelCase_ : int) -> List[Any]:
'''simple docstring'''
monkeypatch.setattr("datasets.config.HF_ENDPOINT", UpperCamelCase_)
monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", UpperCamelCase_)
@pytest.fixture
def _A ( UpperCamelCase_ : str) -> Dict:
'''simple docstring'''
monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", UpperCamelCase_)
@pytest.fixture
def _A ( UpperCamelCase_ : Optional[Any], UpperCamelCase_ : List[Any]) -> List[str]:
'''simple docstring'''
HfFolder.save_token(UpperCamelCase_)
yield
HfFolder.delete_token()
@pytest.fixture(scope="session")
def _A ( ) -> List[Any]:
'''simple docstring'''
return HfApi(endpoint=UpperCamelCase_)
@pytest.fixture(scope="session")
def _A ( UpperCamelCase_ : HfApi) -> List[Any]:
'''simple docstring'''
__lowercase = HfFolder.get_token()
HfFolder.save_token(UpperCamelCase_)
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(UpperCamelCase_)
@pytest.fixture
def _A ( UpperCamelCase_ : Dict) -> int:
'''simple docstring'''
def _cleanup_repo(UpperCamelCase_ : Optional[int]):
hf_api.delete_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset")
return _cleanup_repo
@pytest.fixture
def _A ( UpperCamelCase_ : str) -> Any:
'''simple docstring'''
@contextmanager
def _temporary_repo(UpperCamelCase_ : Any):
try:
yield repo_id
finally:
cleanup_repo(UpperCamelCase_)
return _temporary_repo
@pytest.fixture(scope="session")
def _A ( UpperCamelCase_ : HfApi, UpperCamelCase_ : str, UpperCamelCase_ : Optional[int]) -> List[Any]:
'''simple docstring'''
__lowercase = F"""repo_txt_data-{int(time.time() * 10E3)}"""
__lowercase = F"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset", private=UpperCamelCase_)
hf_api.upload_file(
token=UpperCamelCase_, path_or_fileobj=str(UpperCamelCase_), path_in_repo="data/text_data.txt", repo_id=UpperCamelCase_, repo_type="dataset", )
yield repo_id
try:
hf_api.delete_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def _A ( UpperCamelCase_ : Tuple, UpperCamelCase_ : Any, UpperCamelCase_ : Dict) -> Optional[int]:
'''simple docstring'''
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="session")
def _A ( UpperCamelCase_ : HfApi, UpperCamelCase_ : int, UpperCamelCase_ : Optional[int]) -> int:
'''simple docstring'''
__lowercase = F"""repo_zipped_txt_data-{int(time.time() * 10E3)}"""
__lowercase = F"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset", private=UpperCamelCase_)
hf_api.upload_file(
token=UpperCamelCase_, path_or_fileobj=str(UpperCamelCase_), path_in_repo="data.zip", repo_id=UpperCamelCase_, repo_type="dataset", )
yield repo_id
try:
hf_api.delete_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def _A ( UpperCamelCase_ : List[str], UpperCamelCase_ : Dict, UpperCamelCase_ : Any) -> int:
'''simple docstring'''
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="session")
def _A ( UpperCamelCase_ : HfApi, UpperCamelCase_ : List[str], UpperCamelCase_ : List[str]) -> List[Any]:
'''simple docstring'''
__lowercase = F"""repo_zipped_img_data-{int(time.time() * 10E3)}"""
__lowercase = F"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset", private=UpperCamelCase_)
hf_api.upload_file(
token=UpperCamelCase_, path_or_fileobj=str(UpperCamelCase_), path_in_repo="data.zip", repo_id=UpperCamelCase_, repo_type="dataset", )
yield repo_id
try:
hf_api.delete_repo(UpperCamelCase_, token=UpperCamelCase_, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def _A ( UpperCamelCase_ : Union[str, Any], UpperCamelCase_ : List[str], UpperCamelCase_ : List[str]) -> str:
'''simple docstring'''
return hf_private_dataset_repo_zipped_img_data_
| 17
| 0
|
"""simple docstring"""
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_lowercase : Tuple = "\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n"
_lowercase : List[Any] = "\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.\n"
_lowercase : Optional[Any] = r"\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting \"1/2\" to \"\\frac{1}{2}\")\n\nExamples:\n >>> metric = datasets.load_metric(\"competition_math\")\n >>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])\n >>> print(results)\n {\'accuracy\': 1.0}\n"
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __SCREAMING_SNAKE_CASE ( datasets.Metric ):
'''simple docstring'''
def snake_case ( self : int )-> Dict:
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' ),
'''references''': datasets.Value('''string''' ),
} ), homepage='''https://github.com/hendrycks/math''', codebase_urls=['''https://github.com/hendrycks/math'''], )
def snake_case ( self : List[str], lowerCamelCase : List[str], lowerCamelCase : Optional[Any] )-> int:
lowerCamelCase__ : Dict =0.0
for i, j in zip(__A, __A ):
n_correct += 1.0 if math_equivalence.is_equiv(__A, __A ) else 0.0
lowerCamelCase__ : List[Any] =n_correct / len(__A )
return {
"accuracy": accuracy,
}
| 351
|
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
_lowercase : Dict = logging.get_logger(__name__)
_lowercase : int = {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
'''simple docstring'''
_a = 'blenderbot-small'
_a = ['past_key_values']
_a = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self : Tuple, lowerCamelCase : Any=5_0265, lowerCamelCase : Optional[Any]=512, lowerCamelCase : Union[str, Any]=8, lowerCamelCase : Dict=2048, lowerCamelCase : str=16, lowerCamelCase : List[Any]=8, lowerCamelCase : List[str]=2048, lowerCamelCase : int=16, lowerCamelCase : Any=0.0, lowerCamelCase : Dict=0.0, lowerCamelCase : Tuple=True, lowerCamelCase : Tuple=True, lowerCamelCase : Optional[int]="gelu", lowerCamelCase : Tuple=512, lowerCamelCase : Tuple=0.1, lowerCamelCase : Optional[int]=0.0, lowerCamelCase : Optional[int]=0.0, lowerCamelCase : List[str]=0.02, lowerCamelCase : Any=1, lowerCamelCase : Union[str, Any]=False, lowerCamelCase : Any=0, lowerCamelCase : Tuple=1, lowerCamelCase : Tuple=2, lowerCamelCase : Dict=2, **lowerCamelCase : Any, )-> Dict:
lowerCamelCase__ : Dict =vocab_size
lowerCamelCase__ : Dict =max_position_embeddings
lowerCamelCase__ : Optional[Any] =d_model
lowerCamelCase__ : Union[str, Any] =encoder_ffn_dim
lowerCamelCase__ : Optional[Any] =encoder_layers
lowerCamelCase__ : Any =encoder_attention_heads
lowerCamelCase__ : Union[str, Any] =decoder_ffn_dim
lowerCamelCase__ : Optional[int] =decoder_layers
lowerCamelCase__ : Any =decoder_attention_heads
lowerCamelCase__ : Optional[int] =dropout
lowerCamelCase__ : str =attention_dropout
lowerCamelCase__ : Union[str, Any] =activation_dropout
lowerCamelCase__ : Tuple =activation_function
lowerCamelCase__ : str =init_std
lowerCamelCase__ : List[Any] =encoder_layerdrop
lowerCamelCase__ : List[str] =decoder_layerdrop
lowerCamelCase__ : Tuple =use_cache
lowerCamelCase__ : Optional[Any] =encoder_layers
lowerCamelCase__ : List[Any] =scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=lowerCamelCase, bos_token_id=lowerCamelCase, eos_token_id=lowerCamelCase, is_encoder_decoder=lowerCamelCase, decoder_start_token_id=lowerCamelCase, forced_eos_token_id=lowerCamelCase, **lowerCamelCase, )
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
'''simple docstring'''
@property
def snake_case ( self : Optional[int] )-> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
lowerCamelCase__ : int =OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
lowerCamelCase__ : List[str] ={0: '''batch'''}
lowerCamelCase__ : Tuple ={0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
lowerCamelCase__ : Union[str, Any] ={0: '''batch''', 1: '''decoder_sequence'''}
lowerCamelCase__ : Tuple ={0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(lowerCamelCase, direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
lowerCamelCase__ : Optional[Any] =OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
lowerCamelCase__ , lowerCamelCase__ : List[str] =self.num_layers
for i in range(lowerCamelCase ):
lowerCamelCase__ : Optional[Any] ={0: '''batch''', 2: '''past_sequence + sequence'''}
lowerCamelCase__ : Optional[int] ={0: '''batch''', 2: '''past_sequence + sequence'''}
else:
lowerCamelCase__ : str =OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
def snake_case ( self : Dict )-> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
lowerCamelCase__ : Dict =super().outputs
else:
lowerCamelCase__ : Optional[Any] =super(lowerCamelCase, self ).outputs
if self.use_past:
lowerCamelCase__ , lowerCamelCase__ : str =self.num_layers
for i in range(lowerCamelCase ):
lowerCamelCase__ : Tuple ={0: '''batch''', 2: '''past_sequence + sequence'''}
lowerCamelCase__ : Dict ={0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def snake_case ( self : List[str], lowerCamelCase : PreTrainedTokenizer, lowerCamelCase : int = -1, lowerCamelCase : int = -1, lowerCamelCase : bool = False, lowerCamelCase : Optional[TensorType] = None, )-> Mapping[str, Any]:
lowerCamelCase__ : Any =self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
# Generate decoder inputs
lowerCamelCase__ : str =seq_length if not self.use_past else 1
lowerCamelCase__ : Tuple =self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
lowerCamelCase__ : str ={F'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
lowerCamelCase__ : Tuple =dict(**lowerCamelCase, **lowerCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
lowerCamelCase__ , lowerCamelCase__ : List[Any] =common_inputs['''input_ids'''].shape
lowerCamelCase__ : Optional[Any] =common_inputs['''decoder_input_ids'''].shape[1]
lowerCamelCase__ , lowerCamelCase__ : List[str] =self.num_attention_heads
lowerCamelCase__ : str =(
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowerCamelCase__ : List[Any] =decoder_seq_length + 3
lowerCamelCase__ : Optional[int] =(
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
lowerCamelCase__ : Optional[Any] =torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(lowerCamelCase, lowerCamelCase )], dim=1 )
lowerCamelCase__ : Tuple =[]
# If the number of encoder and decoder layers are present in the model configuration, both are considered
lowerCamelCase__ , lowerCamelCase__ : int =self.num_layers
lowerCamelCase__ : Any =min(lowerCamelCase, lowerCamelCase )
lowerCamelCase__ : Any =max(lowerCamelCase, lowerCamelCase ) - min_num_layers
lowerCamelCase__ : Dict ='''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(lowerCamelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowerCamelCase ),
torch.zeros(lowerCamelCase ),
torch.zeros(lowerCamelCase ),
torch.zeros(lowerCamelCase ),
) )
# TODO: test this.
lowerCamelCase__ : Union[str, Any] =encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(lowerCamelCase, lowerCamelCase ):
common_inputs["past_key_values"].append((torch.zeros(lowerCamelCase ), torch.zeros(lowerCamelCase )) )
return common_inputs
def snake_case ( self : List[str], lowerCamelCase : PreTrainedTokenizer, lowerCamelCase : int = -1, lowerCamelCase : int = -1, lowerCamelCase : bool = False, lowerCamelCase : Optional[TensorType] = None, )-> Mapping[str, Any]:
lowerCamelCase__ : int =self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
lowerCamelCase__ , lowerCamelCase__ : List[str] =common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
lowerCamelCase__ : Union[str, Any] =seqlen + 2
lowerCamelCase__ , lowerCamelCase__ : int =self.num_layers
lowerCamelCase__ , lowerCamelCase__ : Optional[int] =self.num_attention_heads
lowerCamelCase__ : int =(
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowerCamelCase__ : str =common_inputs['''attention_mask'''].dtype
lowerCamelCase__ : int =torch.cat(
[common_inputs['''attention_mask'''], torch.ones(lowerCamelCase, lowerCamelCase, dtype=lowerCamelCase )], dim=1 )
lowerCamelCase__ : str =[
(torch.zeros(lowerCamelCase ), torch.zeros(lowerCamelCase )) for _ in range(lowerCamelCase )
]
return common_inputs
def snake_case ( self : Optional[int], lowerCamelCase : PreTrainedTokenizer, lowerCamelCase : int = -1, lowerCamelCase : int = -1, lowerCamelCase : bool = False, lowerCamelCase : Optional[TensorType] = None, )-> Mapping[str, Any]:
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowerCamelCase__ : int =compute_effective_axis_dimension(
lowerCamelCase, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowerCamelCase__ : Optional[int] =tokenizer.num_special_tokens_to_add(lowerCamelCase )
lowerCamelCase__ : Optional[int] =compute_effective_axis_dimension(
lowerCamelCase, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=lowerCamelCase )
# Generate dummy inputs according to compute batch and sequence
lowerCamelCase__ : Optional[int] =[''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
lowerCamelCase__ : Optional[Any] =dict(tokenizer(lowerCamelCase, return_tensors=lowerCamelCase ) )
return common_inputs
def snake_case ( self : List[Any], lowerCamelCase : PreTrainedTokenizer, lowerCamelCase : int = -1, lowerCamelCase : int = -1, lowerCamelCase : bool = False, lowerCamelCase : Optional[TensorType] = None, )-> Mapping[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
lowerCamelCase__ : Union[str, Any] =self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowerCamelCase, batch_size=lowerCamelCase, seq_length=lowerCamelCase, is_pair=lowerCamelCase, framework=lowerCamelCase )
elif self.task == "causal-lm":
lowerCamelCase__ : Union[str, Any] =self._generate_dummy_inputs_for_causal_lm(
lowerCamelCase, batch_size=lowerCamelCase, seq_length=lowerCamelCase, is_pair=lowerCamelCase, framework=lowerCamelCase )
else:
lowerCamelCase__ : List[Any] =self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase, batch_size=lowerCamelCase, seq_length=lowerCamelCase, is_pair=lowerCamelCase, framework=lowerCamelCase )
return common_inputs
def snake_case ( self : Optional[Any], lowerCamelCase : Tuple, lowerCamelCase : Union[str, Any], lowerCamelCase : Dict, lowerCamelCase : Any )-> str:
if self.task in ["default", "seq2seq-lm"]:
lowerCamelCase__ : Any =super()._flatten_past_key_values_(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
else:
lowerCamelCase__ : List[str] =super(lowerCamelCase, self )._flatten_past_key_values_(
lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
| 272
| 0
|
'''simple docstring'''
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class _snake_case ( a__ ):
lowerCAmelCase :List[str] = (CMStochasticIterativeScheduler,)
lowerCAmelCase :str = 10
def snake_case__ ( self , **_lowerCamelCase):
UpperCAmelCase__ : int = {
"""num_train_timesteps""": 201,
"""sigma_min""": 0.002,
"""sigma_max""": 80.0,
}
config.update(**_lowerCamelCase)
return config
def snake_case__ ( self):
UpperCAmelCase__ : Union[str, Any] = 10
UpperCAmelCase__ : Tuple = self.get_scheduler_config()
UpperCAmelCase__ : str = self.scheduler_classes[0](**_lowerCamelCase)
scheduler.set_timesteps(_lowerCamelCase)
UpperCAmelCase__ : int = scheduler.timesteps[0]
UpperCAmelCase__ : Union[str, Any] = scheduler.timesteps[1]
UpperCAmelCase__ : List[Any] = self.dummy_sample
UpperCAmelCase__ : List[str] = 0.1 * sample
UpperCAmelCase__ : Optional[Any] = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase).prev_sample
UpperCAmelCase__ : Optional[Any] = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
def snake_case__ ( self):
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=_lowerCamelCase)
def snake_case__ ( self):
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=_lowerCamelCase)
def snake_case__ ( self):
UpperCAmelCase__ : Optional[Any] = self.scheduler_classes[0]
UpperCAmelCase__ : Dict = self.get_scheduler_config()
UpperCAmelCase__ : Any = scheduler_class(**_lowerCamelCase)
UpperCAmelCase__ : Optional[int] = 1
scheduler.set_timesteps(_lowerCamelCase)
UpperCAmelCase__ : Tuple = scheduler.timesteps
UpperCAmelCase__ : Optional[Any] = torch.manual_seed(0)
UpperCAmelCase__ : str = self.dummy_model()
UpperCAmelCase__ : int = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(_lowerCamelCase):
# 1. scale model input
UpperCAmelCase__ : int = scheduler.scale_model_input(_lowerCamelCase , _lowerCamelCase)
# 2. predict noise residual
UpperCAmelCase__ : str = model(_lowerCamelCase , _lowerCamelCase)
# 3. predict previous sample x_t-1
UpperCAmelCase__ : Union[str, Any] = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase).prev_sample
UpperCAmelCase__ : Optional[Any] = pred_prev_sample
UpperCAmelCase__ : Optional[int] = torch.sum(torch.abs(_lowerCamelCase))
UpperCAmelCase__ : List[Any] = torch.mean(torch.abs(_lowerCamelCase))
assert abs(result_sum.item() - 192.7614) < 1e-2
assert abs(result_mean.item() - 0.2510) < 1e-3
def snake_case__ ( self):
UpperCAmelCase__ : str = self.scheduler_classes[0]
UpperCAmelCase__ : List[str] = self.get_scheduler_config()
UpperCAmelCase__ : List[Any] = scheduler_class(**_lowerCamelCase)
UpperCAmelCase__ : List[str] = [106, 0]
scheduler.set_timesteps(timesteps=_lowerCamelCase)
UpperCAmelCase__ : List[str] = scheduler.timesteps
UpperCAmelCase__ : str = torch.manual_seed(0)
UpperCAmelCase__ : Tuple = self.dummy_model()
UpperCAmelCase__ : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
UpperCAmelCase__ : Dict = scheduler.scale_model_input(_lowerCamelCase , _lowerCamelCase)
# 2. predict noise residual
UpperCAmelCase__ : str = model(_lowerCamelCase , _lowerCamelCase)
# 3. predict previous sample x_t-1
UpperCAmelCase__ : Union[str, Any] = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase).prev_sample
UpperCAmelCase__ : List[str] = pred_prev_sample
UpperCAmelCase__ : Union[str, Any] = torch.sum(torch.abs(_lowerCamelCase))
UpperCAmelCase__ : int = torch.mean(torch.abs(_lowerCamelCase))
assert abs(result_sum.item() - 347.6357) < 1e-2
assert abs(result_mean.item() - 0.4527) < 1e-3
def snake_case__ ( self):
UpperCAmelCase__ : Optional[Any] = self.scheduler_classes[0]
UpperCAmelCase__ : List[Any] = self.get_scheduler_config()
UpperCAmelCase__ : Union[str, Any] = scheduler_class(**_lowerCamelCase)
UpperCAmelCase__ : int = [39, 30, 12, 15, 0]
with self.assertRaises(_lowerCamelCase , msg="""`timesteps` must be in descending order."""):
scheduler.set_timesteps(timesteps=_lowerCamelCase)
def snake_case__ ( self):
UpperCAmelCase__ : List[Any] = self.scheduler_classes[0]
UpperCAmelCase__ : Tuple = self.get_scheduler_config()
UpperCAmelCase__ : List[Any] = scheduler_class(**_lowerCamelCase)
UpperCAmelCase__ : List[str] = [39, 30, 12, 1, 0]
UpperCAmelCase__ : Any = len(_lowerCamelCase)
with self.assertRaises(_lowerCamelCase , msg="""Can only pass one of `num_inference_steps` or `timesteps`."""):
scheduler.set_timesteps(num_inference_steps=_lowerCamelCase , timesteps=_lowerCamelCase)
def snake_case__ ( self):
UpperCAmelCase__ : Optional[Any] = self.scheduler_classes[0]
UpperCAmelCase__ : Any = self.get_scheduler_config()
UpperCAmelCase__ : List[str] = scheduler_class(**_lowerCamelCase)
UpperCAmelCase__ : Dict = [scheduler.config.num_train_timesteps]
with self.assertRaises(
_lowerCamelCase , msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}""" , ):
scheduler.set_timesteps(timesteps=_lowerCamelCase)
| 163
|
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def _UpperCamelCase ( ):
UpperCAmelCase__ : Any = """https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"""
UpperCAmelCase__ : Optional[int] = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw ).convert("""RGB""" )
return image
def _UpperCamelCase ( UpperCamelCase__ ):
UpperCAmelCase__ : int = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") )
# fmt: on
return rename_keys
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase__ : Union[str, Any] = dct.pop(UpperCamelCase__ )
UpperCAmelCase__ : Dict = val
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
UpperCAmelCase__ : List[Any] = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''' )
UpperCAmelCase__ : Any = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''' )
# next, set bias in the state dict
UpperCAmelCase__ : Union[str, Any] = torch.cat((q_bias, torch.zeros_like(UpperCamelCase__ , requires_grad=UpperCamelCase__ ), v_bias) )
UpperCAmelCase__ : Tuple = qkv_bias
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase__ : Optional[int] = 3_6_4 if """coco""" in model_name else 2_2_4
UpperCAmelCase__ : int = BlipaVisionConfig(image_size=UpperCamelCase__ ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
UpperCAmelCase__ : str = OPTConfig.from_pretrained("""facebook/opt-2.7b""" , eos_token_id=UpperCamelCase__ ).to_dict()
elif "opt-6.7b" in model_name:
UpperCAmelCase__ : List[Any] = OPTConfig.from_pretrained("""facebook/opt-6.7b""" , eos_token_id=UpperCamelCase__ ).to_dict()
elif "t5-xl" in model_name:
UpperCAmelCase__ : Dict = TaConfig.from_pretrained("""google/flan-t5-xl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
UpperCAmelCase__ : Optional[Any] = TaConfig.from_pretrained("""google/flan-t5-xxl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
UpperCAmelCase__ : int = BlipaConfig(vision_config=UpperCamelCase__ , text_config=UpperCamelCase__ )
return config, image_size
@torch.no_grad()
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=False ):
UpperCAmelCase__ : Tuple = (
AutoTokenizer.from_pretrained("""facebook/opt-2.7b""" )
if """opt""" in model_name
else AutoTokenizer.from_pretrained("""google/flan-t5-xl""" )
)
UpperCAmelCase__ : int = tokenizer("""\n""" , add_special_tokens=UpperCamelCase__ ).input_ids[0]
UpperCAmelCase__ , UpperCAmelCase__ : Any = get_blipa_config(UpperCamelCase__ , eos_token_id=UpperCamelCase__ )
UpperCAmelCase__ : List[str] = BlipaForConditionalGeneration(UpperCamelCase__ ).eval()
UpperCAmelCase__ : int = {
"""blip2-opt-2.7b""": ("""blip2_opt""", """pretrain_opt2.7b"""),
"""blip2-opt-6.7b""": ("""blip2_opt""", """pretrain_opt6.7b"""),
"""blip2-opt-2.7b-coco""": ("""blip2_opt""", """caption_coco_opt2.7b"""),
"""blip2-opt-6.7b-coco""": ("""blip2_opt""", """caption_coco_opt6.7b"""),
"""blip2-flan-t5-xl""": ("""blip2_t5""", """pretrain_flant5xl"""),
"""blip2-flan-t5-xl-coco""": ("""blip2_t5""", """caption_coco_flant5xl"""),
"""blip2-flan-t5-xxl""": ("""blip2_t5""", """pretrain_flant5xxl"""),
}
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = model_name_to_original[model_name]
# load original model
print("""Loading original model...""" )
UpperCAmelCase__ : Union[str, Any] = """cuda""" if torch.cuda.is_available() else """cpu"""
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Any = load_model_and_preprocess(
name=UpperCamelCase__ , model_type=UpperCamelCase__ , is_eval=UpperCamelCase__ , device=UpperCamelCase__ )
original_model.eval()
print("""Done!""" )
# update state dict keys
UpperCAmelCase__ : List[Any] = original_model.state_dict()
UpperCAmelCase__ : Union[str, Any] = create_rename_keys(UpperCamelCase__ )
for src, dest in rename_keys:
rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
UpperCAmelCase__ : str = state_dict.pop(UpperCamelCase__ )
if key.startswith("""Qformer.bert""" ):
UpperCAmelCase__ : Any = key.replace("""Qformer.bert""" , """qformer""" )
if "attention.self" in key:
UpperCAmelCase__ : Dict = key.replace("""self""" , """attention""" )
if "opt_proj" in key:
UpperCAmelCase__ : Any = key.replace("""opt_proj""" , """language_projection""" )
if "t5_proj" in key:
UpperCAmelCase__ : int = key.replace("""t5_proj""" , """language_projection""" )
if key.startswith("""opt""" ):
UpperCAmelCase__ : Optional[int] = key.replace("""opt""" , """language""" )
if key.startswith("""t5""" ):
UpperCAmelCase__ : int = key.replace("""t5""" , """language""" )
UpperCAmelCase__ : List[str] = val
# read in qv biases
read_in_q_v_bias(UpperCamelCase__ , UpperCamelCase__ )
UpperCAmelCase__ , UpperCAmelCase__ : Any = hf_model.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ )
assert len(UpperCamelCase__ ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
UpperCAmelCase__ : List[Any] = load_demo_image()
UpperCAmelCase__ : Any = vis_processors["""eval"""](UpperCamelCase__ ).unsqueeze(0 ).to(UpperCamelCase__ )
UpperCAmelCase__ : Any = tokenizer(["""\n"""] , return_tensors="""pt""" ).input_ids.to(UpperCamelCase__ )
# create processor
UpperCAmelCase__ : int = BlipImageProcessor(
size={"""height""": image_size, """width""": image_size} , image_mean=UpperCamelCase__ , image_std=UpperCamelCase__ )
UpperCAmelCase__ : Any = BlipaProcessor(image_processor=UpperCamelCase__ , tokenizer=UpperCamelCase__ )
UpperCAmelCase__ : Tuple = processor(images=UpperCamelCase__ , return_tensors="""pt""" ).pixel_values.to(UpperCamelCase__ )
# make sure processor creates exact same pixel values
assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ )
original_model.to(UpperCamelCase__ )
hf_model.to(UpperCamelCase__ )
with torch.no_grad():
if "opt" in model_name:
UpperCAmelCase__ : List[str] = original_model({"""image""": original_pixel_values, """text_input""": [""""""]} ).logits
UpperCAmelCase__ : Union[str, Any] = hf_model(UpperCamelCase__ , UpperCamelCase__ ).logits
else:
UpperCAmelCase__ : List[str] = original_model(
{"""image""": original_pixel_values, """text_input""": ["""\n"""], """text_output""": ["""\n"""]} ).logits
UpperCAmelCase__ : Any = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_0_0 )
UpperCAmelCase__ : Optional[Any] = hf_model(UpperCamelCase__ , UpperCamelCase__ , labels=UpperCamelCase__ ).logits
assert original_logits.shape == logits.shape
print("""First values of original logits:""" , original_logits[0, :3, :3] )
print("""First values of HF logits:""" , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
UpperCAmelCase__ : Any = torch.tensor(
[[-41.58_50, -4.44_40, -8.99_22], [-47.43_22, -5.91_43, -1.73_40]] , device=UpperCamelCase__ )
assert torch.allclose(logits[0, :3, :3] , UpperCamelCase__ , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
UpperCAmelCase__ : int = torch.tensor(
[[-57.01_09, -9.89_67, -12.62_80], [-68.65_78, -12.71_91, -10.50_65]] , device=UpperCamelCase__ )
else:
# cast to same type
UpperCAmelCase__ : int = logits.dtype
assert torch.allclose(original_logits.to(UpperCamelCase__ ) , UpperCamelCase__ , atol=1e-2 )
print("""Looks ok!""" )
print("""Generating a caption...""" )
UpperCAmelCase__ : Union[str, Any] = """"""
UpperCAmelCase__ : Dict = tokenizer(UpperCamelCase__ , return_tensors="""pt""" ).input_ids.to(UpperCamelCase__ )
UpperCAmelCase__ : int = original_model.generate({"""image""": original_pixel_values} )
UpperCAmelCase__ : Optional[Any] = hf_model.generate(
UpperCamelCase__ , UpperCamelCase__ , do_sample=UpperCamelCase__ , num_beams=5 , max_length=3_0 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print("""Original generation:""" , UpperCamelCase__ )
UpperCAmelCase__ : List[Any] = input_ids.shape[1]
UpperCAmelCase__ : Optional[Any] = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=UpperCamelCase__ )
UpperCAmelCase__ : Any = [text.strip() for text in output_text]
print("""HF generation:""" , UpperCamelCase__ )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(UpperCamelCase__ )
hf_model.save_pretrained(UpperCamelCase__ )
if push_to_hub:
processor.push_to_hub(f'''nielsr/{model_name}''' )
hf_model.push_to_hub(f'''nielsr/{model_name}''' )
if __name__ == "__main__":
__A =argparse.ArgumentParser()
__A =[
'blip2-opt-2.7b',
'blip2-opt-6.7b',
'blip2-opt-2.7b-coco',
'blip2-opt-6.7b-coco',
'blip2-flan-t5-xl',
'blip2-flan-t5-xl-coco',
'blip2-flan-t5-xxl',
]
parser.add_argument(
'--model_name',
default='blip2-opt-2.7b',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
__A =parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 163
| 1
|
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
__A = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def lowerCAmelCase_ ( __a ) -> Union[str, Any]:
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(__a )
def lowerCAmelCase_ ( __a ) -> Tuple:
"""simple docstring"""
from diffusers.utils.testing_utils import pytest_terminal_summary_main
lowerCamelCase__: str =terminalreporter.config.getoption("--make-reports" )
if make_reports:
pytest_terminal_summary_main(__a , id=__a )
| 361
|
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = CTRLTokenizer
lowercase_ = False
lowercase_ = False
def SCREAMING_SNAKE_CASE_ (self : str) ->Union[str, Any]:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCamelCase__: Optional[Any] =["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
lowerCamelCase__: Optional[Any] =dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_))))
lowerCamelCase__: Union[str, Any] =["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
lowerCamelCase__: Union[str, Any] ={"unk_token": "<unk>"}
lowerCamelCase__: Union[str, Any] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
lowerCamelCase__: Optional[Any] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as fp:
fp.write(json.dumps(UpperCAmelCase_) + "\n")
with open(self.merges_file , "w" , encoding="utf-8") as fp:
fp.write("\n".join(UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ (self : List[str] , **UpperCAmelCase_ : str) ->List[Any]:
'''simple docstring'''
kwargs.update(self.special_tokens_map)
return CTRLTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : List[Any] , UpperCAmelCase_ : Optional[int]) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: int ="adapt react readapt apt"
lowerCamelCase__: int ="adapt react readapt apt"
return input_text, output_text
def SCREAMING_SNAKE_CASE_ (self : str) ->Union[str, Any]:
'''simple docstring'''
lowerCamelCase__: List[str] =CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map)
lowerCamelCase__: Union[str, Any] ="adapt react readapt apt"
lowerCamelCase__: Tuple ="adapt re@@ a@@ c@@ t re@@ adapt apt".split()
lowerCamelCase__: Dict =tokenizer.tokenize(UpperCAmelCase_)
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__: Optional[int] =tokens + [tokenizer.unk_token]
lowerCamelCase__: List[str] =[0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_) , UpperCAmelCase_)
| 273
| 0
|
'''simple docstring'''
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def UpperCAmelCase ( a_ , a_=None ) -> Optional[Any]:
"""simple docstring"""
A_ : Optional[int] = None
if token is not None:
A_ : Optional[int] = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"Bearer {token}"}
A_ : Tuple = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
A_ : Optional[int] = requests.get(a_ , headers=a_ ).json()
A_ : Union[str, Any] = {}
try:
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
A_ : List[Any] = math.ceil((result["""total_count"""] - 1_0_0) / 1_0_0 )
for i in range(a_ ):
A_ : List[str] = requests.get(url + F"&page={i + 2}" , headers=a_ ).json()
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return job_links
except Exception:
print(F"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
def UpperCAmelCase ( a_ , a_=None ) -> Optional[int]:
"""simple docstring"""
A_ : Tuple = None
if token is not None:
A_ : Dict = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"Bearer {token}"}
A_ : Optional[Any] = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"
A_ : Tuple = requests.get(a_ , headers=a_ ).json()
A_ : Optional[Any] = {}
try:
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
A_ : str = math.ceil((result["""total_count"""] - 1_0_0) / 1_0_0 )
for i in range(a_ ):
A_ : Optional[int] = requests.get(url + F"&page={i + 2}" , headers=a_ ).json()
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
return artifacts
except Exception:
print(F"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
def UpperCAmelCase ( a_ , a_ , a_ , a_ ) -> List[str]:
"""simple docstring"""
A_ : List[str] = None
if token is not None:
A_ : Tuple = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"Bearer {token}"}
A_ : Tuple = requests.get(a_ , headers=a_ , allow_redirects=a_ )
A_ : Dict = result.headers["""Location"""]
A_ : str = requests.get(a_ , allow_redirects=a_ )
A_ : Optional[int] = os.path.join(a_ , F"{artifact_name}.zip" )
with open(a_ , """wb""" ) as fp:
fp.write(response.content )
def UpperCAmelCase ( a_ , a_=None ) -> Any:
"""simple docstring"""
A_ : Tuple = []
A_ : Any = []
A_ : List[str] = None
with zipfile.ZipFile(a_ ) as z:
for filename in z.namelist():
if not os.path.isdir(a_ ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(a_ ) as f:
for line in f:
A_ : Tuple = line.decode("""UTF-8""" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
A_ : str = line[: line.index(""": """ )]
A_ : int = line[line.index(""": """ ) + len(""": """ ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("""FAILED """ ):
# `test` is the test method that failed
A_ : List[Any] = line[len("""FAILED """ ) :]
failed_tests.append(a_ )
elif filename == "job_name.txt":
A_ : Dict = line
if len(a_ ) != len(a_ ):
raise ValueError(
F"`errors` and `failed_tests` should have the same number of elements. Got {len(a_ )} for `errors` "
F"and {len(a_ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
""" problem.""" )
A_ : Dict = None
if job_name and job_links:
A_ : Union[str, Any] = job_links.get(a_ , a_ )
# A list with elements of the form (line of error, error, failed test)
A_ : int = [x + [y] + [job_link] for x, y in zip(a_ , a_ )]
return result
def UpperCAmelCase ( a_ , a_=None ) -> Dict:
"""simple docstring"""
A_ : Union[str, Any] = []
A_ : List[Any] = [os.path.join(a_ , a_ ) for p in os.listdir(a_ ) if p.endswith(""".zip""" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(a_ , job_links=a_ ) )
return errors
def UpperCAmelCase ( a_ , a_=None ) -> Tuple:
"""simple docstring"""
A_ : Dict = Counter()
counter.update([x[1] for x in logs] )
A_ : Union[str, Any] = counter.most_common()
A_ : Union[str, Any] = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
A_ : Tuple = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
A_ : Optional[Any] = dict(sorted(r.items() , key=lambda a_ : item[1]["count"] , reverse=a_ ) )
return r
def UpperCAmelCase ( a_ ) -> str:
"""simple docstring"""
A_ : Any = test.split("""::""" )[0]
if test.startswith("""tests/models/""" ):
A_ : List[str] = test.split("""/""" )[2]
else:
A_ : int = None
return test
def UpperCAmelCase ( a_ , a_=None ) -> Union[str, Any]:
"""simple docstring"""
A_ : Tuple = [(x[0], x[1], get_model(x[2] )) for x in logs]
A_ : Dict = [x for x in logs if x[2] is not None]
A_ : Optional[int] = {x[2] for x in logs}
A_ : Tuple = {}
for test in tests:
A_ : Union[str, Any] = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
A_ : List[Any] = counter.most_common()
A_ : Dict = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
A_ : Union[str, Any] = sum(error_counts.values() )
if n_errors > 0:
A_ : Optional[int] = {"""count""": n_errors, """errors""": error_counts}
A_ : Dict = dict(sorted(r.items() , key=lambda a_ : item[1]["count"] , reverse=a_ ) )
return r
def UpperCAmelCase ( a_ ) -> Union[str, Any]:
"""simple docstring"""
A_ : Optional[int] = """| no. | error | status |"""
A_ : Any = """|-:|:-|:-|"""
A_ : Optional[Any] = [header, sep]
for error in reduced_by_error:
A_ : Any = reduced_by_error[error]["""count"""]
A_ : int = F"| {count} | {error[:1_0_0]} | |"
lines.append(a_ )
return "\n".join(a_ )
def UpperCAmelCase ( a_ ) -> Any:
"""simple docstring"""
A_ : Any = """| model | no. of errors | major error | count |"""
A_ : str = """|-:|-:|-:|-:|"""
A_ : List[str] = [header, sep]
for model in reduced_by_model:
A_ : List[str] = reduced_by_model[model]["""count"""]
A_ , A_ : List[str] = list(reduced_by_model[model]["""errors"""].items() )[0]
A_ : int = F"| {model} | {count} | {error[:6_0]} | {_count} |"
lines.append(a_ )
return "\n".join(a_ )
if __name__ == "__main__":
UpperCamelCase__ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
UpperCamelCase__ : Any = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
UpperCamelCase__ : Dict = get_job_links(args.workflow_run_id, token=args.token)
UpperCamelCase__ : Union[str, Any] = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
UpperCamelCase__ : Any = k.find(' / ')
UpperCamelCase__ : int = k[index + len(' / ') :]
UpperCamelCase__ : Optional[Any] = v
with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
UpperCamelCase__ : List[Any] = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
UpperCamelCase__ : Optional[int] = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
UpperCamelCase__ : int = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
UpperCamelCase__ : Dict = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
UpperCamelCase__ : List[Any] = reduce_by_error(errors)
UpperCamelCase__ : Any = reduce_by_model(errors)
UpperCamelCase__ : List[Any] = make_github_table(reduced_by_error)
UpperCamelCase__ : Union[str, Any] = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
| 344
|
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=3 , _lowerCamelCase=32 , _lowerCamelCase=3 , _lowerCamelCase=10 , _lowerCamelCase=[10, 20, 30, 40] , _lowerCamelCase=[1, 1, 2, 1] , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase="relu" , _lowerCamelCase=3 , _lowerCamelCase=None , ) -> List[str]:
A_ : Any = parent
A_ : List[Any] = batch_size
A_ : List[Any] = image_size
A_ : Optional[int] = num_channels
A_ : Tuple = embeddings_size
A_ : str = hidden_sizes
A_ : Optional[Any] = depths
A_ : Any = is_training
A_ : int = use_labels
A_ : int = hidden_act
A_ : Optional[Any] = num_labels
A_ : str = scope
A_ : Optional[int] = len(_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Tuple:
A_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ : Dict = None
if self.use_labels:
A_ : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels )
A_ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase_ ( self ) -> Optional[Any]:
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[str]:
A_ : Dict = TFRegNetModel(config=_lowerCamelCase )
A_ : Optional[int] = model(_lowerCamelCase , training=_lowerCamelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[int]:
A_ : Optional[Any] = self.num_labels
A_ : int = TFRegNetForImageClassification(_lowerCamelCase )
A_ : Tuple = model(_lowerCamelCase , labels=_lowerCamelCase , training=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase_ ( self ) -> str:
A_ : Any = self.prepare_config_and_inputs()
A_ , A_ , A_ : str = config_and_inputs
A_ : Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class _lowerCAmelCase ( __A, __A, unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
lowerCamelCase = (
{'''feature-extraction''': TFRegNetModel, '''image-classification''': TFRegNetForImageClassification}
if is_tf_available()
else {}
)
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ : Dict = TFRegNetModelTester(self )
A_ : Optional[int] = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> str:
return
@unittest.skip(reason="""RegNet does not use inputs_embeds""" )
def UpperCAmelCase_ ( self ) -> Dict:
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
@slow
def UpperCAmelCase_ ( self ) -> int:
super().test_keras_fit()
@unittest.skip(reason="""RegNet does not support input and output embeddings""" )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
pass
def UpperCAmelCase_ ( self ) -> int:
A_ , A_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Optional[Any] = model_class(_lowerCamelCase )
A_ : Optional[int] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ : int = [*signature.parameters.keys()]
A_ : Any = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Tuple:
A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Dict:
def check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
A_ : Optional[int] = model_class(_lowerCamelCase )
A_ : List[Any] = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) , training=_lowerCamelCase )
A_ : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A_ : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(_lowerCamelCase ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
A_ , A_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
A_ : List[str] = ["""basic""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
A_ : Dict = layer_type
A_ : List[Any] = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A_ : str = True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Dict:
A_ , A_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase={} ):
A_ : Dict = model(_lowerCamelCase , return_dict=_lowerCamelCase , **_lowerCamelCase )
A_ : Optional[Any] = model(_lowerCamelCase , return_dict=_lowerCamelCase , **_lowerCamelCase ).to_tuple()
def recursive_check(_lowerCamelCase , _lowerCamelCase ):
if isinstance(_lowerCamelCase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(_lowerCamelCase , _lowerCamelCase ):
recursive_check(_lowerCamelCase , _lowerCamelCase )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(_lowerCamelCase , _lowerCamelCase ) ) , msg=(
"""Tuple and dict output are not equal. Difference:"""
F" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}"
) , )
recursive_check(_lowerCamelCase , _lowerCamelCase )
for model_class in self.all_model_classes:
A_ : Optional[Any] = model_class(_lowerCamelCase )
A_ : Optional[Any] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
A_ : Optional[int] = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
A_ : Any = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
A_ : Tuple = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
A_ : Dict = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
A_ : int = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , {"""output_hidden_states""": True} )
A_ : Tuple = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
A_ : str = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
check_equivalence(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , {"""output_hidden_states""": True} )
def UpperCAmelCase_ ( self ) -> str:
A_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase )
@slow
def UpperCAmelCase_ ( self ) -> Tuple:
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : Dict = TFRegNetModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def UpperCAmelCase ( ) -> Tuple:
"""simple docstring"""
A_ : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase_ ( self ) -> int:
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ : str = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
A_ : Tuple = self.default_image_processor
A_ : Optional[int] = prepare_img()
A_ : Any = image_processor(images=_lowerCamelCase , return_tensors="""tf""" )
# forward pass
A_ : List[Any] = model(**_lowerCamelCase , training=_lowerCamelCase )
# verify the logits
A_ : Optional[Any] = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
A_ : Optional[Any] = tf.constant([-0.4180, -1.5051, -3.4836] )
tf.debugging.assert_near(outputs.logits[0, :3] , _lowerCamelCase , atol=1e-4 )
| 344
| 1
|
'''simple docstring'''
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def a_ ( __snake_case : str , __snake_case : Optional[int] , __snake_case : int ) -> Dict:
"""simple docstring"""
# Construct model
if gpta_config_file == "":
lowerCamelCase_ =GPTaConfig()
else:
lowerCamelCase_ =GPTaConfig.from_json_file(__snake_case )
lowerCamelCase_ =GPTaModel(__snake_case )
# Load weights from numpy
load_tf_weights_in_gpta(__snake_case , __snake_case , __snake_case )
# Save pytorch-model
lowerCamelCase_ =pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
lowerCamelCase_ =pytorch_dump_folder_path + '''/''' + CONFIG_NAME
print(F'''Save PyTorch model to {pytorch_weights_dump_path}''' )
torch.save(model.state_dict() , __snake_case )
print(F'''Save configuration file to {pytorch_config_dump_path}''' )
with open(__snake_case , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
a_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--gpt2_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--gpt2_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained OpenAI model. \n"""
"""This specifies the model architecture."""
),
)
a_ : List[Any] = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
| 354
|
'''simple docstring'''
from collections import defaultdict
from math import gcd
def a_ ( __snake_case : int = 150_0000 ) -> int:
"""simple docstring"""
lowerCamelCase_ =defaultdict(__snake_case )
lowerCamelCase_ =2
while 2 * euclid_m * (euclid_m + 1) <= limit:
for euclid_n in range((euclid_m % 2) + 1 , __snake_case , 2 ):
if gcd(__snake_case , __snake_case ) > 1:
continue
lowerCamelCase_ =2 * euclid_m * (euclid_m + euclid_n)
for perimeter in range(__snake_case , limit + 1 , __snake_case ):
frequencies[perimeter] += 1
euclid_m += 1
return sum(1 for frequency in frequencies.values() if frequency == 1 )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 6
| 0
|
from __future__ import annotations
import math
__lowerCAmelCase = '''2020.9.26'''
__lowerCAmelCase = '''xcodz-dot, cclaus, dhruvmanila'''
def snake_case_ ( snake_case , snake_case , snake_case , snake_case , snake_case ) -> tuple[float, float]:
if not all(isinstance(snake_case , (float, int) ) for val in locals().values() ):
lowercase__: str = f'Input values must either be float or int: {list(locals().values() )}'
raise TypeError(snake_case )
lowercase__: List[Any] = ((x * distance) / (z + distance)) * scale
lowercase__: List[str] = ((y * distance) / (z + distance)) * scale
return projected_x, projected_y
def snake_case_ ( snake_case , snake_case , snake_case , snake_case , snake_case ) -> tuple[float, float, float]:
if not isinstance(snake_case , snake_case ):
raise TypeError('Axis must be a str' )
lowercase__: Any = locals()
del input_variables["axis"]
if not all(isinstance(snake_case , (float, int) ) for val in input_variables.values() ):
lowercase__: int = (
'Input values except axis must either be float or int: '
f'{list(input_variables.values() )}'
)
raise TypeError(snake_case )
lowercase__: Optional[int] = (angle % 3_60) / 4_50 * 1_80 / math.pi
if axis == "z":
lowercase__: Optional[int] = x * math.cos(snake_case ) - y * math.sin(snake_case )
lowercase__: Dict = y * math.cos(snake_case ) + x * math.sin(snake_case )
lowercase__: Tuple = z
elif axis == "x":
lowercase__: Optional[Any] = y * math.cos(snake_case ) - z * math.sin(snake_case )
lowercase__: Optional[int] = z * math.cos(snake_case ) + y * math.sin(snake_case )
lowercase__: List[str] = x
elif axis == "y":
lowercase__: List[str] = x * math.cos(snake_case ) - z * math.sin(snake_case )
lowercase__: int = z * math.cos(snake_case ) + x * math.sin(snake_case )
lowercase__: List[Any] = y
else:
raise ValueError('not a valid axis, choose one of \'x\', \'y\', \'z\'' )
return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'''{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }''')
print(F'''{rotate(1.0, 2.0, 3.0, "y", 90.0) = }''')
| 196
|
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
__lowerCAmelCase = logging.get_logger(__name__)
def snake_case_ ( snake_case , snake_case ) -> List[str]:
lowercase__: List[str] = set()
lowercase__: List[Any] = []
def parse_line(snake_case ):
for line in fp:
if isinstance(snake_case , snake_case ):
lowercase__: Optional[Any] = line.decode('UTF-8' )
if "warnings summary (final)" in line:
continue
# This means we are outside the body of a warning
elif not line.startswith(' ' ):
# process a single warning and move it to `selected_warnings`.
if len(snake_case ) > 0:
lowercase__: List[str] = '\n'.join(snake_case )
# Only keep the warnings specified in `targets`
if any(f': {x}: ' in warning for x in targets ):
selected_warnings.add(snake_case )
buffer.clear()
continue
else:
lowercase__: Union[str, Any] = line.strip()
buffer.append(snake_case )
if from_gh:
for filename in os.listdir(snake_case ):
lowercase__: Dict = os.path.join(snake_case , snake_case )
if not os.path.isdir(snake_case ):
# read the file
if filename != "warnings.txt":
continue
with open(snake_case ) as fp:
parse_line(snake_case )
else:
try:
with zipfile.ZipFile(snake_case ) as z:
for filename in z.namelist():
if not os.path.isdir(snake_case ):
# read the file
if filename != "warnings.txt":
continue
with z.open(snake_case ) as fp:
parse_line(snake_case )
except Exception:
logger.warning(
f'{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.' )
return selected_warnings
def snake_case_ ( snake_case , snake_case ) -> Any:
lowercase__: Optional[Any] = set()
lowercase__: int = [os.path.join(snake_case , snake_case ) for p in os.listdir(snake_case ) if (p.endswith('.zip' ) or from_gh)]
for p in paths:
selected_warnings.update(extract_warnings_from_single_artifact(snake_case , snake_case ) )
return selected_warnings
if __name__ == "__main__":
def snake_case_ ( snake_case ) -> str:
return values.split(',' )
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
parser.add_argument(
'''--output_dir''',
type=str,
required=True,
help='''Where to store the downloaded artifacts and other result files.''',
)
parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''')
# optional parameters
parser.add_argument(
'''--targets''',
default='''DeprecationWarning,UserWarning,FutureWarning''',
type=list_str,
help='''Comma-separated list of target warning(s) which we want to extract.''',
)
parser.add_argument(
'''--from_gh''',
action='''store_true''',
help='''If running from a GitHub action workflow and collecting warnings from its artifacts.''',
)
__lowerCAmelCase = parser.parse_args()
__lowerCAmelCase = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
__lowerCAmelCase = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('''=''' * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
__lowerCAmelCase = extract_warnings(args.output_dir, args.targets)
__lowerCAmelCase = sorted(selected_warnings)
with open(os.path.join(args.output_dir, '''selected_warnings.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 196
| 1
|
'''simple docstring'''
def a ( __a = 1000000 ):
'''simple docstring'''
UpperCamelCase__ :int = 1
UpperCamelCase__ :Optional[int] = 1
UpperCamelCase__ :List[Any] = {1: 1}
for inputa in range(2 , __a ):
UpperCamelCase__ :Union[str, Any] = 0
UpperCamelCase__ :Optional[Any] = inputa
while True:
if number in counters:
counter += counters[number]
break
if number % 2 == 0:
number //= 2
counter += 1
else:
UpperCamelCase__ :Union[str, Any] = (3 * number) + 1
counter += 1
if inputa not in counters:
UpperCamelCase__ :int = counter
if counter > pre_counter:
UpperCamelCase__ :Union[str, Any] = inputa
UpperCamelCase__ :int = counter
return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
| 361
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def a ( __a=None ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = argparse.ArgumentParser(add_help=__a , allow_abbrev=__a )
# The main config parser
UpperCamelCase__ :str = config_command_parser(__a )
# The subparser to add commands to
UpperCamelCase__ :Union[str, Any] = config_parser.add_subparsers(title='''subcommands''' , dest='''subcommand''' )
# Then add other parsers with the parent parser
default_command_parser(__a , parents=[parent_parser] )
update_command_parser(__a , parents=[parent_parser] )
return config_parser
def a ( ) -> Any:
'''simple docstring'''
UpperCamelCase__ :int = get_config_parser()
UpperCamelCase__ :List[Any] = config_parser.parse_args()
if not hasattr(__a , '''func''' ):
config_parser.print_help()
exit(1 )
# Run
args.func(__a )
if __name__ == "__main__":
main()
| 219
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
__lowercase = {'''tokenization_herbert''': ['''HerbertTokenizer''']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = ['''HerbertTokenizerFast''']
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
__lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 272
|
'''simple docstring'''
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class a__( enum.Enum ):
'''simple docstring'''
UpperCAmelCase_ : Dict = 0
UpperCAmelCase_ : Dict = 1
UpperCAmelCase_ : Any = 2
@add_end_docstrings(lowerCAmelCase__ )
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : int = '''
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
'''
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase)
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == """tf""" else MODEL_FOR_CAUSAL_LM_MAPPING)
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
lowerCAmelCase = None
if self.model.config.prefix is not None:
lowerCAmelCase = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
lowerCAmelCase = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self._sanitize_parameters(prefix=__lowerCAmelCase , **self._forward_params)
lowerCAmelCase = {**self._preprocess_params, **preprocess_params}
lowerCAmelCase = {**self._forward_params, **forward_params}
def a_ ( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase , ):
"""simple docstring"""
lowerCAmelCase = {}
if prefix is not None:
lowerCAmelCase = prefix
if prefix:
lowerCAmelCase = self.tokenizer(
__lowerCAmelCase , padding=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_tensors=self.framework)
lowerCAmelCase = prefix_inputs["""input_ids"""].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
""" [None, 'hole']""")
lowerCAmelCase = handle_long_generation
preprocess_params.update(__lowerCAmelCase)
lowerCAmelCase = generate_kwargs
lowerCAmelCase = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError("""`return_text` is mutually exclusive with `return_full_text`""")
if return_tensors is not None:
raise ValueError("""`return_full_text` is mutually exclusive with `return_tensors`""")
lowerCAmelCase = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError("""`return_text` is mutually exclusive with `return_tensors`""")
lowerCAmelCase = ReturnType.TENSORS
if return_type is not None:
lowerCAmelCase = return_type
if clean_up_tokenization_spaces is not None:
lowerCAmelCase = clean_up_tokenization_spaces
if stop_sequence is not None:
lowerCAmelCase = self.tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase)
if len(__lowerCAmelCase) > 1:
warnings.warn(
"""Stopping on a multiple token sequence is not yet supported on transformers. The first token of"""
""" the stop sequence will be used as the stop sequence string in the interim.""")
lowerCAmelCase = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def a_ ( self , *__lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({"""add_space_before_punct_symbol""": True})
return super()._parse_and_tokenize(*__lowerCAmelCase , **__lowerCAmelCase)
def __call__( self , __lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
return super().__call__(__lowerCAmelCase , **__lowerCAmelCase)
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase="" , __lowerCAmelCase=None , **__lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = self.tokenizer(
prefix + prompt_text , padding=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_tensors=self.framework)
lowerCAmelCase = prompt_text
if handle_long_generation == "hole":
lowerCAmelCase = inputs["""input_ids"""].shape[-1]
if "max_new_tokens" in generate_kwargs:
lowerCAmelCase = generate_kwargs["""max_new_tokens"""]
else:
lowerCAmelCase = generate_kwargs.get("""max_length""" , self.model.config.max_length) - cur_len
if new_tokens < 0:
raise ValueError("""We cannot infer how many new tokens are expected""")
if cur_len + new_tokens > self.tokenizer.model_max_length:
lowerCAmelCase = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
"""We cannot use `hole` to handle this generation the number of desired tokens exceeds the"""
""" models max length""")
lowerCAmelCase = inputs["""input_ids"""][:, -keep_length:]
if "attention_mask" in inputs:
lowerCAmelCase = inputs["""attention_mask"""][:, -keep_length:]
return inputs
def a_ ( self , __lowerCAmelCase , **__lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = model_inputs["""input_ids"""]
lowerCAmelCase = model_inputs.get("""attention_mask""" , __lowerCAmelCase)
# Allow empty prompts
if input_ids.shape[1] == 0:
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = 1
else:
lowerCAmelCase = input_ids.shape[0]
lowerCAmelCase = model_inputs.pop("""prompt_text""")
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
lowerCAmelCase = generate_kwargs.pop("""prefix_length""" , 0)
if prefix_length > 0:
lowerCAmelCase = """max_new_tokens""" in generate_kwargs or (
"""generation_config""" in generate_kwargs
and generate_kwargs["""generation_config"""].max_new_tokens is not None
)
if not has_max_new_tokens:
lowerCAmelCase = generate_kwargs.get("""max_length""") or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
lowerCAmelCase = """min_new_tokens""" in generate_kwargs or (
"""generation_config""" in generate_kwargs
and generate_kwargs["""generation_config"""].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
lowerCAmelCase = self.model.generate(input_ids=__lowerCAmelCase , attention_mask=__lowerCAmelCase , **__lowerCAmelCase)
lowerCAmelCase = generated_sequence.shape[0]
if self.framework == "pt":
lowerCAmelCase = generated_sequence.reshape(__lowerCAmelCase , out_b // in_b , *generated_sequence.shape[1:])
elif self.framework == "tf":
lowerCAmelCase = tf.reshape(__lowerCAmelCase , (in_b, out_b // in_b, *generated_sequence.shape[1:]))
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase=ReturnType.FULL_TEXT , __lowerCAmelCase=True):
"""simple docstring"""
lowerCAmelCase = model_outputs["""generated_sequence"""][0]
lowerCAmelCase = model_outputs["""input_ids"""]
lowerCAmelCase = model_outputs["""prompt_text"""]
lowerCAmelCase = generated_sequence.numpy().tolist()
lowerCAmelCase = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
lowerCAmelCase = {"""generated_token_ids""": sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
lowerCAmelCase = self.tokenizer.decode(
__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
lowerCAmelCase = 0
else:
lowerCAmelCase = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase , ))
if return_type == ReturnType.FULL_TEXT:
lowerCAmelCase = prompt_text + text[prompt_length:]
else:
lowerCAmelCase = text[prompt_length:]
lowerCAmelCase = {"""generated_text""": all_text}
records.append(__lowerCAmelCase)
return records
| 272
| 1
|
'''simple docstring'''
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
__snake_case : Optional[Any] = {
'gwf-440k': {
'url': 'https://model-server.zqevans2.workers.dev/gwf-440k.ckpt',
'sample_rate': 48_000,
'sample_size': 65_536,
},
'jmann-small-190k': {
'url': 'https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt',
'sample_rate': 48_000,
'sample_size': 65_536,
},
'jmann-large-580k': {
'url': 'https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt',
'sample_rate': 48_000,
'sample_size': 131_072,
},
'maestro-uncond-150k': {
'url': 'https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt',
'sample_rate': 16_000,
'sample_size': 65_536,
},
'unlocked-uncond-250k': {
'url': 'https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt',
'sample_rate': 16_000,
'sample_size': 65_536,
},
'honk-140k': {
'url': 'https://model-server.zqevans2.workers.dev/honk-140k.ckpt',
'sample_rate': 16_000,
'sample_size': 65_536,
},
}
def _UpperCAmelCase ( _UpperCamelCase : List[Any], _UpperCamelCase : Any ) -> int:
return torch.atana(_UpperCamelCase, _UpperCamelCase ) / math.pi * 2
def _UpperCAmelCase ( _UpperCamelCase : List[str] ) -> List[Any]:
A_ = torch.sin(t * math.pi / 2 ) ** 2
A_ = (1 - sigma**2) ** 0.5
return alpha_sigma_to_t(_UpperCamelCase, _UpperCamelCase )
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
pass
class __UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
super().__init__()
A_ = DiffusionAttnUnetaD(_SCREAMING_SNAKE_CASE , n_attn_layers=4 )
A_ = deepcopy(self.diffusion )
A_ = torch.quasirandom.SobolEngine(1 , scramble=_SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( _UpperCamelCase : List[Any] ) -> Tuple:
A_ = MODELS_MAP[model_name]['''url''']
os.system(F'''wget {url} ./''' )
return F'''./{model_name}.ckpt'''
__snake_case : Optional[int] = {
'1': 'resnets.0',
'2': 'attentions.0',
'3': 'resnets.1',
'4': 'attentions.1',
'5': 'resnets.2',
'6': 'attentions.2',
}
__snake_case : Union[str, Any] = {
'8': 'resnets.0',
'9': 'attentions.0',
'10': 'resnets.1',
'11': 'attentions.1',
'12': 'resnets.2',
'13': 'attentions.2',
}
__snake_case : Optional[int] = {
'1': 'resnets.0',
'2': 'attentions.0',
'3': 'resnets.1',
'4': 'attentions.1',
'5': 'resnets.2',
'6': 'attentions.2',
'8': 'resnets.3',
'9': 'attentions.3',
'10': 'resnets.4',
'11': 'attentions.4',
'12': 'resnets.5',
'13': 'attentions.5',
}
__snake_case : int = {
'0': 'resnets.0',
'1': 'resnets.1',
'2': 'resnets.2',
'4': 'resnets.0',
'5': 'resnets.1',
'6': 'resnets.2',
}
__snake_case : List[Any] = {
'skip': 'conv_skip',
'main.0': 'conv_1',
'main.1': 'group_norm_1',
'main.3': 'conv_2',
'main.4': 'group_norm_2',
}
__snake_case : Optional[int] = {
'norm': 'group_norm',
'qkv_proj': ['query', 'key', 'value'],
'out_proj': ['proj_attn'],
}
def _UpperCAmelCase ( _UpperCamelCase : str ) -> List[Any]:
if name.startswith('''skip''' ):
return name.replace('''skip''', RES_CONV_MAP['''skip'''] )
# name has to be of format main.{digit}
if not name.startswith('''main.''' ):
raise ValueError(F'''ResConvBlock error with {name}''' )
return name.replace(name[:6], RES_CONV_MAP[name[:6]] )
def _UpperCAmelCase ( _UpperCamelCase : Optional[Any] ) -> List[Any]:
for key, value in ATTN_MAP.items():
if name.startswith(_UpperCamelCase ) and not isinstance(_UpperCamelCase, _UpperCamelCase ):
return name.replace(_UpperCamelCase, _UpperCamelCase )
elif name.startswith(_UpperCamelCase ):
return [name.replace(_UpperCamelCase, _UpperCamelCase ) for v in value]
raise ValueError(F'''Attn error with {name}''' )
def _UpperCAmelCase ( _UpperCamelCase : List[Any], _UpperCamelCase : Optional[int]=13 ) -> List[str]:
A_ = input_string
if string.split('''.''' )[0] == "timestep_embed":
return string.replace('''timestep_embed''', '''time_proj''' )
A_ = 0
if string.startswith('''net.3.''' ):
depth += 1
A_ = string[6:]
elif string.startswith('''net.''' ):
A_ = string[4:]
while string.startswith('''main.7.''' ):
depth += 1
A_ = string[7:]
if string.startswith('''main.''' ):
A_ = string[5:]
# mid block
if string[:2].isdigit():
A_ = string[:2]
A_ = string[2:]
else:
A_ = string[0]
A_ = string[1:]
if depth == max_depth:
A_ = MID_NUM_TO_LAYER[layer_num]
A_ = '''mid_block'''
elif depth > 0 and int(_UpperCamelCase ) < 7:
A_ = DOWN_NUM_TO_LAYER[layer_num]
A_ = F'''down_blocks.{depth}'''
elif depth > 0 and int(_UpperCamelCase ) > 7:
A_ = UP_NUM_TO_LAYER[layer_num]
A_ = F'''up_blocks.{max_depth - depth - 1}'''
elif depth == 0:
A_ = DEPTH_0_TO_LAYER[layer_num]
A_ = F'''up_blocks.{max_depth - 1}''' if int(_UpperCamelCase ) > 3 else '''down_blocks.0'''
if not string_left.startswith('''.''' ):
raise ValueError(F'''Naming error with {input_string} and string_left: {string_left}.''' )
A_ = string_left[1:]
if "resnets" in new_layer:
A_ = convert_resconv_naming(_UpperCamelCase )
elif "attentions" in new_layer:
A_ = convert_attn_naming(_UpperCamelCase )
A_ = new_string_left
if not isinstance(_UpperCamelCase, _UpperCamelCase ):
A_ = prefix + '''.''' + new_layer + '''.''' + string_left
else:
A_ = [prefix + '''.''' + new_layer + '''.''' + s for s in string_left]
return new_string
def _UpperCAmelCase ( _UpperCamelCase : List[str] ) -> List[str]:
A_ = {}
for k, v in state_dict.items():
if k.endswith('''kernel''' ):
# up- and downsample layers, don't have trainable weights
continue
A_ = rename(_UpperCamelCase )
# check if we need to transform from Conv => Linear for attention
if isinstance(_UpperCamelCase, _UpperCamelCase ):
A_ = transform_conv_attns(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase )
else:
A_ = v
return new_state_dict
def _UpperCAmelCase ( _UpperCamelCase : Tuple, _UpperCamelCase : Optional[int], _UpperCamelCase : str ) -> Dict:
if len(_UpperCamelCase ) == 1:
if len(v.shape ) == 3:
# weight
A_ = v[:, :, 0]
else:
# bias
A_ = v
else:
# qkv matrices
A_ = v.shape[0]
A_ = trippled_shape // 3
for i in range(3 ):
if len(v.shape ) == 3:
A_ = v[i * single_shape : (i + 1) * single_shape, :, 0]
else:
A_ = v[i * single_shape : (i + 1) * single_shape]
return new_state_dict
def _UpperCAmelCase ( _UpperCamelCase : int ) -> Optional[Any]:
A_ = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
A_ = args.model_path.split('''/''' )[-1].split('''.''' )[0]
if not os.path.isfile(args.model_path ):
assert (
model_name == args.model_path
), F'''Make sure to provide one of the official model names {MODELS_MAP.keys()}'''
A_ = download(_UpperCamelCase )
A_ = MODELS_MAP[model_name]['''sample_rate''']
A_ = MODELS_MAP[model_name]['''sample_size''']
A_ = Object()
A_ = sample_size
A_ = sample_rate
A_ = 0
A_ = UNetaDModel(sample_size=_UpperCamelCase, sample_rate=_UpperCamelCase )
A_ = diffusers_model.state_dict()
A_ = DiffusionUncond(_UpperCamelCase )
orig_model.load_state_dict(torch.load(args.model_path, map_location=_UpperCamelCase )['''state_dict'''] )
A_ = orig_model.diffusion_ema.eval()
A_ = orig_model.state_dict()
A_ = rename_orig_weights(_UpperCamelCase )
A_ = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
A_ = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
assert len(_UpperCamelCase ) == 0, F'''Problem with {renamed_minus_diffusers}'''
assert all(k.endswith('''kernel''' ) for k in list(_UpperCamelCase ) ), F'''Problem with {diffusers_minus_renamed}'''
for key, value in renamed_state_dict.items():
assert (
diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
), F'''Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}'''
if key == "time_proj.weight":
A_ = value.squeeze()
A_ = value
diffusers_model.load_state_dict(_UpperCamelCase )
A_ = 1_00
A_ = 33
A_ = IPNDMScheduler(num_train_timesteps=_UpperCamelCase )
A_ = torch.manual_seed(_UpperCamelCase )
A_ = torch.randn([1, 2, config.sample_size], generator=_UpperCamelCase ).to(_UpperCamelCase )
A_ = torch.linspace(1, 0, steps + 1, device=_UpperCamelCase )[:-1]
A_ = get_crash_schedule(_UpperCamelCase )
A_ = DanceDiffusionPipeline(unet=_UpperCamelCase, scheduler=_UpperCamelCase )
A_ = torch.manual_seed(33 )
A_ = pipe(num_inference_steps=_UpperCamelCase, generator=_UpperCamelCase ).audios
A_ = sampling.iplms_sample(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, {} )
A_ = generated.clamp(-1, 1 )
A_ = (generated - audio).abs().sum()
A_ = (generated - audio).abs().max()
if args.save:
pipe.save_pretrained(args.checkpoint_path )
print('''Diff sum''', _UpperCamelCase )
print('''Diff max''', _UpperCamelCase )
assert diff_max < 1E-3, F'''Diff max: {diff_max} is too much :-/'''
print(F'''Conversion for {model_name} successful!''' )
if __name__ == "__main__":
__snake_case : str = argparse.ArgumentParser()
parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
parser.add_argument(
'--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
__snake_case : List[Any] = parser.parse_args()
main(args)
| 18
|
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def _UpperCAmelCase ( _UpperCamelCase : str ) -> int:
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class __UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
super().__init__()
A_ = module
A_ = nn.Sequential(
nn.Linear(module.in_features , _SCREAMING_SNAKE_CASE , bias=_SCREAMING_SNAKE_CASE ) , nn.Linear(_SCREAMING_SNAKE_CASE , module.out_features , bias=_SCREAMING_SNAKE_CASE ) , )
A_ = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=_SCREAMING_SNAKE_CASE )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def __A ( self , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Tuple:
return self.module(_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) + self.adapter(_SCREAMING_SNAKE_CASE )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
__lowercase : Dict = 'bigscience/bloom-1b7'
# Constant values
__lowercase : str = 2.109659552692574
__lowercase : int = 'Hello my name is'
__lowercase : Optional[Any] = set()
EXPECTED_OUTPUTS.add('Hello my name is John and I am a professional photographer. I' )
EXPECTED_OUTPUTS.add('Hello my name is John.\nI am a friend of your father.\n' )
EXPECTED_OUTPUTS.add('Hello my name is John Doe, I am a student at the University' )
__lowercase : Optional[Any] = 10
def __A ( self ) -> List[str]:
# Models and tokenizer
A_ = AutoTokenizer.from_pretrained(self.model_name )
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
def __A ( self ) -> List[Any]:
super().setUp()
# Models and tokenizer
A_ = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='''auto''' )
A_ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='''auto''' )
def __A ( self ) -> List[str]:
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def __A ( self ) -> Tuple:
A_ = self.model_abit.config
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , '''quantization_config''' ) )
A_ = config.to_dict()
A_ = config.to_diff_dict()
A_ = config.to_json_string()
def __A ( self ) -> Union[str, Any]:
from bitsandbytes.nn import Paramsabit
A_ = self.model_fpaa.get_memory_footprint()
A_ = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
A_ = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def __A ( self ) -> Union[str, Any]:
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(_SCREAMING_SNAKE_CASE , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def __A ( self ) -> Optional[int]:
A_ = self.tokenizer(self.input_text , return_tensors='''pt''' )
A_ = self.model_abit.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_SCREAMING_SNAKE_CASE ) , self.EXPECTED_OUTPUTS )
def __A ( self ) -> Optional[int]:
A_ = BitsAndBytesConfig()
A_ = True
A_ = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=_SCREAMING_SNAKE_CASE , device_map='''auto''' )
A_ = self.tokenizer(self.input_text , return_tensors='''pt''' )
A_ = model_abit_from_config.generate(
input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_SCREAMING_SNAKE_CASE ) , self.EXPECTED_OUTPUTS )
def __A ( self ) -> Tuple:
with self.assertRaises(_SCREAMING_SNAKE_CASE ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(_SCREAMING_SNAKE_CASE )
def __A ( self ) -> Tuple:
A_ = BitsAndBytesConfig()
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
A_ = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=_SCREAMING_SNAKE_CASE , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='''auto''' , bnb_abit_quant_type='''nf4''' , )
def __A ( self ) -> Dict:
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
# Tries with `str`
self.model_abit.to('''cpu''' )
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
# Tries with a `device`
self.model_abit.to(torch.device('''cuda:0''' ) )
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
A_ = self.tokenizer(self.input_text , return_tensors='''pt''' )
A_ = self.model_fpaa.to(torch.floataa )
A_ = self.model_fpaa.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
A_ = self.model_fpaa.to('''cpu''' )
# Check this does not throw an error
A_ = self.model_fpaa.half()
# Check this does not throw an error
A_ = self.model_fpaa.float()
def __A ( self ) -> Optional[int]:
A_ = AutoModelForSeqaSeqLM.from_pretrained('''t5-small''' , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='''auto''' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def __A ( cls ) -> Optional[Any]:
A_ = '''t5-small'''
A_ = '''google/flan-t5-small''' # flan-t5 uses dense-act instead of dense-relu-dense
A_ = AutoTokenizer.from_pretrained(cls.model_name )
A_ = '''Translate in German: Hello, my dog is cute'''
def __A ( self ) -> Any:
gc.collect()
torch.cuda.empty_cache()
def __A ( self ) -> Tuple:
from transformers import TaForConditionalGeneration
A_ = TaForConditionalGeneration._keep_in_fpaa_modules
A_ = None
# test with `t5-small`
A_ = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='''auto''' )
A_ = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
A_ = model.generate(**_SCREAMING_SNAKE_CASE )
# test with `flan-t5-small`
A_ = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='''auto''' )
A_ = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
A_ = model.generate(**_SCREAMING_SNAKE_CASE )
A_ = modules
def __A ( self ) -> Dict:
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
A_ = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='''auto''' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
A_ = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
A_ = model.generate(**_SCREAMING_SNAKE_CASE )
# test with `flan-t5-small`
A_ = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='''auto''' )
A_ = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
A_ = model.generate(**_SCREAMING_SNAKE_CASE )
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
def __A ( self ) -> int:
super().setUp()
# model_name
A_ = '''bigscience/bloom-560m'''
A_ = '''t5-small'''
# Different types of model
A_ = AutoModel.from_pretrained(self.model_name , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='''auto''' )
# Sequence classification model
A_ = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='''auto''' )
# CausalLM model
A_ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='''auto''' )
# Seq2seq model
A_ = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='''auto''' )
def __A ( self ) -> Union[str, Any]:
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def __A ( self ) -> List[str]:
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
def __A ( self ) -> Tuple:
super().setUp()
def __A ( self ) -> List[Any]:
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def __A ( self ) -> Optional[Any]:
A_ = pipeline(
'''text-generation''' , model=self.model_name , model_kwargs={'''device_map''': '''auto''', '''load_in_4bit''': True, '''torch_dtype''': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
A_ = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['''generated_text'''] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
def __A ( self ) -> List[str]:
super().setUp()
def __A ( self ) -> Optional[int]:
A_ = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='''balanced''' )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
A_ = self.tokenizer(self.input_text , return_tensors='''pt''' )
# Second real batch
A_ = model_parallel.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=_SCREAMING_SNAKE_CASE ) , self.EXPECTED_OUTPUTS )
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
def __A ( self ) -> str:
A_ = '''facebook/opt-350m'''
super().setUp()
def __A ( self ) -> Optional[int]:
if version.parse(importlib.metadata.version('''bitsandbytes''' ) ) < version.parse('''0.37.0''' ):
return
# Step 1: freeze all parameters
A_ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_SCREAMING_SNAKE_CASE )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
A_ = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
A_ = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(_SCREAMING_SNAKE_CASE ) ):
A_ = LoRALayer(module.q_proj , rank=16 )
A_ = LoRALayer(module.k_proj , rank=16 )
A_ = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
A_ = self.tokenizer('''Test batch ''' , return_tensors='''pt''' ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
A_ = model.forward(**_SCREAMING_SNAKE_CASE )
out.logits.norm().backward()
for module in model.modules():
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(_SCREAMING_SNAKE_CASE , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class __UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowercase : int = 'gpt2-xl'
__lowercase : List[Any] = 3.3191854854152187
| 18
| 1
|
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
UpperCamelCase__ = logging.getLogger(__name__)
def _a ( SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
__lowerCAmelCase = np.argmax(SCREAMING_SNAKE_CASE_ , axis=1 )
return np.sum(outputs == labels )
def _a ( SCREAMING_SNAKE_CASE_ : List[Any] ):
with open(SCREAMING_SNAKE_CASE_ , encoding="utf_8" ) as f:
__lowerCAmelCase = csv.reader(SCREAMING_SNAKE_CASE_ )
__lowerCAmelCase = []
next(SCREAMING_SNAKE_CASE_ ) # skip the first line
for line in tqdm(SCREAMING_SNAKE_CASE_ ):
output.append((" ".join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
return output
def _a ( SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] ):
__lowerCAmelCase = []
for dataset in encoded_datasets:
__lowerCAmelCase = len(SCREAMING_SNAKE_CASE_ )
__lowerCAmelCase = np.zeros((n_batch, 2, input_len) , dtype=np.intaa )
__lowerCAmelCase = np.zeros((n_batch, 2) , dtype=np.intaa )
__lowerCAmelCase = np.full((n_batch, 2, input_len) , fill_value=-1_00 , dtype=np.intaa )
__lowerCAmelCase = np.zeros((n_batch,) , dtype=np.intaa )
for (
i,
(story, conta, conta, mc_label),
) in enumerate(SCREAMING_SNAKE_CASE_ ):
__lowerCAmelCase = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
__lowerCAmelCase = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
__lowerCAmelCase = with_conta
__lowerCAmelCase = with_conta
__lowerCAmelCase = len(SCREAMING_SNAKE_CASE_ ) - 1
__lowerCAmelCase = len(SCREAMING_SNAKE_CASE_ ) - 1
__lowerCAmelCase = with_conta
__lowerCAmelCase = with_conta
__lowerCAmelCase = mc_label
__lowerCAmelCase = (input_ids, mc_token_ids, lm_labels, mc_labels)
tensor_datasets.append(tuple(torch.tensor(SCREAMING_SNAKE_CASE_ ) for t in all_inputs ) )
return tensor_datasets
def _a ( ):
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("--model_name" , type=SCREAMING_SNAKE_CASE_ , default="openai-gpt" , help="pretrained model name" )
parser.add_argument("--do_train" , action="store_true" , help="Whether to run training." )
parser.add_argument("--do_eval" , action="store_true" , help="Whether to run eval on the dev set." )
parser.add_argument(
"--output_dir" , default=SCREAMING_SNAKE_CASE_ , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help="The output directory where the model predictions and checkpoints will be written." , )
parser.add_argument("--train_dataset" , type=SCREAMING_SNAKE_CASE_ , default="" )
parser.add_argument("--eval_dataset" , type=SCREAMING_SNAKE_CASE_ , default="" )
parser.add_argument("--seed" , type=SCREAMING_SNAKE_CASE_ , default=42 )
parser.add_argument("--num_train_epochs" , type=SCREAMING_SNAKE_CASE_ , default=3 )
parser.add_argument("--train_batch_size" , type=SCREAMING_SNAKE_CASE_ , default=8 )
parser.add_argument("--eval_batch_size" , type=SCREAMING_SNAKE_CASE_ , default=16 )
parser.add_argument("--adam_epsilon" , default=1E-8 , type=SCREAMING_SNAKE_CASE_ , help="Epsilon for Adam optimizer." )
parser.add_argument("--max_grad_norm" , type=SCREAMING_SNAKE_CASE_ , default=1 )
parser.add_argument(
"--max_steps" , default=-1 , type=SCREAMING_SNAKE_CASE_ , help=(
"If > 0: set total number of training steps to perform. Override num_train_epochs."
) , )
parser.add_argument(
"--gradient_accumulation_steps" , type=SCREAMING_SNAKE_CASE_ , default=1 , help="Number of updates steps to accumulate before performing a backward/update pass." , )
parser.add_argument("--learning_rate" , type=SCREAMING_SNAKE_CASE_ , default=6.25E-5 )
parser.add_argument("--warmup_steps" , default=0 , type=SCREAMING_SNAKE_CASE_ , help="Linear warmup over warmup_steps." )
parser.add_argument("--lr_schedule" , type=SCREAMING_SNAKE_CASE_ , default="warmup_linear" )
parser.add_argument("--weight_decay" , type=SCREAMING_SNAKE_CASE_ , default=0.01 )
parser.add_argument("--lm_coef" , type=SCREAMING_SNAKE_CASE_ , default=0.9 )
parser.add_argument("--n_valid" , type=SCREAMING_SNAKE_CASE_ , default=3_74 )
parser.add_argument("--server_ip" , type=SCREAMING_SNAKE_CASE_ , default="" , help="Can be used for distant debugging." )
parser.add_argument("--server_port" , type=SCREAMING_SNAKE_CASE_ , default="" , help="Can be used for distant debugging." )
__lowerCAmelCase = parser.parse_args()
print(SCREAMING_SNAKE_CASE_ )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=SCREAMING_SNAKE_CASE_ )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
__lowerCAmelCase = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
__lowerCAmelCase = torch.cuda.device_count()
logger.info("device: {}, n_gpu {}".format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
if not args.do_train and not args.do_eval:
raise ValueError("At least one of `do_train` or `do_eval` must be True." )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
__lowerCAmelCase = ["_start_", "_delimiter_", "_classify_"]
__lowerCAmelCase = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(SCREAMING_SNAKE_CASE_ )
__lowerCAmelCase = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
__lowerCAmelCase = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(SCREAMING_SNAKE_CASE_ ) )
model.to(SCREAMING_SNAKE_CASE_ )
# Load and encode the datasets
def tokenize_and_encode(SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(SCREAMING_SNAKE_CASE_ ) )
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
return obj
return [tokenize_and_encode(SCREAMING_SNAKE_CASE_ ) for o in obj]
logger.info("Encoding dataset..." )
__lowerCAmelCase = load_rocstories_dataset(args.train_dataset )
__lowerCAmelCase = load_rocstories_dataset(args.eval_dataset )
__lowerCAmelCase = (train_dataset, eval_dataset)
__lowerCAmelCase = tokenize_and_encode(SCREAMING_SNAKE_CASE_ )
# Compute the max input length for the Transformer
__lowerCAmelCase = model.config.n_positions // 2 - 2
__lowerCAmelCase = max(
len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset )
__lowerCAmelCase = min(SCREAMING_SNAKE_CASE_ , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
__lowerCAmelCase = pre_process_datasets(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ )
__lowerCAmelCase , __lowerCAmelCase = tensor_datasets[0], tensor_datasets[1]
__lowerCAmelCase = TensorDataset(*SCREAMING_SNAKE_CASE_ )
__lowerCAmelCase = RandomSampler(SCREAMING_SNAKE_CASE_ )
__lowerCAmelCase = DataLoader(SCREAMING_SNAKE_CASE_ , sampler=SCREAMING_SNAKE_CASE_ , batch_size=args.train_batch_size )
__lowerCAmelCase = TensorDataset(*SCREAMING_SNAKE_CASE_ )
__lowerCAmelCase = SequentialSampler(SCREAMING_SNAKE_CASE_ )
__lowerCAmelCase = DataLoader(SCREAMING_SNAKE_CASE_ , sampler=SCREAMING_SNAKE_CASE_ , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
__lowerCAmelCase = args.max_steps
__lowerCAmelCase = args.max_steps // (len(SCREAMING_SNAKE_CASE_ ) // args.gradient_accumulation_steps) + 1
else:
__lowerCAmelCase = len(SCREAMING_SNAKE_CASE_ ) // args.gradient_accumulation_steps * args.num_train_epochs
__lowerCAmelCase = list(model.named_parameters() )
__lowerCAmelCase = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
__lowerCAmelCase = [
{
"params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
"weight_decay": args.weight_decay,
},
{"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], "weight_decay": 0.0},
]
__lowerCAmelCase = AdamW(SCREAMING_SNAKE_CASE_ , lr=args.learning_rate , eps=args.adam_epsilon )
__lowerCAmelCase = get_linear_schedule_with_warmup(
SCREAMING_SNAKE_CASE_ , num_warmup_steps=args.warmup_steps , num_training_steps=SCREAMING_SNAKE_CASE_ )
if args.do_train:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc="Epoch" ):
__lowerCAmelCase = 0
__lowerCAmelCase = 0
__lowerCAmelCase = tqdm(SCREAMING_SNAKE_CASE_ , desc="Training" )
for step, batch in enumerate(SCREAMING_SNAKE_CASE_ ):
__lowerCAmelCase = tuple(t.to(SCREAMING_SNAKE_CASE_ ) for t in batch )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = batch
__lowerCAmelCase = model(SCREAMING_SNAKE_CASE_ , mc_token_ids=SCREAMING_SNAKE_CASE_ , lm_labels=SCREAMING_SNAKE_CASE_ , mc_labels=SCREAMING_SNAKE_CASE_ )
__lowerCAmelCase = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
__lowerCAmelCase = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
__lowerCAmelCase = "Training loss: {:.2e} lr: {:.2e}".format(SCREAMING_SNAKE_CASE_ , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
__lowerCAmelCase = model.module if hasattr(SCREAMING_SNAKE_CASE_ , "module" ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
__lowerCAmelCase = os.path.join(args.output_dir , SCREAMING_SNAKE_CASE_ )
__lowerCAmelCase = os.path.join(args.output_dir , SCREAMING_SNAKE_CASE_ )
torch.save(model_to_save.state_dict() , SCREAMING_SNAKE_CASE_ )
model_to_save.config.to_json_file(SCREAMING_SNAKE_CASE_ )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
__lowerCAmelCase = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
__lowerCAmelCase = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(SCREAMING_SNAKE_CASE_ )
if args.do_eval:
model.eval()
__lowerCAmelCase , __lowerCAmelCase = 0, 0
__lowerCAmelCase , __lowerCAmelCase = 0, 0
for batch in tqdm(SCREAMING_SNAKE_CASE_ , desc="Evaluating" ):
__lowerCAmelCase = tuple(t.to(SCREAMING_SNAKE_CASE_ ) for t in batch )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = batch
with torch.no_grad():
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = model(
SCREAMING_SNAKE_CASE_ , mc_token_ids=SCREAMING_SNAKE_CASE_ , lm_labels=SCREAMING_SNAKE_CASE_ , mc_labels=SCREAMING_SNAKE_CASE_ )
__lowerCAmelCase = mc_logits.detach().cpu().numpy()
__lowerCAmelCase = mc_labels.to("cpu" ).numpy()
__lowerCAmelCase = accuracy(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
__lowerCAmelCase = eval_loss / nb_eval_steps
__lowerCAmelCase = eval_accuracy / nb_eval_examples
__lowerCAmelCase = tr_loss / nb_tr_steps if args.do_train else None
__lowerCAmelCase = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}
__lowerCAmelCase = os.path.join(args.output_dir , "eval_results.txt" )
with open(SCREAMING_SNAKE_CASE_ , "w" ) as writer:
logger.info("***** Eval results *****" )
for key in sorted(result.keys() ):
logger.info(" %s = %s" , SCREAMING_SNAKE_CASE_ , str(result[key] ) )
writer.write("%s = %s\n" % (key, str(result[key] )) )
if __name__ == "__main__":
main()
| 92
|
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class A_ :
def __init__( self , _A , _A=1_4 , _A=7 , _A=True , _A=True , _A=True , _A=True , _A=True , _A=9_9 , _A=3_2 , _A=5 , _A=4 , _A=3_7 , _A="gelu" , _A=0.1 , _A=0.1 , _A=5_1_2 , _A=1_6 , _A=2 , _A=0.02 , _A=3 , _A=4 , _A=None , ):
'''simple docstring'''
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = seq_length
UpperCAmelCase = is_training
UpperCAmelCase = use_token_type_ids
UpperCAmelCase = use_input_mask
UpperCAmelCase = use_labels
UpperCAmelCase = use_mc_token_ids
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = type_sequence_label_size
UpperCAmelCase = initializer_range
UpperCAmelCase = num_labels
UpperCAmelCase = num_choices
UpperCAmelCase = scope
UpperCAmelCase = self.vocab_size - 1
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase = None
if self.use_input_mask:
UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase = None
if self.use_token_type_ids:
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase = None
if self.use_mc_token_ids:
UpperCAmelCase = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase = self.get_config()
UpperCAmelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _lowercase ( self ):
'''simple docstring'''
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def _lowercase ( self , _A , _A , _A , _A , _A , *_A ):
'''simple docstring'''
UpperCAmelCase = CTRLModel(config=_A )
model.to(_A )
model.eval()
model(_A , token_type_ids=_A , head_mask=_A )
model(_A , token_type_ids=_A )
UpperCAmelCase = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
def _lowercase ( self , _A , _A , _A , _A , _A , *_A ):
'''simple docstring'''
UpperCAmelCase = CTRLLMHeadModel(_A )
model.to(_A )
model.eval()
UpperCAmelCase = model(_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.prepare_config_and_inputs()
(
(
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) ,
) = config_and_inputs
UpperCAmelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''head_mask''': head_mask}
return config, inputs_dict
def _lowercase ( self , _A , _A , _A , _A , *_A ):
'''simple docstring'''
UpperCAmelCase = self.num_labels
UpperCAmelCase = CTRLForSequenceClassification(_A )
model.to(_A )
model.eval()
UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase = model(_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class A_ (a_ , a_ , a_ , unittest.TestCase ):
UpperCAmelCase__ = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
UpperCAmelCase__ = (CTRLLMHeadModel,) if is_torch_available() else ()
UpperCAmelCase__ = (
{
'''feature-extraction''': CTRLModel,
'''text-classification''': CTRLForSequenceClassification,
'''text-generation''': CTRLLMHeadModel,
'''zero-shot''': CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ = True
UpperCAmelCase__ = False
UpperCAmelCase__ = False
def _lowercase ( self , _A , _A , _A , _A , _A ):
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = CTRLModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=_A , n_embd=3_7 )
def _lowercase ( self ):
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*_A )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*_A )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def _lowercase ( self ):
'''simple docstring'''
pass
@slow
def _lowercase ( self ):
'''simple docstring'''
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase = CTRLModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
def _lowercase ( self ):
'''simple docstring'''
pass
@require_torch
class A_ (unittest.TestCase ):
def _lowercase ( self ):
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = CTRLLMHeadModel.from_pretrained('''ctrl''' )
model.to(_A )
UpperCAmelCase = torch.tensor(
[[1_1_8_5_9, 0, 1_6_1_1, 8]] , dtype=torch.long , device=_A ) # Legal the president is
UpperCAmelCase = [
1_1_8_5_9,
0,
1_6_1_1,
8,
5,
1_5_0,
2_6_4_4_9,
2,
1_9,
3_4_8,
4_6_9,
3,
2_5_9_5,
4_8,
2_0_7_4_0,
2_4_6_5_3_3,
2_4_6_5_3_3,
1_9,
3_0,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
UpperCAmelCase = model.generate(_A , do_sample=_A )
self.assertListEqual(output_ids[0].tolist() , _A )
| 273
| 0
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowercase_ = 1_6
lowercase_ = 3_2
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = 16 ):
__lowerCamelCase : Tuple = AutoTokenizer.from_pretrained('bert-base-cased' )
__lowerCamelCase : Optional[int] = load_dataset('glue' , 'mrpc' )
def tokenize_function(SCREAMING_SNAKE_CASE__ ):
# max_length=None => use the model max length (it's actually the default)
__lowerCamelCase : Optional[int] = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
__lowerCamelCase : Any = datasets.map(
SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__lowerCamelCase : List[Any] = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(SCREAMING_SNAKE_CASE__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
__lowerCamelCase : Dict = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
__lowerCamelCase : Tuple = 16
elif accelerator.mixed_precision != "no":
__lowerCamelCase : Any = 8
else:
__lowerCamelCase : Tuple = None
return tokenizer.pad(
SCREAMING_SNAKE_CASE__ , padding='longest' , max_length=SCREAMING_SNAKE_CASE__ , pad_to_multiple_of=SCREAMING_SNAKE_CASE__ , return_tensors='pt' , )
# Instantiate dataloaders.
__lowerCamelCase : Optional[Any] = DataLoader(
tokenized_datasets['train'] , shuffle=SCREAMING_SNAKE_CASE__ , collate_fn=SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase : List[str] = DataLoader(
tokenized_datasets['validation'] , shuffle=SCREAMING_SNAKE_CASE__ , collate_fn=SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
lowercase_ = mocked_dataloaders # noqa: F811
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS' , SCREAMING_SNAKE_CASE__ ) == "1":
__lowerCamelCase : str = 2
# New Code #
__lowerCamelCase : List[str] = int(args.gradient_accumulation_steps )
# Initialize accelerator
__lowerCamelCase : str = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=SCREAMING_SNAKE_CASE__ )
if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
raise NotImplementedError(
'Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`' )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__lowerCamelCase : Dict = config['lr']
__lowerCamelCase : Union[str, Any] = int(config['num_epochs'] )
__lowerCamelCase : int = int(config['seed'] )
__lowerCamelCase : str = int(config['batch_size'] )
__lowerCamelCase : str = evaluate.load('glue' , 'mrpc' )
set_seed(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase , __lowerCamelCase : Optional[int] = get_dataloaders(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__lowerCamelCase : str = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=SCREAMING_SNAKE_CASE__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
__lowerCamelCase : Optional[int] = model.to(accelerator.device )
# Instantiate optimizer
__lowerCamelCase : Tuple = AdamW(params=model.parameters() , lr=SCREAMING_SNAKE_CASE__ )
# Instantiate scheduler
__lowerCamelCase : Union[str, Any] = get_linear_schedule_with_warmup(
optimizer=SCREAMING_SNAKE_CASE__ , num_warmup_steps=100 , num_training_steps=(len(SCREAMING_SNAKE_CASE__ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : str = accelerator.prepare(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Now we train the model
for epoch in range(SCREAMING_SNAKE_CASE__ ):
model.train()
for step, batch in enumerate(SCREAMING_SNAKE_CASE__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase : int = model(**SCREAMING_SNAKE_CASE__ )
__lowerCamelCase : Tuple = output.loss
accelerator.backward(SCREAMING_SNAKE_CASE__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(SCREAMING_SNAKE_CASE__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__lowerCamelCase : Optional[Any] = model(**SCREAMING_SNAKE_CASE__ )
__lowerCamelCase : Any = outputs.logits.argmax(dim=-1 )
__lowerCamelCase , __lowerCamelCase : str = accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=SCREAMING_SNAKE_CASE__ , references=SCREAMING_SNAKE_CASE__ , )
__lowerCamelCase : Union[str, Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'epoch {epoch}:' , SCREAMING_SNAKE_CASE__ )
def UpperCamelCase__ ( ):
__lowerCamelCase : Optional[Any] = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
'--mixed_precision' , type=SCREAMING_SNAKE_CASE__ , default=SCREAMING_SNAKE_CASE__ , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.' , )
# New Code #
parser.add_argument(
'--gradient_accumulation_steps' , type=SCREAMING_SNAKE_CASE__ , default=1 , help='The number of minibatches to be ran before gradients are accumulated.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
__lowerCamelCase : Dict = parser.parse_args()
__lowerCamelCase : List[str] = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
main()
| 194
|
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if isinstance(SCREAMING_SNAKE_CASE__ , torch.Tensor ):
return image
elif isinstance(SCREAMING_SNAKE_CASE__ , PIL.Image.Image ):
__lowerCamelCase : Union[str, Any] = [image]
if isinstance(image[0] , PIL.Image.Image ):
__lowerCamelCase : Any = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image]
__lowerCamelCase : Optional[int] = np.concatenate(SCREAMING_SNAKE_CASE__ , axis=0 )
__lowerCamelCase : str = np.array(SCREAMING_SNAKE_CASE__ ).astype(np.floataa ) / 255.0
__lowerCamelCase : List[str] = image.transpose(0 , 3 , 1 , 2 )
__lowerCamelCase : Union[str, Any] = 2.0 * image - 1.0
__lowerCamelCase : Tuple = torch.from_numpy(SCREAMING_SNAKE_CASE__ )
elif isinstance(image[0] , torch.Tensor ):
__lowerCamelCase : str = torch.cat(SCREAMING_SNAKE_CASE__ , dim=0 )
return image
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=0.9_995 ):
if not isinstance(SCREAMING_SNAKE_CASE__ , np.ndarray ):
__lowerCamelCase : List[str] = True
__lowerCamelCase : str = va.device
__lowerCamelCase : int = va.cpu().numpy()
__lowerCamelCase : List[str] = va.cpu().numpy()
__lowerCamelCase : str = np.sum(va * va / (np.linalg.norm(SCREAMING_SNAKE_CASE__ ) * np.linalg.norm(SCREAMING_SNAKE_CASE__ )) )
if np.abs(SCREAMING_SNAKE_CASE__ ) > DOT_THRESHOLD:
__lowerCamelCase : Union[str, Any] = (1 - t) * va + t * va
else:
__lowerCamelCase : List[Any] = np.arccos(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase : Dict = np.sin(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase : str = theta_a * t
__lowerCamelCase : List[Any] = np.sin(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase : str = np.sin(theta_a - theta_t ) / sin_theta_a
__lowerCamelCase : List[Any] = sin_theta_t / sin_theta_a
__lowerCamelCase : Union[str, Any] = sa * va + sa * va
if inputs_are_torch:
__lowerCamelCase : str = torch.from_numpy(SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ )
return va
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase : List[Any] = F.normalize(SCREAMING_SNAKE_CASE__ , dim=-1 )
__lowerCamelCase : Union[str, Any] = F.normalize(SCREAMING_SNAKE_CASE__ , dim=-1 )
return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
for param in model.parameters():
__lowerCamelCase : Any = value
class A_ ( __UpperCamelCase ):
'''simple docstring'''
def __init__( self: Any , a: AutoencoderKL , a: CLIPTextModel , a: CLIPModel , a: CLIPTokenizer , a: UNetaDConditionModel , a: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler] , a: CLIPFeatureExtractor , a: Union[str, Any]=None , a: Union[str, Any]=None , a: Union[str, Any]=None , ):
super().__init__()
self.register_modules(
vae=a , text_encoder=a , clip_model=a , tokenizer=a , unet=a , scheduler=a , feature_extractor=a , coca_model=a , coca_tokenizer=a , coca_transform=a , )
__lowerCamelCase : Tuple = (
feature_extractor.size
if isinstance(feature_extractor.size , a )
else feature_extractor.size['shortest_edge']
)
__lowerCamelCase : List[Any] = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , a )
set_requires_grad(self.clip_model , a )
def _snake_case ( self: Optional[Any] , a: Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__lowerCamelCase : Any = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(a )
def _snake_case ( self: Dict ):
self.enable_attention_slicing(a )
def _snake_case ( self: Optional[Any] ):
set_requires_grad(self.vae , a )
def _snake_case ( self: List[Any] ):
set_requires_grad(self.vae , a )
def _snake_case ( self: int ):
set_requires_grad(self.unet , a )
def _snake_case ( self: int ):
set_requires_grad(self.unet , a )
def _snake_case ( self: Optional[Any] , a: Union[str, Any] , a: List[str] , a: List[Any] ):
# get the original timestep using init_timestep
__lowerCamelCase : List[Any] = min(int(num_inference_steps * strength ) , a )
__lowerCamelCase : str = max(num_inference_steps - init_timestep , 0 )
__lowerCamelCase : List[Any] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def _snake_case ( self: Union[str, Any] , a: Optional[Any] , a: Any , a: Optional[int] , a: Optional[Any] , a: Union[str, Any] , a: List[str]=None ):
if not isinstance(a , torch.Tensor ):
raise ValueError(F'`image` has to be of type `torch.Tensor` but is {type(a )}' )
__lowerCamelCase : Union[str, Any] = image.to(device=a , dtype=a )
if isinstance(a , a ):
__lowerCamelCase : str = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(a )
]
__lowerCamelCase : Tuple = torch.cat(a , dim=0 )
else:
__lowerCamelCase : List[Any] = self.vae.encode(a ).latent_dist.sample(a )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
__lowerCamelCase : List[str] = 0.1_8_2_1_5 * init_latents
__lowerCamelCase : Union[str, Any] = init_latents.repeat_interleave(a , dim=0 )
__lowerCamelCase : Optional[int] = randn_tensor(init_latents.shape , generator=a , device=a , dtype=a )
# get latents
__lowerCamelCase : Union[str, Any] = self.scheduler.add_noise(a , a , a )
__lowerCamelCase : int = init_latents
return latents
def _snake_case ( self: Optional[int] , a: Any ):
__lowerCamelCase : List[Any] = self.coca_transform(a ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
__lowerCamelCase : Any = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
__lowerCamelCase : str = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split('<end_of_text>' )[0].replace('<start_of_text>' , '' ).rstrip(' .,' )
def _snake_case ( self: Any , a: Tuple , a: Tuple ):
__lowerCamelCase : Dict = self.feature_extractor.preprocess(a )
__lowerCamelCase : Dict = torch.from_numpy(clip_image_input['pixel_values'][0] ).unsqueeze(0 ).to(self.device ).half()
__lowerCamelCase : List[str] = self.clip_model.get_image_features(a )
__lowerCamelCase : Optional[Any] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=a )
__lowerCamelCase : Tuple = image_embeddings_clip.repeat_interleave(a , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def _snake_case ( self: str , a: str , a: int , a: List[Any] , a: str , a: List[Any] , a: Dict , a: int , ):
__lowerCamelCase : Optional[Any] = latents.detach().requires_grad_()
__lowerCamelCase : str = self.scheduler.scale_model_input(a , a )
# predict the noise residual
__lowerCamelCase : Optional[int] = self.unet(a , a , encoder_hidden_states=a ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
__lowerCamelCase : str = self.scheduler.alphas_cumprod[timestep]
__lowerCamelCase : Dict = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__lowerCamelCase : Optional[int] = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
__lowerCamelCase : Optional[int] = torch.sqrt(a )
__lowerCamelCase : int = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , a ):
__lowerCamelCase : str = self.scheduler.sigmas[index]
__lowerCamelCase : List[Any] = latents - sigma * noise_pred
else:
raise ValueError(F'scheduler type {type(self.scheduler )} not supported' )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
__lowerCamelCase : Optional[int] = 1 / 0.1_8_2_1_5 * sample
__lowerCamelCase : Optional[Any] = self.vae.decode(a ).sample
__lowerCamelCase : Tuple = (image / 2 + 0.5).clamp(0 , 1 )
__lowerCamelCase : Any = transforms.Resize(self.feature_extractor_size )(a )
__lowerCamelCase : Union[str, Any] = self.normalize(a ).to(latents.dtype )
__lowerCamelCase : Tuple = self.clip_model.get_image_features(a )
__lowerCamelCase : List[str] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=a )
__lowerCamelCase : List[str] = spherical_dist_loss(a , a ).mean() * clip_guidance_scale
__lowerCamelCase : Tuple = -torch.autograd.grad(a , a )[0]
if isinstance(self.scheduler , a ):
__lowerCamelCase : Optional[int] = latents.detach() + grads * (sigma**2)
__lowerCamelCase : List[Any] = noise_pred_original
else:
__lowerCamelCase : str = noise_pred_original - torch.sqrt(a ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self: Any , a: Union[torch.FloatTensor, PIL.Image.Image] , a: Union[torch.FloatTensor, PIL.Image.Image] , a: Optional[str] = None , a: Optional[str] = None , a: Optional[int] = 512 , a: Optional[int] = 512 , a: float = 0.6 , a: Optional[int] = 50 , a: Optional[float] = 7.5 , a: Optional[int] = 1 , a: float = 0.0 , a: Optional[float] = 100 , a: Optional[torch.Generator] = None , a: Optional[str] = "pil" , a: bool = True , a: float = 0.8 , a: float = 0.1 , a: float = 0.1 , ):
if isinstance(a , a ) and len(a ) != batch_size:
raise ValueError(F'You have passed {batch_size} batch_size, but only {len(a )} generators.' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'`height` and `width` have to be divisible by 8 but are {height} and {width}.' )
if isinstance(a , torch.Generator ) and batch_size > 1:
__lowerCamelCase : List[Any] = [generator] + [None] * (batch_size - 1)
__lowerCamelCase : Dict = [
('model', self.coca_model is None),
('tokenizer', self.coca_tokenizer is None),
('transform', self.coca_transform is None),
]
__lowerCamelCase : Any = [x[0] for x in coca_is_none if x[1]]
__lowerCamelCase : str = ', '.join(a )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(a ):
raise ValueError(
F'Content prompt is None and CoCa [{coca_is_none_str}] is None.'
F'Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.' )
__lowerCamelCase : Any = self.get_image_description(a )
if style_prompt is None:
if len(a ):
raise ValueError(
F'Style prompt is None and CoCa [{coca_is_none_str}] is None.'
F' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.' )
__lowerCamelCase : Tuple = self.get_image_description(a )
# get prompt text embeddings for content and style
__lowerCamelCase : int = self.tokenizer(
a , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=a , return_tensors='pt' , )
__lowerCamelCase : Dict = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
__lowerCamelCase : Union[str, Any] = self.tokenizer(
a , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=a , return_tensors='pt' , )
__lowerCamelCase : Any = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
__lowerCamelCase : List[Any] = slerp(a , a , a )
# duplicate text embeddings for each generation per prompt
__lowerCamelCase : Any = text_embeddings.repeat_interleave(a , dim=0 )
# set timesteps
__lowerCamelCase : List[Any] = 'offset' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
__lowerCamelCase : Union[str, Any] = {}
if accepts_offset:
__lowerCamelCase : Dict = 1
self.scheduler.set_timesteps(a , **a )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
__lowerCamelCase , __lowerCamelCase : Dict = self.get_timesteps(a , a , self.device )
__lowerCamelCase : Tuple = timesteps[:1].repeat(a )
# Preprocess image
__lowerCamelCase : Any = preprocess(a , a , a )
__lowerCamelCase : str = self.prepare_latents(
a , a , a , text_embeddings.dtype , self.device , a )
__lowerCamelCase : Dict = preprocess(a , a , a )
__lowerCamelCase : Optional[int] = self.prepare_latents(
a , a , a , text_embeddings.dtype , self.device , a )
__lowerCamelCase : int = slerp(a , a , a )
if clip_guidance_scale > 0:
__lowerCamelCase : List[str] = self.get_clip_image_embeddings(a , a )
__lowerCamelCase : Union[str, Any] = self.get_clip_image_embeddings(a , a )
__lowerCamelCase : Union[str, Any] = slerp(
a , a , a )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__lowerCamelCase : Tuple = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__lowerCamelCase : Optional[int] = content_text_input.input_ids.shape[-1]
__lowerCamelCase : int = self.tokenizer([''] , padding='max_length' , max_length=a , return_tensors='pt' )
__lowerCamelCase : List[Any] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
__lowerCamelCase : List[Any] = uncond_embeddings.repeat_interleave(a , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__lowerCamelCase : int = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__lowerCamelCase : str = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
__lowerCamelCase : List[str] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
__lowerCamelCase : Tuple = torch.randn(a , generator=a , device='cpu' , dtype=a ).to(
self.device )
else:
__lowerCamelCase : List[Any] = torch.randn(a , generator=a , device=self.device , dtype=a )
else:
if latents.shape != latents_shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
__lowerCamelCase : List[str] = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__lowerCamelCase : str = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__lowerCamelCase : int = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__lowerCamelCase : Dict = {}
if accepts_eta:
__lowerCamelCase : List[str] = eta
# check if the scheduler accepts generator
__lowerCamelCase : Optional[int] = 'generator' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
__lowerCamelCase : Optional[Any] = generator
with self.progress_bar(total=a ):
for i, t in enumerate(a ):
# expand the latents if we are doing classifier free guidance
__lowerCamelCase : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__lowerCamelCase : Union[str, Any] = self.scheduler.scale_model_input(a , a )
# predict the noise residual
__lowerCamelCase : Tuple = self.unet(a , a , encoder_hidden_states=a ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
__lowerCamelCase , __lowerCamelCase : str = noise_pred.chunk(2 )
__lowerCamelCase : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
__lowerCamelCase : str = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
__lowerCamelCase , __lowerCamelCase : int = self.cond_fn(
a , a , a , a , a , a , a , )
# compute the previous noisy sample x_t -> x_t-1
__lowerCamelCase : Tuple = self.scheduler.step(a , a , a , **a ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
__lowerCamelCase : List[Any] = 1 / 0.1_8_2_1_5 * latents
__lowerCamelCase : Union[str, Any] = self.vae.decode(a ).sample
__lowerCamelCase : Optional[int] = (image / 2 + 0.5).clamp(0 , 1 )
__lowerCamelCase : Any = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__lowerCamelCase : Union[str, Any] = self.numpy_to_pil(a )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=a , nsfw_content_detected=a )
| 194
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_UpperCamelCase : str = {
'configuration_gpt_neo': ['GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoConfig', 'GPTNeoOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Optional[Any] = [
'GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTNeoForCausalLM',
'GPTNeoForQuestionAnswering',
'GPTNeoForSequenceClassification',
'GPTNeoForTokenClassification',
'GPTNeoModel',
'GPTNeoPreTrainedModel',
'load_tf_weights_in_gpt_neo',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : int = [
'FlaxGPTNeoForCausalLM',
'FlaxGPTNeoModel',
'FlaxGPTNeoPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
_UpperCamelCase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 77
|
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
| 6
| 0
|
"""simple docstring"""
def __UpperCAmelCase ( snake_case_ : int ) -> list[int]:
"""simple docstring"""
if num <= 0:
raise ValueError("""Input must be a positive integer""" )
_lowerCAmelCase = [True] * (num + 1)
_lowerCAmelCase = 2
while p * p <= num:
if primes[p]:
for i in range(p * p , num + 1 , snake_case_ ):
_lowerCAmelCase = False
p += 1
return [prime for prime in range(2 , num + 1 ) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE : Dict = int(input('''Enter a positive integer: ''').strip())
print(prime_sieve_eratosthenes(user_num))
| 317
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class __lowerCamelCase ( unittest.TestCase ):
def __init__(self , lowerCamelCase , lowerCamelCase=7 , lowerCamelCase=3 , lowerCamelCase=18 , lowerCamelCase=30 , lowerCamelCase=400 , lowerCamelCase=True , lowerCamelCase=None , lowerCamelCase=True , lowerCamelCase=None , ):
'''simple docstring'''
_lowerCAmelCase = size if size is not None else {"""shortest_edge""": 20}
_lowerCAmelCase = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
_lowerCAmelCase = parent
_lowerCAmelCase = batch_size
_lowerCAmelCase = num_channels
_lowerCAmelCase = image_size
_lowerCAmelCase = min_resolution
_lowerCAmelCase = max_resolution
_lowerCAmelCase = do_resize
_lowerCAmelCase = size
_lowerCAmelCase = do_center_crop
_lowerCAmelCase = crop_size
def A__ (self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class __lowerCamelCase ( __lowercase , unittest.TestCase ):
__UpperCamelCase = MobileNetVaImageProcessor if is_vision_available() else None
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = MobileNetVaImageProcessingTester(self )
@property
def A__ (self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(lowerCamelCase , """size""" ) )
self.assertTrue(hasattr(lowerCamelCase , """do_center_crop""" ) )
self.assertTrue(hasattr(lowerCamelCase , """crop_size""" ) )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 20} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
_lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def A__ (self ):
'''simple docstring'''
pass
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , Image.Image )
# Test not batched input
_lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_lowerCAmelCase = image_processing(lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , numpify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , np.ndarray )
# Test not batched input
_lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_lowerCAmelCase = image_processing(lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , torchify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , torch.Tensor )
# Test not batched input
_lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_lowerCAmelCase = image_processing(lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
| 317
| 1
|
"""simple docstring"""
from __future__ import annotations
def lowerCamelCase ( _UpperCamelCase : list[int] , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : int ) -> None:
'''simple docstring'''
if (direction == 1 and array[indexa] > array[indexa]) or (
direction == 0 and array[indexa] < array[indexa]
):
__UpperCAmelCase ,__UpperCAmelCase : Optional[int] = array[indexa], array[indexa]
def lowerCamelCase ( _UpperCamelCase : list[int] , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : int ) -> None:
'''simple docstring'''
if length > 1:
__UpperCAmelCase : Optional[int] = int(length / 2 )
for i in range(__UpperCamelCase , low + middle ):
comp_and_swap(__UpperCamelCase , __UpperCamelCase , i + middle , __UpperCamelCase )
bitonic_merge(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
bitonic_merge(__UpperCamelCase , low + middle , __UpperCamelCase , __UpperCamelCase )
def lowerCamelCase ( _UpperCamelCase : list[int] , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : int ) -> None:
'''simple docstring'''
if length > 1:
__UpperCAmelCase : Union[str, Any] = int(length / 2 )
bitonic_sort(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , 1 )
bitonic_sort(__UpperCamelCase , low + middle , __UpperCamelCase , 0 )
bitonic_merge(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if __name__ == "__main__":
UpperCAmelCase : Optional[int] = input('Enter numbers separated by a comma:\n').strip()
UpperCAmelCase : str = [int(item.strip()) for item in user_input.split(',')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('\nSorted array in ascending order is: ', end='')
print(*unsorted, sep=', ')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('Sorted array in descending order is: ', end='')
print(*unsorted, sep=', ')
| 115
|
import socket
def __SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
SCREAMING_SNAKE_CASE__ = socket.gethostname()
SCREAMING_SNAKE_CASE__ = 1_23_12
sock.connect((host, port) )
sock.send(B"""Hello server!""" )
with open("""Received_file""" , """wb""" ) as out_file:
print("""File opened""" )
print("""Receiving data...""" )
while True:
SCREAMING_SNAKE_CASE__ = sock.recv(10_24 )
if not data:
break
out_file.write(__UpperCamelCase )
print("""Successfully received the file""" )
sock.close()
print("""Connection closed""" )
if __name__ == "__main__":
main()
| 219
| 0
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
lowerCAmelCase__ = logging.get_logger(__name__)
class _lowerCamelCase ( _lowercase ):
def __init__(self , *__a , **__a ) -> None:
warnings.warn(
"The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use MobileViTImageProcessor instead." , __a , )
super().__init__(*__a , **__a )
| 244
|
"""simple docstring"""
def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ):
raise ValueError("The length of profit and weight must be same." )
if max_weight <= 0:
raise ValueError("max_weight must greater than zero." )
if any(p < 0 for p in profit ):
raise ValueError("Profit can not be negative." )
if any(w < 0 for w in weight ):
raise ValueError("Weight can not be negative." )
# List created to store profit gained for the 1kg in case of each weight
# respectively. Calculate and append profit/weight for each element.
UpperCamelCase = [p / w for p, w in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )]
# Creating a copy of the list and sorting profit/weight in ascending order
UpperCamelCase = sorted(_SCREAMING_SNAKE_CASE )
# declaring useful variables
UpperCamelCase = len(_SCREAMING_SNAKE_CASE )
UpperCamelCase = 0
UpperCamelCase = 0
UpperCamelCase = 0
# loop till the total weight do not reach max limit e.g. 15 kg and till i<length
while limit <= max_weight and i < length:
# flag value for encountered greatest element in sorted_profit_by_weight
UpperCamelCase = sorted_profit_by_weight[length - i - 1]
UpperCamelCase = profit_by_weight.index(_SCREAMING_SNAKE_CASE )
UpperCamelCase = -1
# check if the weight encountered is less than the total weight
# encountered before.
if max_weight - limit >= weight[index]:
limit += weight[index]
# Adding profit gained for the given weight 1 ===
# weight[index]/weight[index]
gain += 1 * profit[index]
else:
# Since the weight encountered is greater than limit, therefore take the
# required number of remaining kgs and calculate profit for it.
# weight remaining / weight[index]
gain += (max_weight - limit) / weight[index] * profit[index]
break
i += 1
return gain
if __name__ == "__main__":
print(
'''Input profits, weights, and then max_weight (all positive ints) separated by '''
'''spaces.'''
)
lowerCAmelCase__ = [int(x) for x in input('''Input profits separated by spaces: ''').split()]
lowerCAmelCase__ = [int(x) for x in input('''Input weights separated by spaces: ''').split()]
lowerCAmelCase__ = int(input('''Max weight allowed: '''))
# Function Call
calc_profit(profit, weight, max_weight)
| 244
| 1
|
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
__lowerCamelCase : Dict = 10
def _snake_case ( lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : list[int] , lowerCAmelCase : int ):
"""simple docstring"""
for i in range(lowerCAmelCase , lowerCAmelCase ):
if array[i] == target:
return i
return -1
def _snake_case ( lowerCAmelCase : list[int] , lowerCAmelCase : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = 0
SCREAMING_SNAKE_CASE_ : str = len(lowerCAmelCase )
while left <= right:
if right - left < precision:
return lin_search(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Dict = (left + right) // 3 + 1
SCREAMING_SNAKE_CASE_ : Dict = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
SCREAMING_SNAKE_CASE_ : Dict = one_third - 1
elif array[two_third] < target:
SCREAMING_SNAKE_CASE_ : Any = two_third + 1
else:
SCREAMING_SNAKE_CASE_ : List[str] = one_third + 1
SCREAMING_SNAKE_CASE_ : int = two_third - 1
else:
return -1
def _snake_case ( lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : list[int] , lowerCAmelCase : int ):
"""simple docstring"""
if left < right:
if right - left < precision:
return lin_search(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : str = (left + right) // 3 + 1
SCREAMING_SNAKE_CASE_ : Dict = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
return rec_ternary_search(lowerCAmelCase , one_third - 1 , lowerCAmelCase , lowerCAmelCase )
elif array[two_third] < target:
return rec_ternary_search(two_third + 1 , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
else:
return rec_ternary_search(one_third + 1 , two_third - 1 , lowerCAmelCase , lowerCAmelCase )
else:
return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCamelCase : Optional[Any] = input('''Enter numbers separated by comma:\n''').strip()
__lowerCamelCase : str = [int(item.strip()) for item in user_input.split(''',''')]
assert collection == sorted(collection), f"List must be ordered.\n{collection}."
__lowerCamelCase : str = int(input('''Enter the number to be found in the list:\n''').strip())
__lowerCamelCase : int = ite_ternary_search(collection, target)
__lowerCamelCase : Tuple = rec_ternary_search(0, len(collection) - 1, collection, target)
if resulta != -1:
print(f'''Iterative search: {target} found at positions: {resulta}''')
print(f'''Recursive search: {target} found at positions: {resulta}''')
else:
print('''Not found''')
| 18
|
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class a__ ( unittest.TestCase ):
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = {
"task_specific_params": {
"summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
"summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
"summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
}
}
SCREAMING_SNAKE_CASE_ : Any = {
"task_specific_params.summarization.length_penalty": 1.0,
"task_specific_params.summarization.max_length": 128,
"task_specific_params.summarization.min_length": 12,
"task_specific_params.summarization.num_beams": 4,
"task_specific_params.summarization_cnn.length_penalty": 2.0,
"task_specific_params.summarization_cnn.max_length": 142,
"task_specific_params.summarization_cnn.min_length": 56,
"task_specific_params.summarization_cnn.num_beams": 4,
"task_specific_params.summarization_xsum.length_penalty": 1.0,
"task_specific_params.summarization_xsum.max_length": 62,
"task_specific_params.summarization_xsum.min_length": 11,
"task_specific_params.summarization_xsum.num_beams": 6,
}
self.assertEqual(flatten_dict(_A ),_A )
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = np.random.randn(3,4 )
self.assertTrue(np.allclose(transpose(_A ),x.transpose() ) )
SCREAMING_SNAKE_CASE_ : Tuple = np.random.randn(3,4,5 )
self.assertTrue(np.allclose(transpose(_A,axes=(1, 2, 0) ),x.transpose((1, 2, 0) ) ) )
@require_torch
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : Any = torch.tensor(_A )
self.assertTrue(np.allclose(transpose(_A ),transpose(_A ).numpy() ) )
SCREAMING_SNAKE_CASE_ : Dict = np.random.randn(3,4,5 )
SCREAMING_SNAKE_CASE_ : Tuple = torch.tensor(_A )
self.assertTrue(np.allclose(transpose(_A,axes=(1, 2, 0) ),transpose(_A,axes=(1, 2, 0) ).numpy() ) )
@require_tf
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = tf.constant(_A )
self.assertTrue(np.allclose(transpose(_A ),transpose(_A ).numpy() ) )
SCREAMING_SNAKE_CASE_ : List[str] = np.random.randn(3,4,5 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.constant(_A )
self.assertTrue(np.allclose(transpose(_A,axes=(1, 2, 0) ),transpose(_A,axes=(1, 2, 0) ).numpy() ) )
@require_flax
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : Tuple = jnp.array(_A )
self.assertTrue(np.allclose(transpose(_A ),np.asarray(transpose(_A ) ) ) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.random.randn(3,4,5 )
SCREAMING_SNAKE_CASE_ : List[Any] = jnp.array(_A )
self.assertTrue(np.allclose(transpose(_A,axes=(1, 2, 0) ),np.asarray(transpose(_A,axes=(1, 2, 0) ) ) ) )
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = np.random.randn(3,4 )
self.assertTrue(np.allclose(reshape(_A,(4, 3) ),np.reshape(_A,(4, 3) ) ) )
SCREAMING_SNAKE_CASE_ : Any = np.random.randn(3,4,5 )
self.assertTrue(np.allclose(reshape(_A,(12, 5) ),np.reshape(_A,(12, 5) ) ) )
@require_torch
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor(_A )
self.assertTrue(np.allclose(reshape(_A,(4, 3) ),reshape(_A,(4, 3) ).numpy() ) )
SCREAMING_SNAKE_CASE_ : Dict = np.random.randn(3,4,5 )
SCREAMING_SNAKE_CASE_ : int = torch.tensor(_A )
self.assertTrue(np.allclose(reshape(_A,(12, 5) ),reshape(_A,(12, 5) ).numpy() ) )
@require_tf
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = tf.constant(_A )
self.assertTrue(np.allclose(reshape(_A,(4, 3) ),reshape(_A,(4, 3) ).numpy() ) )
SCREAMING_SNAKE_CASE_ : int = np.random.randn(3,4,5 )
SCREAMING_SNAKE_CASE_ : Any = tf.constant(_A )
self.assertTrue(np.allclose(reshape(_A,(12, 5) ),reshape(_A,(12, 5) ).numpy() ) )
@require_flax
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : int = jnp.array(_A )
self.assertTrue(np.allclose(reshape(_A,(4, 3) ),np.asarray(reshape(_A,(4, 3) ) ) ) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.random.randn(3,4,5 )
SCREAMING_SNAKE_CASE_ : Tuple = jnp.array(_A )
self.assertTrue(np.allclose(reshape(_A,(12, 5) ),np.asarray(reshape(_A,(12, 5) ) ) ) )
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = np.random.randn(1,3,4 )
self.assertTrue(np.allclose(squeeze(_A ),np.squeeze(_A ) ) )
SCREAMING_SNAKE_CASE_ : Any = np.random.randn(1,4,1,5 )
self.assertTrue(np.allclose(squeeze(_A,axis=2 ),np.squeeze(_A,axis=2 ) ) )
@require_torch
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = np.random.randn(1,3,4 )
SCREAMING_SNAKE_CASE_ : Any = torch.tensor(_A )
self.assertTrue(np.allclose(squeeze(_A ),squeeze(_A ).numpy() ) )
SCREAMING_SNAKE_CASE_ : Any = np.random.randn(1,4,1,5 )
SCREAMING_SNAKE_CASE_ : Dict = torch.tensor(_A )
self.assertTrue(np.allclose(squeeze(_A,axis=2 ),squeeze(_A,axis=2 ).numpy() ) )
@require_tf
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = np.random.randn(1,3,4 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.constant(_A )
self.assertTrue(np.allclose(squeeze(_A ),squeeze(_A ).numpy() ) )
SCREAMING_SNAKE_CASE_ : Any = np.random.randn(1,4,1,5 )
SCREAMING_SNAKE_CASE_ : Optional[int] = tf.constant(_A )
self.assertTrue(np.allclose(squeeze(_A,axis=2 ),squeeze(_A,axis=2 ).numpy() ) )
@require_flax
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = np.random.randn(1,3,4 )
SCREAMING_SNAKE_CASE_ : List[str] = jnp.array(_A )
self.assertTrue(np.allclose(squeeze(_A ),np.asarray(squeeze(_A ) ) ) )
SCREAMING_SNAKE_CASE_ : str = np.random.randn(1,4,1,5 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = jnp.array(_A )
self.assertTrue(np.allclose(squeeze(_A,axis=2 ),np.asarray(squeeze(_A,axis=2 ) ) ) )
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = np.random.randn(3,4 )
self.assertTrue(np.allclose(expand_dims(_A,axis=1 ),np.expand_dims(_A,axis=1 ) ) )
@require_torch
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor(_A )
self.assertTrue(np.allclose(expand_dims(_A,axis=1 ),expand_dims(_A,axis=1 ).numpy() ) )
@require_tf
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : Optional[int] = tf.constant(_A )
self.assertTrue(np.allclose(expand_dims(_A,axis=1 ),expand_dims(_A,axis=1 ).numpy() ) )
@require_flax
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = jnp.array(_A )
self.assertTrue(np.allclose(expand_dims(_A,axis=1 ),np.asarray(expand_dims(_A,axis=1 ) ) ) )
| 18
| 1
|
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class A_ :
'''simple docstring'''
def __init__( self : Union[str, Any] , lowercase_ : str , lowercase_ : Union[str, Any]=2 , lowercase_ : Optional[Any]=3 , lowercase_ : List[Any]=4 , lowercase_ : Union[str, Any]=2 , lowercase_ : List[Any]=7 , lowercase_ : Any=True , lowercase_ : Tuple=True , lowercase_ : List[str]=True , lowercase_ : Union[str, Any]=True , lowercase_ : str=99 , lowercase_ : str=36 , lowercase_ : int=3 , lowercase_ : int=4 , lowercase_ : Any=37 , lowercase_ : str="gelu" , lowercase_ : List[str]=0.1 , lowercase_ : Optional[int]=0.1 , lowercase_ : int=512 , lowercase_ : int=16 , lowercase_ : Dict=2 , lowercase_ : Dict=0.02 , lowercase_ : Optional[int]=6 , lowercase_ : Tuple=6 , lowercase_ : Any=3 , lowercase_ : Dict=4 , lowercase_ : Any=None , lowercase_ : Tuple=1_000 , ) -> Tuple:
UpperCAmelCase : List[Any] = parent
UpperCAmelCase : List[Any] = batch_size
UpperCAmelCase : List[Any] = num_channels
UpperCAmelCase : Optional[int] = image_size
UpperCAmelCase : int = patch_size
UpperCAmelCase : Tuple = text_seq_length
UpperCAmelCase : int = is_training
UpperCAmelCase : Any = use_input_mask
UpperCAmelCase : Optional[int] = use_token_type_ids
UpperCAmelCase : int = use_labels
UpperCAmelCase : Dict = vocab_size
UpperCAmelCase : List[str] = hidden_size
UpperCAmelCase : Any = num_hidden_layers
UpperCAmelCase : str = num_attention_heads
UpperCAmelCase : Tuple = intermediate_size
UpperCAmelCase : Optional[int] = hidden_act
UpperCAmelCase : str = hidden_dropout_prob
UpperCAmelCase : Any = attention_probs_dropout_prob
UpperCAmelCase : Tuple = max_position_embeddings
UpperCAmelCase : List[str] = type_vocab_size
UpperCAmelCase : List[str] = type_sequence_label_size
UpperCAmelCase : int = initializer_range
UpperCAmelCase : Optional[int] = coordinate_size
UpperCAmelCase : Optional[int] = shape_size
UpperCAmelCase : str = num_labels
UpperCAmelCase : str = num_choices
UpperCAmelCase : int = scope
UpperCAmelCase : Tuple = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
UpperCAmelCase : Any = text_seq_length
UpperCAmelCase : int = (image_size // patch_size) ** 2 + 1
UpperCAmelCase : Optional[int] = self.text_seq_length + self.image_seq_length
def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
UpperCAmelCase : int = bbox[i, j, 3]
UpperCAmelCase : List[Any] = bbox[i, j, 1]
UpperCAmelCase : Union[str, Any] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
UpperCAmelCase : Tuple = bbox[i, j, 2]
UpperCAmelCase : List[str] = bbox[i, j, 0]
UpperCAmelCase : List[str] = t
UpperCAmelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase : int = None
if self.use_input_mask:
UpperCAmelCase : Union[str, Any] = random_attention_mask([self.batch_size, self.text_seq_length] )
UpperCAmelCase : int = None
if self.use_token_type_ids:
UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
UpperCAmelCase : Dict = None
UpperCAmelCase : Dict = None
if self.use_labels:
UpperCAmelCase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase : int = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
UpperCAmelCase : str = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def UpperCAmelCase_ ( self : str , lowercase_ : str , lowercase_ : Tuple , lowercase_ : Any , lowercase_ : Optional[int] , lowercase_ : Union[str, Any] , lowercase_ : str , lowercase_ : str , lowercase_ : Optional[Any] ) -> Any:
UpperCAmelCase : Dict = LayoutLMvaModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
# text + image
UpperCAmelCase : Optional[Any] = model(lowercase_ , pixel_values=lowercase_ )
UpperCAmelCase : str = model(
lowercase_ , bbox=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ )
UpperCAmelCase : Any = model(lowercase_ , bbox=lowercase_ , pixel_values=lowercase_ , token_type_ids=lowercase_ )
UpperCAmelCase : str = model(lowercase_ , bbox=lowercase_ , pixel_values=lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
UpperCAmelCase : List[Any] = model(lowercase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
UpperCAmelCase : Dict = model(pixel_values=lowercase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self : List[Any] , lowercase_ : int , lowercase_ : Tuple , lowercase_ : List[Any] , lowercase_ : Tuple , lowercase_ : Tuple , lowercase_ : str , lowercase_ : Any , lowercase_ : Union[str, Any] ) -> Union[str, Any]:
UpperCAmelCase : List[Any] = self.num_labels
UpperCAmelCase : Union[str, Any] = LayoutLMvaForSequenceClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase : int = model(
lowercase_ , bbox=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase_ ( self : Any , lowercase_ : int , lowercase_ : str , lowercase_ : Any , lowercase_ : int , lowercase_ : Any , lowercase_ : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : int ) -> Any:
UpperCAmelCase : Optional[int] = self.num_labels
UpperCAmelCase : int = LayoutLMvaForTokenClassification(config=lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase : Optional[Any] = model(
lowercase_ , bbox=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def UpperCAmelCase_ ( self : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : Tuple , lowercase_ : Tuple , lowercase_ : Union[str, Any] , lowercase_ : List[str] , lowercase_ : Tuple , lowercase_ : Any , lowercase_ : Optional[int] ) -> Optional[int]:
UpperCAmelCase : Union[str, Any] = LayoutLMvaForQuestionAnswering(config=lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase : List[str] = model(
lowercase_ , bbox=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , start_positions=lowercase_ , end_positions=lowercase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase_ ( self : str ) -> List[Any]:
UpperCAmelCase : List[str] = self.prepare_config_and_inputs()
(
UpperCAmelCase
) : Optional[int] = config_and_inputs
UpperCAmelCase : Optional[Any] = {
'input_ids': input_ids,
'bbox': bbox,
'pixel_values': pixel_values,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_torch
class A_ ( _snake_case , _snake_case , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = False
UpperCAmelCase_ : Dict = False
UpperCAmelCase_ : List[Any] = False
UpperCAmelCase_ : List[str] = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
UpperCAmelCase_ : List[str] = (
{"""document-question-answering""": LayoutLMvaForQuestionAnswering, """feature-extraction""": LayoutLMvaModel}
if is_torch_available()
else {}
)
def UpperCAmelCase_ ( self : Any , lowercase_ : int , lowercase_ : List[Any] , lowercase_ : List[Any] , lowercase_ : Optional[int] , lowercase_ : Optional[Any] ) -> Union[str, Any]:
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embedding only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
return True
def UpperCAmelCase_ ( self : str ) -> Any:
UpperCAmelCase : Union[str, Any] = LayoutLMvaModelTester(self )
UpperCAmelCase : Optional[Any] = ConfigTester(self , config_class=lowercase_ , hidden_size=37 )
def UpperCAmelCase_ ( self : Tuple , lowercase_ : int , lowercase_ : Dict , lowercase_ : Any=False ) -> Optional[Any]:
UpperCAmelCase : str = copy.deepcopy(lowercase_ )
if model_class in get_values(lowercase_ ):
UpperCAmelCase : str = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(lowercase_ , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(lowercase_ ):
UpperCAmelCase : Dict = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=lowercase_ )
elif model_class in get_values(lowercase_ ):
UpperCAmelCase : int = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase_ )
UpperCAmelCase : Optional[int] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase_ )
elif model_class in [
*get_values(lowercase_ ),
]:
UpperCAmelCase : List[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase_ )
elif model_class in [
*get_values(lowercase_ ),
]:
UpperCAmelCase : Tuple = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=lowercase_ , )
return inputs_dict
def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self : Any ) -> Any:
UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> int:
UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase : Optional[int] = type
self.model_tester.create_and_check_model(*lowercase_ )
def UpperCAmelCase_ ( self : Optional[int] ) -> str:
UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowercase_ )
def UpperCAmelCase_ ( self : Dict ) -> Any:
UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase_ )
def UpperCAmelCase_ ( self : Optional[int] ) -> Any:
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowercase_ )
@slow
def UpperCAmelCase_ ( self : Any ) -> Optional[Any]:
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : int = LayoutLMvaModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def UpperCamelCase( ):
UpperCAmelCase : List[str] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
class A_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCAmelCase_ ( self : List[Any] ) -> Dict:
return LayoutLMvaImageProcessor(apply_ocr=lowercase_ ) if is_vision_available() else None
@slow
def UpperCAmelCase_ ( self : Tuple ) -> List[str]:
UpperCAmelCase : int = LayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' ).to(lowercase_ )
UpperCAmelCase : Dict = self.default_image_processor
UpperCAmelCase : str = prepare_img()
UpperCAmelCase : Optional[Any] = image_processor(images=lowercase_ , return_tensors='pt' ).pixel_values.to(lowercase_ )
UpperCAmelCase : int = torch.tensor([[1, 2]] )
UpperCAmelCase : Tuple = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
UpperCAmelCase : Dict = model(
input_ids=input_ids.to(lowercase_ ) , bbox=bbox.to(lowercase_ ) , pixel_values=pixel_values.to(lowercase_ ) , )
# verify the logits
UpperCAmelCase : Optional[Any] = torch.Size((1, 199, 768) )
self.assertEqual(outputs.last_hidden_state.shape , lowercase_ )
UpperCAmelCase : List[str] = torch.tensor(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(lowercase_ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowercase_ , atol=1E-4 ) )
| 357
|
'''simple docstring'''
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
lowercase__ = {
"facebook/maskformer-swin-base-ade": (
"https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
lowercase__ = logging.get_logger(__name__)
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Any = """maskformer"""
UpperCAmelCase_ : Union[str, Any] = {"""hidden_size""": """mask_feature_size"""}
UpperCAmelCase_ : Union[str, Any] = ["""resnet""", """swin"""]
UpperCAmelCase_ : Optional[int] = ["""detr"""]
def __init__( self : Union[str, Any] , lowercase_ : int = 256 , lowercase_ : int = 256 , lowercase_ : float = 0.1 , lowercase_ : bool = False , lowercase_ : Optional[Dict] = None , lowercase_ : Optional[Dict] = None , lowercase_ : float = 0.02 , lowercase_ : float = 1.0 , lowercase_ : float = 1.0 , lowercase_ : float = 1.0 , lowercase_ : float = 20.0 , lowercase_ : Optional[bool] = None , **lowercase_ : int , ) -> Optional[Any]:
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
UpperCAmelCase : List[Any] = SwinConfig(
image_size=384 , in_channels=3 , patch_size=4 , embed_dim=128 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , )
if isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase : List[Any] = backbone_config.pop('model_type' )
UpperCAmelCase : Optional[Any] = CONFIG_MAPPING[backbone_model_type]
UpperCAmelCase : List[Any] = config_class.from_dict(lowercase_ )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. """
f"""Supported model types: {",".join(self.backbones_supported )}""" )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
UpperCAmelCase : Dict = DetrConfig()
else:
# verify that the decoder is supported
UpperCAmelCase : Tuple = (
decoder_config.pop('model_type' ) if isinstance(lowercase_ , lowercase_ ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
f"""Transformer Decoder {decoder_type} not supported, please use one of"""
f""" {",".join(self.decoders_supported )}""" )
if isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase : Union[str, Any] = CONFIG_MAPPING[decoder_type]
UpperCAmelCase : str = config_class.from_dict(lowercase_ )
UpperCAmelCase : int = backbone_config
UpperCAmelCase : Tuple = decoder_config
# main feature dimension for the model
UpperCAmelCase : Union[str, Any] = fpn_feature_size
UpperCAmelCase : Optional[Any] = mask_feature_size
# initializer
UpperCAmelCase : Union[str, Any] = init_std
UpperCAmelCase : List[str] = init_xavier_std
# Hungarian matcher && loss
UpperCAmelCase : int = cross_entropy_weight
UpperCAmelCase : Tuple = dice_weight
UpperCAmelCase : int = mask_weight
UpperCAmelCase : Any = use_auxiliary_loss
UpperCAmelCase : Tuple = no_object_weight
UpperCAmelCase : str = output_auxiliary_logits
UpperCAmelCase : Dict = self.decoder_config.encoder_attention_heads
UpperCAmelCase : Union[str, Any] = self.decoder_config.num_hidden_layers
super().__init__(**lowercase_ )
@classmethod
def UpperCAmelCase_ ( cls : Any , lowercase_ : PretrainedConfig , lowercase_ : PretrainedConfig , **lowercase_ : Optional[Any] ) -> str:
return cls(
backbone_config=lowercase_ , decoder_config=lowercase_ , **lowercase_ , )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Dict[str, any]:
UpperCAmelCase : Optional[int] = copy.deepcopy(self.__dict__ )
UpperCAmelCase : List[str] = self.backbone_config.to_dict()
UpperCAmelCase : Any = self.decoder_config.to_dict()
UpperCAmelCase : Union[str, Any] = self.__class__.model_type
return output
| 280
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_a = {
"""configuration_longformer""": [
"""LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""LongformerConfig""",
"""LongformerOnnxConfig""",
],
"""tokenization_longformer""": ["""LongformerTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = ["""LongformerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
"""LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LongformerForMaskedLM""",
"""LongformerForMultipleChoice""",
"""LongformerForQuestionAnswering""",
"""LongformerForSequenceClassification""",
"""LongformerForTokenClassification""",
"""LongformerModel""",
"""LongformerPreTrainedModel""",
"""LongformerSelfAttention""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
"""TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFLongformerForMaskedLM""",
"""TFLongformerForMultipleChoice""",
"""TFLongformerForQuestionAnswering""",
"""TFLongformerForSequenceClassification""",
"""TFLongformerForTokenClassification""",
"""TFLongformerModel""",
"""TFLongformerPreTrainedModel""",
"""TFLongformerSelfAttention""",
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
_a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 194
|
"""simple docstring"""
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def lowerCamelCase__ ( __snake_case ) -> Tuple:
"""simple docstring"""
return 1 / (1 + np.exp(-z ))
def lowerCamelCase__ ( __snake_case, __snake_case ) -> List[str]:
"""simple docstring"""
return (-y * np.log(__snake_case ) - (1 - y) * np.log(1 - h )).mean()
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case ) -> int:
"""simple docstring"""
_UpperCamelCase = np.dot(__snake_case, __snake_case )
return np.sum(y * scores - np.log(1 + np.exp(__snake_case ) ) )
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case, __snake_case=7_00_00 ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = np.zeros(x.shape[1] )
for iterations in range(__snake_case ):
_UpperCamelCase = np.dot(__snake_case, __snake_case )
_UpperCamelCase = sigmoid_function(__snake_case )
_UpperCamelCase = np.dot(x.T, h - y ) / y.size
_UpperCamelCase = theta - alpha * gradient # updating the weights
_UpperCamelCase = np.dot(__snake_case, __snake_case )
_UpperCamelCase = sigmoid_function(__snake_case )
_UpperCamelCase = cost_function(__snake_case, __snake_case )
if iterations % 1_00 == 0:
print(F'''loss: {j} \t''' ) # printing the loss after every 100 iterations
return theta
# In[68]:
if __name__ == "__main__":
_a = datasets.load_iris()
_a = iris.data[:, :2]
_a = (iris.target != 0) * 1
_a = 0.1
_a = logistic_reg(alpha, x, y, max_iterations=7_0000)
print("""theta: """, theta) # printing the theta i.e our weights vector
def lowerCamelCase__ ( __snake_case ) -> Tuple:
"""simple docstring"""
return sigmoid_function(
np.dot(__snake_case, __snake_case ) ) # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="""b""", label="""0""")
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="""r""", label="""1""")
((_a) , (_a)) = (x[:, 0].min(), x[:, 0].max())
((_a) , (_a)) = (x[:, 1].min(), x[:, 1].max())
((_a) , (_a)) = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max))
_a = np.c_[xxa.ravel(), xxa.ravel()]
_a = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors="""black""")
plt.legend()
plt.show()
| 194
| 1
|
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class a :
def __init__( self : Tuple , lowercase_ : Any , lowercase_ : List[str]=13 , lowercase_ : List[Any]=7 , lowercase_ : Dict=True , lowercase_ : Optional[int]=True , lowercase_ : List[str]=True , lowercase_ : Optional[Any]=99 , lowercase_ : str=32 , lowercase_ : Dict=5 , lowercase_ : Dict=4 , lowercase_ : Union[str, Any]=37 , lowercase_ : Any="gelu" , lowercase_ : Union[str, Any]=0.1 , lowercase_ : Tuple=0.1 , lowercase_ : Dict=512 , lowercase_ : List[Any]=16 , lowercase_ : Optional[int]=2 , lowercase_ : List[Any]=0.02 , lowercase_ : Any=3 , lowercase_ : Any=4 , lowercase_ : Optional[Any]=None , ):
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = seq_length
snake_case_ = is_training
snake_case_ = use_token_type_ids
snake_case_ = use_labels
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = type_vocab_size
snake_case_ = type_sequence_label_size
snake_case_ = initializer_range
snake_case_ = num_labels
snake_case_ = num_choices
snake_case_ = scope
snake_case_ = self.vocab_size - 1
def A_ ( self : Optional[int] ):
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ = None
if self.use_token_type_ids:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ = None
snake_case_ = None
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case_ = ids_tensor([self.batch_size] , self.num_choices )
snake_case_ = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
snake_case_ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def A_ ( self : Union[str, Any] , lowercase_ : Tuple , lowercase_ : int , lowercase_ : Tuple , lowercase_ : List[str] , *lowercase_ : Union[str, Any] ):
snake_case_ = OpenAIGPTModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
snake_case_ = model(lowercase_ , token_type_ids=lowercase_ , head_mask=lowercase_ )
snake_case_ = model(lowercase_ , token_type_ids=lowercase_ )
snake_case_ = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A_ ( self : Dict , lowercase_ : List[str] , lowercase_ : List[str] , lowercase_ : Tuple , lowercase_ : Optional[int] , *lowercase_ : Any ):
snake_case_ = OpenAIGPTLMHeadModel(lowercase_ )
model.to(lowercase_ )
model.eval()
snake_case_ = model(lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A_ ( self : Tuple , lowercase_ : Tuple , lowercase_ : str , lowercase_ : Dict , lowercase_ : Dict , *lowercase_ : List[Any] ):
snake_case_ = OpenAIGPTDoubleHeadsModel(lowercase_ )
model.to(lowercase_ )
model.eval()
snake_case_ = model(lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A_ ( self : Dict , lowercase_ : Any , lowercase_ : List[str] , lowercase_ : Dict , lowercase_ : List[Any] , *lowercase_ : Optional[int] ):
snake_case_ = self.num_labels
snake_case_ = OpenAIGPTForSequenceClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ = model(lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A_ ( self : List[Any] ):
snake_case_ = self.prepare_config_and_inputs()
(
(
snake_case_
) ,(
snake_case_
) ,(
snake_case_
) ,(
snake_case_
) ,(
snake_case_
) ,(
snake_case_
) ,(
snake_case_
) ,
) = config_and_inputs
snake_case_ = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''head_mask''': head_mask,
}
return config, inputs_dict
@require_torch
class a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
snake_case_ = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
snake_case_ = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
snake_case_ = (
{
"feature-extraction": OpenAIGPTModel,
"text-classification": OpenAIGPTForSequenceClassification,
"text-generation": OpenAIGPTLMHeadModel,
"zero-shot": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def A_ ( self : List[Any] , lowercase_ : Optional[int] , lowercase_ : Any , lowercase_ : Any , lowercase_ : Dict , lowercase_ : Union[str, Any] ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def A_ ( self : Optional[Any] , lowercase_ : int , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any]=False ):
snake_case_ = super()._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_ )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
snake_case_ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=lowercase_ , )
snake_case_ = inputs_dict['''labels''']
snake_case_ = inputs_dict['''labels''']
snake_case_ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=lowercase_ , )
snake_case_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase_ )
return inputs_dict
def A_ ( self : List[Any] ):
snake_case_ = OpenAIGPTModelTester(self )
snake_case_ = ConfigTester(self , config_class=lowercase_ , n_embd=37 )
def A_ ( self : List[Any] ):
self.config_tester.run_common_tests()
def A_ ( self : List[Any] ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*lowercase_ )
def A_ ( self : Dict ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*lowercase_ )
def A_ ( self : Optional[int] ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*lowercase_ )
def A_ ( self : int ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*lowercase_ )
@slow
def A_ ( self : str ):
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = OpenAIGPTModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
@require_torch
class a ( unittest.TestCase ):
@slow
def A_ ( self : Any ):
snake_case_ = OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''' )
model.to(lowercase_ )
snake_case_ = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=lowercase_ ) # the president is
snake_case_ = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
4_0477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
snake_case_ = model.generate(lowercase_ , do_sample=lowercase_ )
self.assertListEqual(output_ids[0].tolist() , lowercase_ )
| 72
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
a : int = {
'configuration_xlm': ['XLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLMConfig', 'XLMOnnxConfig'],
'tokenization_xlm': ['XLMTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : str = [
'XLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMForMultipleChoice',
'XLMForQuestionAnswering',
'XLMForQuestionAnsweringSimple',
'XLMForSequenceClassification',
'XLMForTokenClassification',
'XLMModel',
'XLMPreTrainedModel',
'XLMWithLMHeadModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Optional[int] = [
'TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMForMultipleChoice',
'TFXLMForQuestionAnsweringSimple',
'TFXLMForSequenceClassification',
'TFXLMForTokenClassification',
'TFXLMMainLayer',
'TFXLMModel',
'TFXLMPreTrainedModel',
'TFXLMWithLMHeadModel',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
a : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 72
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase_ = {
'configuration_swinv2': ['SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Swinv2Config'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Swinv2ForImageClassification',
'Swinv2ForMaskedImageModeling',
'Swinv2Model',
'Swinv2PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinva import (
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinvaForImageClassification,
SwinvaForMaskedImageModeling,
SwinvaModel,
SwinvaPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 308
|
from heapq import heappop, heappush
import numpy as np
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ) -> tuple[float | int, list[tuple[int, int]]]:
'''simple docstring'''
lowercase , lowercase : Optional[int] = grid.shape
lowercase : Optional[int] = [-1, 1, 0, 0]
lowercase : List[str] = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
lowercase , lowercase : Union[str, Any] = [(0, source)], set()
lowercase : List[str] = np.full((rows, cols) , np.inf )
lowercase : Dict = 0
lowercase : Dict = np.empty((rows, cols) , dtype=__magic_name__ )
lowercase : Any = None
while queue:
((lowercase) , (lowercase)) : Optional[Any] = heappop(__magic_name__ )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
lowercase : Tuple = []
while (x, y) != source:
path.append((x, y) )
lowercase , lowercase : Optional[int] = predecessors[x, y]
path.append(__magic_name__ ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(__magic_name__ ) ):
lowercase , lowercase : Optional[int] = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
lowercase : List[Any] = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(__magic_name__ , (dist + 1, (nx, ny)) )
lowercase : int = dist + 1
lowercase : Optional[Any] = (x, y)
return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
| 308
| 1
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=7 , lowerCAmelCase__=3 , lowerCAmelCase__=18 , lowerCAmelCase__=30 , lowerCAmelCase__=400 , lowerCAmelCase__=True , lowerCAmelCase__=None , lowerCAmelCase__=True , ) -> Tuple:
SCREAMING_SNAKE_CASE = size if size is not None else {'height': 18, 'width': 18}
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = min_resolution
SCREAMING_SNAKE_CASE = max_resolution
SCREAMING_SNAKE_CASE = do_resize
SCREAMING_SNAKE_CASE = size
SCREAMING_SNAKE_CASE = apply_ocr
def __A ( self ) -> List[str]:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class lowerCAmelCase ( lowerCamelCase_ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def __A ( self ) -> List[str]:
SCREAMING_SNAKE_CASE = LayoutLMvaImageProcessingTester(self )
@property
def __A ( self ) -> Any:
return self.image_processor_tester.prepare_image_processor_dict()
def __A ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , 'do_resize' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , 'size' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , 'apply_ocr' ) )
def __A ( self ) -> Dict:
SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def __A ( self ) -> Dict:
pass
def __A ( self ) -> Optional[int]:
# Initialize image_processing
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , lowerCAmelCase__ )
self.assertIsInstance(encoding.boxes , lowerCAmelCase__ )
# Test batched
SCREAMING_SNAKE_CASE = image_processing(lowerCAmelCase__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __A ( self ) -> Dict:
# Initialize image_processing
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
SCREAMING_SNAKE_CASE = image_processing(lowerCAmelCase__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __A ( self ) -> Optional[int]:
# Initialize image_processing
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
SCREAMING_SNAKE_CASE = image_processing(lowerCAmelCase__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def __A ( self ) -> Optional[Any]:
# with apply_OCR = True
SCREAMING_SNAKE_CASE = LayoutLMvaImageProcessor()
from datasets import load_dataset
SCREAMING_SNAKE_CASE = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
SCREAMING_SNAKE_CASE = Image.open(ds[0]['file'] ).convert('RGB' )
SCREAMING_SNAKE_CASE = image_processing(lowerCAmelCase__ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
SCREAMING_SNAKE_CASE = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
SCREAMING_SNAKE_CASE = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , lowerCAmelCase__ )
self.assertListEqual(encoding.boxes , lowerCAmelCase__ )
# with apply_OCR = False
SCREAMING_SNAKE_CASE = LayoutLMvaImageProcessor(apply_ocr=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = image_processing(lowerCAmelCase__ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 353
|
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def lowercase (SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : str ) -> Any:
# Load configuration defined in the metadata file
with open(SCREAMING_SNAKE_CASE_ ) as metadata_file:
SCREAMING_SNAKE_CASE = json.load(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = LukeConfig(use_entity_aware_attention=SCREAMING_SNAKE_CASE_ , **metadata['model_config'] )
# Load in the weights from the checkpoint_path
SCREAMING_SNAKE_CASE = torch.load(SCREAMING_SNAKE_CASE_ , map_location='cpu' )['module']
# Load the entity vocab file
SCREAMING_SNAKE_CASE = load_original_entity_vocab(SCREAMING_SNAKE_CASE_ )
# add an entry for [MASK2]
SCREAMING_SNAKE_CASE = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
SCREAMING_SNAKE_CASE = XLMRobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'] )
# Add special tokens to the token vocabulary for downstream tasks
SCREAMING_SNAKE_CASE = AddedToken('<ent>' , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = AddedToken('<ent2>' , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ )
tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'Saving tokenizer to {pytorch_dump_folder_path}' )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE_ )
with open(os.path.join(SCREAMING_SNAKE_CASE_ , 'tokenizer_config.json' ) , 'r' ) as f:
SCREAMING_SNAKE_CASE = json.load(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = 'MLukeTokenizer'
with open(os.path.join(SCREAMING_SNAKE_CASE_ , 'tokenizer_config.json' ) , 'w' ) as f:
json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
with open(os.path.join(SCREAMING_SNAKE_CASE_ , MLukeTokenizer.vocab_files_names['entity_vocab_file'] ) , 'w' ) as f:
json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = MLukeTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ )
# Initialize the embeddings of the special tokens
SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(['@'] )[0]
SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(['#'] )[0]
SCREAMING_SNAKE_CASE = state_dict['embeddings.word_embeddings.weight']
SCREAMING_SNAKE_CASE = word_emb[ent_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE = word_emb[enta_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
SCREAMING_SNAKE_CASE = state_dict[bias_name]
SCREAMING_SNAKE_CASE = decoder_bias[ent_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE = decoder_bias[enta_init_index].unsqueeze(0 )
SCREAMING_SNAKE_CASE = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
SCREAMING_SNAKE_CASE = F'encoder.layer.{layer_index}.attention.self.'
SCREAMING_SNAKE_CASE = state_dict[prefix + matrix_name]
SCREAMING_SNAKE_CASE = state_dict[prefix + matrix_name]
SCREAMING_SNAKE_CASE = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
SCREAMING_SNAKE_CASE = state_dict['entity_embeddings.entity_embeddings.weight']
SCREAMING_SNAKE_CASE = entity_emb[entity_vocab['[MASK]']].unsqueeze(0 )
SCREAMING_SNAKE_CASE = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
SCREAMING_SNAKE_CASE = state_dict['entity_predictions.bias']
SCREAMING_SNAKE_CASE = entity_prediction_bias[entity_vocab['[MASK]']].unsqueeze(0 )
SCREAMING_SNAKE_CASE = torch.cat([entity_prediction_bias, entity_mask_bias] )
SCREAMING_SNAKE_CASE = LukeForMaskedLM(config=SCREAMING_SNAKE_CASE_ ).eval()
state_dict.pop('entity_predictions.decoder.weight' )
state_dict.pop('lm_head.decoder.weight' )
state_dict.pop('lm_head.decoder.bias' )
SCREAMING_SNAKE_CASE = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith('lm_head' ) or key.startswith('entity_predictions' )):
SCREAMING_SNAKE_CASE = state_dict[key]
else:
SCREAMING_SNAKE_CASE = state_dict[key]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = model.load_state_dict(SCREAMING_SNAKE_CASE_ , strict=SCREAMING_SNAKE_CASE_ )
if set(SCREAMING_SNAKE_CASE_ ) != {"luke.embeddings.position_ids"}:
raise ValueError(F'Unexpected unexpected_keys: {unexpected_keys}' )
if set(SCREAMING_SNAKE_CASE_ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F'Unexpected missing_keys: {missing_keys}' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
SCREAMING_SNAKE_CASE = MLukeTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ , task='entity_classification' )
SCREAMING_SNAKE_CASE = 'ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'
SCREAMING_SNAKE_CASE = (0, 9)
SCREAMING_SNAKE_CASE = tokenizer(SCREAMING_SNAKE_CASE_ , entity_spans=[span] , return_tensors='pt' )
SCREAMING_SNAKE_CASE = model(**SCREAMING_SNAKE_CASE_ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
SCREAMING_SNAKE_CASE = torch.Size((1, 33, 7_68) )
SCREAMING_SNAKE_CASE = torch.tensor([[0.08_92, 0.05_96, -0.28_19], [0.01_34, 0.11_99, 0.05_73], [-0.01_69, 0.09_27, 0.06_44]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
SCREAMING_SNAKE_CASE = torch.Size((1, 1, 7_68) )
SCREAMING_SNAKE_CASE = torch.tensor([[-0.14_82, 0.06_09, 0.03_22]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
F' {expected_shape}' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
SCREAMING_SNAKE_CASE = MLukeTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = 'Tokyo is the capital of <mask>.'
SCREAMING_SNAKE_CASE = (24, 30)
SCREAMING_SNAKE_CASE = tokenizer(SCREAMING_SNAKE_CASE_ , entity_spans=[span] , return_tensors='pt' )
SCREAMING_SNAKE_CASE = model(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = encoding['input_ids'][0].tolist()
SCREAMING_SNAKE_CASE = input_ids.index(tokenizer.convert_tokens_to_ids('<mask>' ) )
SCREAMING_SNAKE_CASE = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = outputs.entity_logits[0][0].argmax().item()
SCREAMING_SNAKE_CASE = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith('en:' )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print('Saving PyTorch model to {}'.format(SCREAMING_SNAKE_CASE_ ) )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
def lowercase (SCREAMING_SNAKE_CASE_ : int ) -> int:
SCREAMING_SNAKE_CASE = ['[MASK]', '[PAD]', '[UNK]']
SCREAMING_SNAKE_CASE = [json.loads(SCREAMING_SNAKE_CASE_ ) for line in open(SCREAMING_SNAKE_CASE_ )]
SCREAMING_SNAKE_CASE = {}
for entry in data:
SCREAMING_SNAKE_CASE = entry['id']
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
SCREAMING_SNAKE_CASE = entity_id
break
SCREAMING_SNAKE_CASE = F'{language}:{entity_name}'
SCREAMING_SNAKE_CASE = entity_id
return new_mapping
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
__UpperCamelCase = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 38
| 0
|
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def __magic_name__ ( __a : Namespace ):
'''simple docstring'''
return ConvertCommand(
args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
lowerCamelCase_ = '''
transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
'''
class __A( __lowerCamelCase ):
"""simple docstring"""
@staticmethod
def UpperCAmelCase_ (SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = parser.add_parser(
"""convert""" , help="""CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.""" , )
train_parser.add_argument("""--model_type""" , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help="""Model's type.""" )
train_parser.add_argument(
"""--tf_checkpoint""" , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help="""TensorFlow checkpoint path or folder.""" )
train_parser.add_argument(
"""--pytorch_dump_output""" , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help="""Path to the PyTorch saved model output.""" )
train_parser.add_argument("""--config""" , type=SCREAMING_SNAKE_CASE_ , default="""""" , help="""Configuration file path or folder.""" )
train_parser.add_argument(
"""--finetuning_task_name""" , type=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , help="""Optional fine-tuning task name if the TF model was a finetuned model.""" , )
train_parser.set_defaults(func=SCREAMING_SNAKE_CASE_ )
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , ):
UpperCamelCase__ = logging.get_logger("""transformers-cli/converting""" )
self._logger.info(F"Loading model {model_type}" )
UpperCamelCase__ = model_type
UpperCamelCase__ = tf_checkpoint
UpperCamelCase__ = pytorch_dump_output
UpperCamelCase__ = config
UpperCamelCase__ = finetuning_task_name
def UpperCAmelCase_ (self ):
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(SCREAMING_SNAKE_CASE_ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(SCREAMING_SNAKE_CASE_ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(SCREAMING_SNAKE_CASE_ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(SCREAMING_SNAKE_CASE_ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(SCREAMING_SNAKE_CASE_ )
if "ckpt" in self._tf_checkpoint.lower():
UpperCamelCase__ = self._tf_checkpoint
UpperCamelCase__ = """"""
else:
UpperCamelCase__ = self._tf_checkpoint
UpperCamelCase__ = """"""
convert_transfo_xl_checkpoint_to_pytorch(
SCREAMING_SNAKE_CASE_ , self._config , self._pytorch_dump_output , SCREAMING_SNAKE_CASE_ )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(SCREAMING_SNAKE_CASE_ )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(SCREAMING_SNAKE_CASE_ )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
"""--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]""" )
| 244
|
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def __magic_name__ ( __a : List[str] , __a : List[Any] , __a : int , __a : Optional[int]=None , __a : Union[str, Any]=None , __a : Union[str, Any]=None , __a : Union[str, Any]=None , __a : Tuple=None , ):
'''simple docstring'''
if attention_mask is None:
UpperCamelCase__ = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
UpperCamelCase__ = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
UpperCamelCase__ = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=__a )
if decoder_head_mask is None:
UpperCamelCase__ = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=__a )
if cross_attn_head_mask is None:
UpperCamelCase__ = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=__a )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
class __A:
"""simple docstring"""
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=99 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_="relu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=20 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=0 , ):
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = seq_length
UpperCamelCase__ = is_training
UpperCamelCase__ = use_labels
UpperCamelCase__ = vocab_size
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = encoder_layerdrop
UpperCamelCase__ = decoder_layerdrop
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = eos_token_id
UpperCamelCase__ = pad_token_id
UpperCamelCase__ = bos_token_id
def UpperCAmelCase_ (self ):
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ = self.eos_token_id # Eos Token
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for M2M100 the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
UpperCamelCase__ = input_ids.clamp(self.pad_token_id + 1 )
UpperCamelCase__ = decoder_input_ids.clamp(self.pad_token_id + 1 )
UpperCamelCase__ = self.get_config()
UpperCamelCase__ = prepare_mam_aaa_inputs_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return config, inputs_dict
def UpperCAmelCase_ (self ):
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
def UpperCAmelCase_ (self ):
UpperCamelCase__ , UpperCamelCase__ = self.prepare_config_and_inputs()
return config, inputs_dict
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = MaMaaaModel(config=SCREAMING_SNAKE_CASE_ ).get_decoder().to(SCREAMING_SNAKE_CASE_ ).eval()
UpperCamelCase__ = inputs_dict["""input_ids"""]
UpperCamelCase__ = inputs_dict["""attention_mask"""]
UpperCamelCase__ = inputs_dict["""head_mask"""]
# first forward pass
UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , head_mask=SCREAMING_SNAKE_CASE_ , use_cache=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ , UpperCamelCase__ = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
UpperCamelCase__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCamelCase__ = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
UpperCamelCase__ = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCamelCase__ = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )["""last_hidden_state"""]
UpperCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , past_key_values=SCREAMING_SNAKE_CASE_ )[
"""last_hidden_state"""
]
# select random slice
UpperCamelCase__ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCamelCase__ = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCamelCase__ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-2 ) )
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = MaMaaaModel(config=SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ ).eval()
UpperCamelCase__ = model(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = outputs.encoder_last_hidden_state
UpperCamelCase__ = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase__ = model.get_encoder()
encoder.save_pretrained(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = MaMaaaEncoder.from_pretrained(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = encoder(inputs_dict["""input_ids"""] , attention_mask=inputs_dict["""attention_mask"""] )[
0
]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase__ = model.get_decoder()
decoder.save_pretrained(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = MaMaaaDecoder.from_pretrained(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = decoder(
input_ids=inputs_dict["""decoder_input_ids"""] , attention_mask=inputs_dict["""decoder_attention_mask"""] , encoder_hidden_states=SCREAMING_SNAKE_CASE_ , encoder_attention_mask=inputs_dict["""attention_mask"""] , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class __A( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = (
(
MaMaaaModel,
MaMaaaForConditionalGeneration,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE__ = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE__ = (
{
"""conversational""": MaMaaaForConditionalGeneration,
"""feature-extraction""": MaMaaaModel,
"""summarization""": MaMaaaForConditionalGeneration,
"""text2text-generation""": MaMaaaForConditionalGeneration,
"""translation""": MaMaaaForConditionalGeneration,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
if pipeline_test_casse_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
def UpperCAmelCase_ (self ):
UpperCamelCase__ = MaMaaaModelTester(self )
UpperCamelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self ):
self.config_tester.run_common_tests()
def UpperCAmelCase_ (self ):
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
UpperCamelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ , UpperCamelCase__ = model_class.from_pretrained(SCREAMING_SNAKE_CASE_ , output_loading_info=SCREAMING_SNAKE_CASE_ )
self.assertEqual(info["""missing_keys"""] , [] )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self ):
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
UpperCamelCase__ = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase__ = copy.deepcopy(self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
if not self.is_encoder_decoder:
UpperCamelCase__ = inputs["""input_ids"""]
del inputs["input_ids"]
else:
UpperCamelCase__ = inputs["""input_ids"""]
UpperCamelCase__ = inputs.get("""decoder_input_ids""" , SCREAMING_SNAKE_CASE_ )
del inputs["input_ids"]
inputs.pop("""decoder_input_ids""" , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = model.get_input_embeddings()
if not self.is_encoder_decoder:
UpperCamelCase__ = wte(SCREAMING_SNAKE_CASE_ )
else:
UpperCamelCase__ = wte(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = wte(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
model(**SCREAMING_SNAKE_CASE_ )[0]
def UpperCAmelCase_ (self ):
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
UpperCamelCase__ = input_dict["""input_ids"""]
UpperCamelCase__ = input_ids.ne(1 ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = MaMaaaForConditionalGeneration(SCREAMING_SNAKE_CASE_ ).eval().to(SCREAMING_SNAKE_CASE_ )
if torch_device == "cuda":
model.half()
model.generate(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )
model.generate(num_beams=4 , do_sample=SCREAMING_SNAKE_CASE_ , early_stopping=SCREAMING_SNAKE_CASE_ , num_return_sequences=3 )
def __magic_name__ ( __a : List[Any] ):
'''simple docstring'''
return torch.tensor(__a , dtype=torch.long , device=__a )
lowerCamelCase_ = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class __A( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase_ (self ):
return MaMaaaTokenizer.from_pretrained("""facebook/m2m100_418M""" )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = MaMaaaModel.from_pretrained("""facebook/m2m100_418M""" ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = _long_tensor([[12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38, 2]] )
UpperCamelCase__ = _long_tensor([[2, 12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38]] )
UpperCamelCase__ = prepare_mam_aaa_inputs_dict(model.config , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
UpperCamelCase__ = model(**SCREAMING_SNAKE_CASE_ )[0]
UpperCamelCase__ = torch.Size((1, 11, 10_24) )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ )
# change to expected output here
UpperCamelCase__ = torch.tensor(
[[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]] , device=SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=SCREAMING_SNAKE_CASE_ ) )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = MaMaaaForConditionalGeneration.from_pretrained("""facebook/m2m100_418M""" ).to(SCREAMING_SNAKE_CASE_ )
# change to intended input
UpperCamelCase__ = _long_tensor([[12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38, 2]] )
UpperCamelCase__ = _long_tensor([[2, 12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38]] )
UpperCamelCase__ = prepare_mam_aaa_inputs_dict(model.config , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
UpperCamelCase__ = model(**SCREAMING_SNAKE_CASE_ )[0]
UpperCamelCase__ = torch.Size((1, 11, model.config.vocab_size) )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ )
# change to expected output here
UpperCamelCase__ = torch.tensor(
[[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]] , device=SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=SCREAMING_SNAKE_CASE_ ) )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = MaMaaaForConditionalGeneration.from_pretrained("""facebook/m2m100_418M""" ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = MaMaaaTokenizer.from_pretrained("""facebook/m2m100_418M""" , src_lang="""fr""" , tgt_lang="""en""" )
UpperCamelCase__ = [
"""L'affaire NSA souligne l'absence totale de débat sur le renseignement""",
"""Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""",
"""Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"""
""" Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"""
""" l'ampleur de la surveillance américaine sur l'ensemble des communications en France.""",
]
# The below article tests that we don't add any hypotheses outside of the top n_beams
UpperCamelCase__ = tokenizer(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" )
UpperCamelCase__ = model.generate(
input_ids=dct["""input_ids"""].to(SCREAMING_SNAKE_CASE_ ) , attention_mask=dct["""attention_mask"""].to(SCREAMING_SNAKE_CASE_ ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id("""en""" ) , )
UpperCamelCase__ = [
"""The NSA case highlights the total absence of intelligence debate""",
"""I think there are two levels of response from the French government.""",
"""When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."""
""" Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"""
""" communications in France.""",
]
UpperCamelCase__ = tokenizer.batch_decode(
hypotheses_batch.tolist() , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
assert generated == expected_en
| 244
| 1
|
"""simple docstring"""
import collections
import importlib.util
import os
import re
from pathlib import Path
__SCREAMING_SNAKE_CASE ="src/transformers"
# Matches is_xxx_available()
__SCREAMING_SNAKE_CASE =re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
__SCREAMING_SNAKE_CASE =re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
__SCREAMING_SNAKE_CASE =re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
__SCREAMING_SNAKE_CASE =re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
__SCREAMING_SNAKE_CASE =re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
__SCREAMING_SNAKE_CASE =re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
__SCREAMING_SNAKE_CASE =re.compile("^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
__SCREAMING_SNAKE_CASE =re.compile("^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
__SCREAMING_SNAKE_CASE =re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
__SCREAMING_SNAKE_CASE =re.compile(r"^\s*try:")
# Catches a line with else:
__SCREAMING_SNAKE_CASE =re.compile(r"^\s*else:")
def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] ):
if _re_test_backend.search(__SCREAMING_SNAKE_CASE ) is None:
return None
lowercase_ : Tuple = [b[0] for b in _re_backend.findall(__SCREAMING_SNAKE_CASE )]
backends.sort()
return "_and_".join(__SCREAMING_SNAKE_CASE )
def lowercase__( __SCREAMING_SNAKE_CASE : Union[str, Any] ):
with open(__SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' , newline='\n' ) as f:
lowercase_ : str = f.readlines()
lowercase_ : Any = 0
while line_index < len(__SCREAMING_SNAKE_CASE ) and not lines[line_index].startswith('_import_structure = {' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(__SCREAMING_SNAKE_CASE ):
return None
# First grab the objects without a specific backend in _import_structure
lowercase_ : Optional[Any] = []
while not lines[line_index].startswith('if TYPE_CHECKING' ) and find_backend(lines[line_index] ) is None:
lowercase_ : Union[str, Any] = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(__SCREAMING_SNAKE_CASE ):
lowercase_ : Optional[int] = _re_one_line_import_struct.search(__SCREAMING_SNAKE_CASE ).groups()[0]
lowercase_ : List[Any] = re.findall('\[([^\]]+)\]' , __SCREAMING_SNAKE_CASE )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(', ' )] )
line_index += 1
continue
lowercase_ : Optional[Any] = _re_import_struct_key_value.search(__SCREAMING_SNAKE_CASE )
if single_line_import_search is not None:
lowercase_ : Tuple = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ' ) if len(__SCREAMING_SNAKE_CASE ) > 0]
objects.extend(__SCREAMING_SNAKE_CASE )
elif line.startswith(' ' * 8 + '"' ):
objects.append(line[9:-3] )
line_index += 1
lowercase_ : int = {'none': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('if TYPE_CHECKING' ):
# If the line is an if not is_backend_available, we grab all objects associated.
lowercase_ : Dict = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowercase_ : Tuple = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowercase_ : Any = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 4 ):
lowercase_ : str = lines[line_index]
if _re_import_struct_add_one.search(__SCREAMING_SNAKE_CASE ) is not None:
objects.append(_re_import_struct_add_one.search(__SCREAMING_SNAKE_CASE ).groups()[0] )
elif _re_import_struct_add_many.search(__SCREAMING_SNAKE_CASE ) is not None:
lowercase_ : Dict = _re_import_struct_add_many.search(__SCREAMING_SNAKE_CASE ).groups()[0].split(', ' )
lowercase_ : Tuple = [obj[1:-1] for obj in imports if len(__SCREAMING_SNAKE_CASE ) > 0]
objects.extend(__SCREAMING_SNAKE_CASE )
elif _re_between_brackets.search(__SCREAMING_SNAKE_CASE ) is not None:
lowercase_ : Dict = _re_between_brackets.search(__SCREAMING_SNAKE_CASE ).groups()[0].split(', ' )
lowercase_ : Optional[int] = [obj[1:-1] for obj in imports if len(__SCREAMING_SNAKE_CASE ) > 0]
objects.extend(__SCREAMING_SNAKE_CASE )
elif _re_quote_object.search(__SCREAMING_SNAKE_CASE ) is not None:
objects.append(_re_quote_object.search(__SCREAMING_SNAKE_CASE ).groups()[0] )
elif line.startswith(' ' * 8 + '"' ):
objects.append(line[9:-3] )
elif line.startswith(' ' * 12 + '"' ):
objects.append(line[13:-3] )
line_index += 1
lowercase_ : List[Any] = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
lowercase_ : int = []
while (
line_index < len(__SCREAMING_SNAKE_CASE )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('else' )
):
lowercase_ : List[Any] = lines[line_index]
lowercase_ : List[str] = _re_import.search(__SCREAMING_SNAKE_CASE )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 8 ):
objects.append(line[8:-2] )
line_index += 1
lowercase_ : Dict = {'none': objects}
# Let's continue with backend-specific objects
while line_index < len(__SCREAMING_SNAKE_CASE ):
# If the line is an if is_backend_available, we grab all objects associated.
lowercase_ : Optional[Any] = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowercase_ : Dict = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowercase_ : int = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 8 ):
lowercase_ : str = lines[line_index]
lowercase_ : Dict = _re_import.search(__SCREAMING_SNAKE_CASE )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 12 ):
objects.append(line[12:-2] )
line_index += 1
lowercase_ : List[Any] = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any ):
def find_duplicates(__SCREAMING_SNAKE_CASE : Union[str, Any] ):
return [k for k, v in collections.Counter(__SCREAMING_SNAKE_CASE ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
lowercase_ : int = []
for key in import_dict_objects.keys():
lowercase_ : int = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''' )
lowercase_ : List[str] = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
lowercase_ : Any = 'base imports' if key == 'none' else F'''{key} backend'''
errors.append(F'''Differences for {name}:''' )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F''' {a} in TYPE_HINT but not in _import_structure.''' )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F''' {a} in _import_structure but not in TYPE_HINT.''' )
return errors
def lowercase__( ):
lowercase_ : Union[str, Any] = []
for root, _, files in os.walk(__SCREAMING_SNAKE_CASE ):
if "__init__.py" in files:
lowercase_ : Union[str, Any] = os.path.join(__SCREAMING_SNAKE_CASE , '__init__.py' )
lowercase_ : Optional[int] = parse_init(__SCREAMING_SNAKE_CASE )
if objects is not None:
lowercase_ : Optional[Any] = analyze_results(*__SCREAMING_SNAKE_CASE )
if len(__SCREAMING_SNAKE_CASE ) > 0:
lowercase_ : Dict = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'''
failures.append('\n'.join(__SCREAMING_SNAKE_CASE ) )
if len(__SCREAMING_SNAKE_CASE ) > 0:
raise ValueError('\n\n'.join(__SCREAMING_SNAKE_CASE ) )
def lowercase__( ):
lowercase_ : Any = []
for path, directories, files in os.walk(__SCREAMING_SNAKE_CASE ):
for folder in directories:
# Ignore private modules
if folder.startswith('_' ):
directories.remove(__SCREAMING_SNAKE_CASE )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(__SCREAMING_SNAKE_CASE ) / folder).glob('*.py' ) ) ) == 0:
continue
lowercase_ : Dict = str((Path(__SCREAMING_SNAKE_CASE ) / folder).relative_to(__SCREAMING_SNAKE_CASE ) )
lowercase_ : Optional[int] = short_path.replace(os.path.sep , '.' )
submodules.append(__SCREAMING_SNAKE_CASE )
for fname in files:
if fname == "__init__.py":
continue
lowercase_ : Dict = str((Path(__SCREAMING_SNAKE_CASE ) / fname).relative_to(__SCREAMING_SNAKE_CASE ) )
lowercase_ : Tuple = short_path.replace('.py' , '' ).replace(os.path.sep , '.' )
if len(submodule.split('.' ) ) == 1:
submodules.append(__SCREAMING_SNAKE_CASE )
return submodules
__SCREAMING_SNAKE_CASE =[
"convert_pytorch_checkpoint_to_tf2",
"modeling_flax_pytorch_utils",
]
def lowercase__( ):
# This is to make sure the transformers module imported is the one in the repo.
lowercase_ : List[str] = importlib.util.spec_from_file_location(
'transformers' , os.path.join(__SCREAMING_SNAKE_CASE , '__init__.py' ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
lowercase_ : List[str] = spec.loader.load_module()
lowercase_ : str = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
if len(__SCREAMING_SNAKE_CASE ) > 0:
lowercase_ : Any = '\n'.join(F'''- {module}''' for module in module_not_registered )
raise ValueError(
'The following submodules are not properly registered in the main init of Transformers:\n'
F'''{list_of_modules}\n'''
'Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 353
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__SCREAMING_SNAKE_CASE ={"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE =[
"MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"MraForMaskedLM",
"MraForMultipleChoice",
"MraForQuestionAnswering",
"MraForSequenceClassification",
"MraForTokenClassification",
"MraLayer",
"MraModel",
"MraPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE =_LazyModule(__name__, globals()["__file__"], _import_structure)
| 321
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
lowercase__ = {
'''configuration_longt5''': ['''LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LongT5Config''', '''LongT5OnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
'''LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongT5EncoderModel''',
'''LongT5ForConditionalGeneration''',
'''LongT5Model''',
'''LongT5PreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
'''FlaxLongT5ForConditionalGeneration''',
'''FlaxLongT5Model''',
'''FlaxLongT5PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longta import (
LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
LongTaEncoderModel,
LongTaForConditionalGeneration,
LongTaModel,
LongTaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_longta import (
FlaxLongTaForConditionalGeneration,
FlaxLongTaModel,
FlaxLongTaPreTrainedModel,
)
else:
import sys
lowercase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 290
|
import argparse
import json
from tqdm import tqdm
def _SCREAMING_SNAKE_CASE ( ) -> List[Any]:
__A : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--src_path' , type=a , default='biencoder-nq-dev.json' , help='Path to raw DPR training data' , )
parser.add_argument(
'--evaluation_set' , type=a , help='where to store parsed evaluation_set file' , )
parser.add_argument(
'--gold_data_path' , type=a , help='where to store parsed gold_data_path file' , )
__A : Optional[int] = parser.parse_args()
with open(args.src_path , 'r' ) as src_file, open(args.evaluation_set , 'w' ) as eval_file, open(
args.gold_data_path , 'w' ) as gold_file:
__A : List[Any] = json.load(a )
for dpr_record in tqdm(a ):
__A : Dict = dpr_record['question']
__A : Any = [context['title'] for context in dpr_record['positive_ctxs']]
eval_file.write(question + '\n' )
gold_file.write('\t'.join(a ) + '\n' )
if __name__ == "__main__":
main()
| 280
| 0
|
"""simple docstring"""
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class __A ( A_ ):
'''simple docstring'''
def UpperCAmelCase ( self : int ) -> int:
"""simple docstring"""
lowercase__ : Any = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_snake_case ,'''tf_padding''' ) )
self.parent.assertTrue(hasattr(_snake_case ,'''depth_multiplier''' ) )
class __A :
'''simple docstring'''
def __init__( self : Any ,_snake_case : Tuple ,_snake_case : List[str]=13 ,_snake_case : Dict=3 ,_snake_case : List[str]=32 ,_snake_case : Optional[Any]=0.25 ,_snake_case : Dict=8 ,_snake_case : Any=8 ,_snake_case : str=6 ,_snake_case : str=32 ,_snake_case : Tuple=True ,_snake_case : int=True ,_snake_case : Optional[Any]=True ,_snake_case : Dict="relu6" ,_snake_case : Tuple=1_280 ,_snake_case : Optional[Any]=0.1 ,_snake_case : Union[str, Any]=0.02 ,_snake_case : int=True ,_snake_case : str=True ,_snake_case : Tuple=10 ,_snake_case : str=None ,) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : str = parent
lowercase__ : List[str] = batch_size
lowercase__ : Optional[Any] = num_channels
lowercase__ : int = image_size
lowercase__ : str = depth_multiplier
lowercase__ : List[Any] = depth_divisible_by
lowercase__ : Union[str, Any] = min_depth
lowercase__ : Any = expand_ratio
lowercase__ : Optional[int] = tf_padding
lowercase__ : List[str] = output_stride
lowercase__ : List[str] = first_layer_is_expansion
lowercase__ : Optional[int] = finegrained_output
lowercase__ : Union[str, Any] = hidden_act
lowercase__ : str = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
lowercase__ : Any = classifier_dropout_prob
lowercase__ : Dict = use_labels
lowercase__ : Dict = is_training
lowercase__ : str = num_labels
lowercase__ : Optional[int] = initializer_range
lowercase__ : str = scope
def UpperCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
lowercase__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : Tuple = None
lowercase__ : int = None
if self.use_labels:
lowercase__ : Dict = ids_tensor([self.batch_size] ,self.num_labels )
lowercase__ : Tuple = ids_tensor([self.batch_size, self.image_size, self.image_size] ,self.num_labels )
lowercase__ : Tuple = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
return MobileNetVaConfig(
num_channels=self.num_channels ,image_size=self.image_size ,depth_multiplier=self.depth_multiplier ,depth_divisible_by=self.depth_divisible_by ,min_depth=self.min_depth ,expand_ratio=self.expand_ratio ,output_stride=self.output_stride ,first_layer_is_expansion=self.first_layer_is_expansion ,finegrained_output=self.finegrained_output ,hidden_act=self.hidden_act ,tf_padding=self.tf_padding ,classifier_dropout_prob=self.classifier_dropout_prob ,initializer_range=self.initializer_range ,)
def UpperCAmelCase ( self : List[Any] ,_snake_case : Union[str, Any] ,_snake_case : Tuple ,_snake_case : int ,_snake_case : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : Tuple = MobileNetVaModel(config=_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : int = model(_snake_case )
self.parent.assertEqual(
result.last_hidden_state.shape ,(
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) ,)
self.parent.assertEqual(
result.pooler_output.shape ,(self.batch_size, self.last_hidden_size) ,)
def UpperCAmelCase ( self : Dict ,_snake_case : List[Any] ,_snake_case : str ,_snake_case : str ,_snake_case : List[str] ) -> List[str]:
"""simple docstring"""
lowercase__ : List[Any] = self.num_labels
lowercase__ : str = MobileNetVaForImageClassification(_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : List[str] = model(_snake_case ,labels=_snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def UpperCAmelCase ( self : Any ,_snake_case : Union[str, Any] ,_snake_case : Optional[Any] ,_snake_case : str ,_snake_case : Tuple ) -> List[Any]:
"""simple docstring"""
lowercase__ : List[str] = self.num_labels
lowercase__ : Optional[int] = MobileNetVaForSemanticSegmentation(_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : List[str] = model(_snake_case )
self.parent.assertEqual(
result.logits.shape ,(
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) ,)
lowercase__ : Dict = model(_snake_case ,labels=_snake_case )
self.parent.assertEqual(
result.logits.shape ,(
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) ,)
def UpperCAmelCase ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
lowercase__ : Tuple = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ , lowercase__ : Dict = config_and_inputs
lowercase__ : str = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __A ( A_ ,A_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
lowerCAmelCase : Any = (
{
"feature-extraction": MobileNetVaModel,
"image-classification": MobileNetVaForImageClassification,
"image-segmentation": MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCAmelCase : Union[str, Any] = False
lowerCAmelCase : Tuple = False
lowerCAmelCase : Optional[int] = False
lowerCAmelCase : List[str] = False
def UpperCAmelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : Tuple = MobileNetVaModelTester(self )
lowercase__ : Any = MobileNetVaConfigTester(self ,config_class=_snake_case ,has_text_modality=_snake_case )
def UpperCAmelCase ( self : List[str] ) -> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileNetV2 does not use inputs_embeds''' )
def UpperCAmelCase ( self : str ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip(reason='''MobileNetV2 does not support input and output embeddings''' )
def UpperCAmelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip(reason='''MobileNetV2 does not output attentions''' )
def UpperCAmelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
pass
def UpperCAmelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
lowercase__ , lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Union[str, Any] = model_class(_snake_case )
lowercase__ : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : List[Any] = [*signature.parameters.keys()]
lowercase__ : int = ['''pixel_values''']
self.assertListEqual(arg_names[:1] ,_snake_case )
def UpperCAmelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
lowercase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def UpperCAmelCase ( self : Tuple ) -> Dict:
"""simple docstring"""
def check_hidden_states_output(_snake_case : Optional[int] ,_snake_case : int ,_snake_case : int ):
lowercase__ : Tuple = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
lowercase__ : Union[str, Any] = model(**self._prepare_for_class(_snake_case ,_snake_case ) )
lowercase__ : Optional[int] = outputs.hidden_states
lowercase__ : List[Any] = 16
self.assertEqual(len(_snake_case ) ,_snake_case )
lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : int = True
check_hidden_states_output(_snake_case ,_snake_case ,_snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : Optional[Any] = True
check_hidden_states_output(_snake_case ,_snake_case ,_snake_case )
def UpperCAmelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case )
def UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_snake_case )
@slow
def UpperCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Tuple = MobileNetVaModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def __UpperCAmelCase ( ) -> Tuple:
lowercase__ : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __A ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCAmelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
return (
MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v2_1.0_224''' ) if is_vision_available() else None
)
@slow
def UpperCAmelCase ( self : str ) -> Dict:
"""simple docstring"""
lowercase__ : List[str] = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v2_1.0_224''' ).to(_snake_case )
lowercase__ : List[str] = self.default_image_processor
lowercase__ : Union[str, Any] = prepare_img()
lowercase__ : Tuple = image_processor(images=_snake_case ,return_tensors='''pt''' ).to(_snake_case )
# forward pass
with torch.no_grad():
lowercase__ : Tuple = model(**_snake_case )
# verify the logits
lowercase__ : Optional[int] = torch.Size((1, 1_001) )
self.assertEqual(outputs.logits.shape ,_snake_case )
lowercase__ : str = torch.tensor([0.2445, -1.1993, 0.1905] ).to(_snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_snake_case ,atol=1e-4 ) )
@slow
def UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : List[Any] = MobileNetVaForSemanticSegmentation.from_pretrained('''google/deeplabv3_mobilenet_v2_1.0_513''' )
lowercase__ : Optional[Any] = model.to(_snake_case )
lowercase__ : List[Any] = MobileNetVaImageProcessor.from_pretrained('''google/deeplabv3_mobilenet_v2_1.0_513''' )
lowercase__ : Tuple = prepare_img()
lowercase__ : str = image_processor(images=_snake_case ,return_tensors='''pt''' ).to(_snake_case )
# forward pass
with torch.no_grad():
lowercase__ : Optional[Any] = model(**_snake_case )
lowercase__ : Any = outputs.logits
# verify the logits
lowercase__ : Optional[int] = torch.Size((1, 21, 65, 65) )
self.assertEqual(logits.shape ,_snake_case )
lowercase__ : List[Any] = torch.tensor(
[
[[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
[[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
[[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
] ,device=_snake_case ,)
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] ,_snake_case ,atol=1e-4 ) )
| 302
|
"""simple docstring"""
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
lowerCAmelCase_ = logging.get_logger(__name__)
# General docstring
lowerCAmelCase_ = 'RegNetConfig'
# Base docstring
lowerCAmelCase_ = 'facebook/regnet-y-040'
lowerCAmelCase_ = [1, 1_088, 7, 7]
# Image classification docstring
lowerCAmelCase_ = 'facebook/regnet-y-040'
lowerCAmelCase_ = 'tabby, tabby cat'
lowerCAmelCase_ = [
'facebook/regnet-y-040',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class __A ( nn.Module ):
'''simple docstring'''
def __init__( self : int ,_snake_case : int ,_snake_case : int ,_snake_case : int = 3 ,_snake_case : int = 1 ,_snake_case : int = 1 ,_snake_case : Optional[str] = "relu" ,) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
lowercase__ : Tuple = nn.Convad(
_snake_case ,_snake_case ,kernel_size=_snake_case ,stride=_snake_case ,padding=kernel_size // 2 ,groups=_snake_case ,bias=_snake_case ,)
lowercase__ : List[Any] = nn.BatchNormad(_snake_case )
lowercase__ : Optional[int] = ACTaFN[activation] if activation is not None else nn.Identity()
def UpperCAmelCase ( self : List[str] ,_snake_case : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : Optional[Any] = self.convolution(_snake_case )
lowercase__ : Tuple = self.normalization(_snake_case )
lowercase__ : Tuple = self.activation(_snake_case )
return hidden_state
class __A ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] ,_snake_case : RegNetConfig ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
lowercase__ : List[Any] = RegNetConvLayer(
config.num_channels ,config.embedding_size ,kernel_size=3 ,stride=2 ,activation=config.hidden_act )
lowercase__ : str = config.num_channels
def UpperCAmelCase ( self : int ,_snake_case : Dict ) -> str:
"""simple docstring"""
lowercase__ : Union[str, Any] = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
lowercase__ : Optional[int] = self.embedder(_snake_case )
return hidden_state
class __A ( nn.Module ):
'''simple docstring'''
def __init__( self : str ,_snake_case : int ,_snake_case : int ,_snake_case : int = 2 ) -> Any:
"""simple docstring"""
super().__init__()
lowercase__ : List[str] = nn.Convad(_snake_case ,_snake_case ,kernel_size=1 ,stride=_snake_case ,bias=_snake_case )
lowercase__ : Any = nn.BatchNormad(_snake_case )
def UpperCAmelCase ( self : List[str] ,_snake_case : Tensor ) -> Tensor:
"""simple docstring"""
lowercase__ : Union[str, Any] = self.convolution(_snake_case )
lowercase__ : Optional[int] = self.normalization(_snake_case )
return hidden_state
class __A ( nn.Module ):
'''simple docstring'''
def __init__( self : Tuple ,_snake_case : int ,_snake_case : int ) -> Dict:
"""simple docstring"""
super().__init__()
lowercase__ : Any = nn.AdaptiveAvgPoolad((1, 1) )
lowercase__ : Dict = nn.Sequential(
nn.Convad(_snake_case ,_snake_case ,kernel_size=1 ) ,nn.ReLU() ,nn.Convad(_snake_case ,_snake_case ,kernel_size=1 ) ,nn.Sigmoid() ,)
def UpperCAmelCase ( self : int ,_snake_case : List[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : List[str] = self.pooler(_snake_case )
lowercase__ : Union[str, Any] = self.attention(_snake_case )
lowercase__ : List[str] = hidden_state * attention
return hidden_state
class __A ( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] ,_snake_case : RegNetConfig ,_snake_case : int ,_snake_case : int ,_snake_case : int = 1 ) -> List[str]:
"""simple docstring"""
super().__init__()
lowercase__ : Tuple = in_channels != out_channels or stride != 1
lowercase__ : Optional[int] = max(1 ,out_channels // config.groups_width )
lowercase__ : str = (
RegNetShortCut(_snake_case ,_snake_case ,stride=_snake_case ) if should_apply_shortcut else nn.Identity()
)
lowercase__ : Optional[int] = nn.Sequential(
RegNetConvLayer(_snake_case ,_snake_case ,kernel_size=1 ,activation=config.hidden_act ) ,RegNetConvLayer(_snake_case ,_snake_case ,stride=_snake_case ,groups=_snake_case ,activation=config.hidden_act ) ,RegNetConvLayer(_snake_case ,_snake_case ,kernel_size=1 ,activation=_snake_case ) ,)
lowercase__ : str = ACTaFN[config.hidden_act]
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : List[Any] ) -> List[str]:
"""simple docstring"""
lowercase__ : Tuple = hidden_state
lowercase__ : Union[str, Any] = self.layer(_snake_case )
lowercase__ : List[Any] = self.shortcut(_snake_case )
hidden_state += residual
lowercase__ : Optional[int] = self.activation(_snake_case )
return hidden_state
class __A ( nn.Module ):
'''simple docstring'''
def __init__( self : Tuple ,_snake_case : RegNetConfig ,_snake_case : int ,_snake_case : int ,_snake_case : int = 1 ) -> Optional[int]:
"""simple docstring"""
super().__init__()
lowercase__ : List[Any] = in_channels != out_channels or stride != 1
lowercase__ : List[str] = max(1 ,out_channels // config.groups_width )
lowercase__ : Tuple = (
RegNetShortCut(_snake_case ,_snake_case ,stride=_snake_case ) if should_apply_shortcut else nn.Identity()
)
lowercase__ : str = nn.Sequential(
RegNetConvLayer(_snake_case ,_snake_case ,kernel_size=1 ,activation=config.hidden_act ) ,RegNetConvLayer(_snake_case ,_snake_case ,stride=_snake_case ,groups=_snake_case ,activation=config.hidden_act ) ,RegNetSELayer(_snake_case ,reduced_channels=int(round(in_channels / 4 ) ) ) ,RegNetConvLayer(_snake_case ,_snake_case ,kernel_size=1 ,activation=_snake_case ) ,)
lowercase__ : Optional[Any] = ACTaFN[config.hidden_act]
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : Optional[int] ) -> Tuple:
"""simple docstring"""
lowercase__ : str = hidden_state
lowercase__ : Optional[Any] = self.layer(_snake_case )
lowercase__ : int = self.shortcut(_snake_case )
hidden_state += residual
lowercase__ : str = self.activation(_snake_case )
return hidden_state
class __A ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] ,_snake_case : RegNetConfig ,_snake_case : int ,_snake_case : int ,_snake_case : int = 2 ,_snake_case : int = 2 ,) -> Dict:
"""simple docstring"""
super().__init__()
lowercase__ : Optional[Any] = RegNetXLayer if config.layer_type == '''x''' else RegNetYLayer
lowercase__ : Optional[Any] = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
_snake_case ,_snake_case ,_snake_case ,stride=_snake_case ,) ,*[layer(_snake_case ,_snake_case ,_snake_case ) for _ in range(depth - 1 )] ,)
def UpperCAmelCase ( self : Tuple ,_snake_case : int ) -> List[Any]:
"""simple docstring"""
lowercase__ : List[str] = self.layers(_snake_case )
return hidden_state
class __A ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict ,_snake_case : RegNetConfig ) -> List[Any]:
"""simple docstring"""
super().__init__()
lowercase__ : str = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
_snake_case ,config.embedding_size ,config.hidden_sizes[0] ,stride=2 if config.downsample_in_first_stage else 1 ,depth=config.depths[0] ,) )
lowercase__ : str = zip(config.hidden_sizes ,config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(_snake_case ,config.depths[1:] ):
self.stages.append(RegNetStage(_snake_case ,_snake_case ,_snake_case ,depth=_snake_case ) )
def UpperCAmelCase ( self : List[str] ,_snake_case : Tensor ,_snake_case : bool = False ,_snake_case : bool = True ) -> BaseModelOutputWithNoAttention:
"""simple docstring"""
lowercase__ : Dict = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowercase__ : int = hidden_states + (hidden_state,)
lowercase__ : Any = stage_module(_snake_case )
if output_hidden_states:
lowercase__ : Optional[int] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=_snake_case ,hidden_states=_snake_case )
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : int = RegNetConfig
lowerCAmelCase : List[Any] = "regnet"
lowerCAmelCase : Optional[int] = "pixel_values"
lowerCAmelCase : Union[str, Any] = True
def UpperCAmelCase ( self : Any ,_snake_case : Tuple ) -> List[Any]:
"""simple docstring"""
if isinstance(_snake_case ,nn.Convad ):
nn.init.kaiming_normal_(module.weight ,mode='''fan_out''' ,nonlinearity='''relu''' )
elif isinstance(_snake_case ,(nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight ,1 )
nn.init.constant_(module.bias ,0 )
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : Dict ,_snake_case : Any=False ) -> Optional[int]:
"""simple docstring"""
if isinstance(_snake_case ,_snake_case ):
lowercase__ : str = value
lowerCAmelCase_ = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
lowerCAmelCase_ = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." ,A_ ,)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class __A ( A_ ):
'''simple docstring'''
def __init__( self : Optional[Any] ,_snake_case : Any ) -> Tuple:
"""simple docstring"""
super().__init__(_snake_case )
lowercase__ : Any = config
lowercase__ : List[str] = RegNetEmbeddings(_snake_case )
lowercase__ : Any = RegNetEncoder(_snake_case )
lowercase__ : Dict = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_snake_case )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC ,output_type=_snake_case ,config_class=_CONFIG_FOR_DOC ,modality='''vision''' ,expected_output=_EXPECTED_OUTPUT_SHAPE ,)
def UpperCAmelCase ( self : Dict ,_snake_case : Tensor ,_snake_case : Optional[bool] = None ,_snake_case : Optional[bool] = None ) -> BaseModelOutputWithPoolingAndNoAttention:
"""simple docstring"""
lowercase__ : List[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase__ : Dict = return_dict if return_dict is not None else self.config.use_return_dict
lowercase__ : Union[str, Any] = self.embedder(_snake_case )
lowercase__ : List[Any] = self.encoder(
_snake_case ,output_hidden_states=_snake_case ,return_dict=_snake_case )
lowercase__ : str = encoder_outputs[0]
lowercase__ : Optional[int] = self.pooler(_snake_case )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_snake_case ,pooler_output=_snake_case ,hidden_states=encoder_outputs.hidden_states ,)
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " ,A_ ,)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class __A ( A_ ):
'''simple docstring'''
def __init__( self : int ,_snake_case : Tuple ) -> Any:
"""simple docstring"""
super().__init__(_snake_case )
lowercase__ : Optional[Any] = config.num_labels
lowercase__ : int = RegNetModel(_snake_case )
# classification head
lowercase__ : str = nn.Sequential(
nn.Flatten() ,nn.Linear(config.hidden_sizes[-1] ,config.num_labels ) if config.num_labels > 0 else nn.Identity() ,)
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_snake_case )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=_snake_case ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,)
def UpperCAmelCase ( self : List[Any] ,_snake_case : Optional[torch.FloatTensor] = None ,_snake_case : Optional[torch.LongTensor] = None ,_snake_case : Optional[bool] = None ,_snake_case : Optional[bool] = None ,) -> ImageClassifierOutputWithNoAttention:
"""simple docstring"""
lowercase__ : Any = return_dict if return_dict is not None else self.config.use_return_dict
lowercase__ : List[Any] = self.regnet(_snake_case ,output_hidden_states=_snake_case ,return_dict=_snake_case )
lowercase__ : List[str] = outputs.pooler_output if return_dict else outputs[1]
lowercase__ : Union[str, Any] = self.classifier(_snake_case )
lowercase__ : Optional[int] = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
lowercase__ : List[Any] = '''regression'''
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
lowercase__ : Dict = '''single_label_classification'''
else:
lowercase__ : Optional[int] = '''multi_label_classification'''
if self.config.problem_type == "regression":
lowercase__ : Union[str, Any] = MSELoss()
if self.num_labels == 1:
lowercase__ : List[Any] = loss_fct(logits.squeeze() ,labels.squeeze() )
else:
lowercase__ : Tuple = loss_fct(_snake_case ,_snake_case )
elif self.config.problem_type == "single_label_classification":
lowercase__ : Tuple = CrossEntropyLoss()
lowercase__ : str = loss_fct(logits.view(-1 ,self.num_labels ) ,labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
lowercase__ : Any = BCEWithLogitsLoss()
lowercase__ : Union[str, Any] = loss_fct(_snake_case ,_snake_case )
if not return_dict:
lowercase__ : Tuple = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=_snake_case ,logits=_snake_case ,hidden_states=outputs.hidden_states )
| 302
| 1
|
"""simple docstring"""
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class __snake_case ( unittest.TestCase):
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : List[Any] = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : int = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
# Removed: 'text_encoder/model.safetensors',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertFalse(is_safetensors_compatible(__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : int = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
_lowerCamelCase : Optional[int] = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = [
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
_lowerCamelCase : Union[str, Any] = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
_lowerCamelCase : str = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
_lowerCamelCase : Optional[Any] = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : Tuple = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
_lowerCamelCase : Any = '''fp16'''
self.assertFalse(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = [
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
]
_lowerCamelCase : str = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
_lowerCamelCase : Union[str, Any] = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : int = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
# 'text_encoder/model.fp16.safetensors',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
_lowerCamelCase : int = '''fp16'''
self.assertFalse(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
| 72
|
"""simple docstring"""
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class __snake_case ( unittest.TestCase):
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : List[Any] = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : int = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
# Removed: 'text_encoder/model.safetensors',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertFalse(is_safetensors_compatible(__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : int = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
_lowerCamelCase : Optional[int] = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = [
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
_lowerCamelCase : Union[str, Any] = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
_lowerCamelCase : str = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
_lowerCamelCase : Optional[Any] = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : Tuple = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
_lowerCamelCase : Any = '''fp16'''
self.assertFalse(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = [
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
]
_lowerCamelCase : str = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
_lowerCamelCase : Union[str, Any] = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : int = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
# 'text_encoder/model.fp16.safetensors',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
_lowerCamelCase : int = '''fp16'''
self.assertFalse(is_safetensors_compatible(__lowerCAmelCase , variant=__lowerCAmelCase ) )
| 72
| 1
|
from __future__ import annotations
def __A ( __lowerCamelCase ) -> list[int]:
a = 2
a = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(__lowerCamelCase )
if n > 1:
factors.append(__lowerCamelCase )
return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
| 347
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
__UpperCamelCase : int = {
"shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json",
# See all Nat models at https://huggingface.co/models?filter=nat
}
class __lowerCAmelCase ( __magic_name__ , __magic_name__ ):
UpperCamelCase__ = '''nat'''
UpperCamelCase__ = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self :Any , __magic_name__ :int=4 , __magic_name__ :Dict=3 , __magic_name__ :List[str]=64 , __magic_name__ :Optional[int]=[3, 4, 6, 5] , __magic_name__ :int=[2, 4, 8, 16] , __magic_name__ :str=7 , __magic_name__ :Tuple=3.0 , __magic_name__ :Dict=True , __magic_name__ :List[Any]=0.0 , __magic_name__ :List[Any]=0.0 , __magic_name__ :List[Any]=0.1 , __magic_name__ :Optional[Any]="gelu" , __magic_name__ :Optional[Any]=0.02 , __magic_name__ :Tuple=1E-5 , __magic_name__ :Union[str, Any]=0.0 , __magic_name__ :int=None , __magic_name__ :Any=None , **__magic_name__ :Dict , ):
'''simple docstring'''
super().__init__(**__magic_name__ )
a = patch_size
a = num_channels
a = embed_dim
a = depths
a = len(__magic_name__ )
a = num_heads
a = kernel_size
a = mlp_ratio
a = qkv_bias
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = drop_path_rate
a = hidden_act
a = layer_norm_eps
a = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
a = int(embed_dim * 2 ** (len(__magic_name__ ) - 1) )
a = layer_scale_init_value
a = ["""stem"""] + [F'stage{idx}' for idx in range(1 , len(__magic_name__ ) + 1 )]
a , a = get_aligned_output_features_output_indices(
out_features=__magic_name__ , out_indices=__magic_name__ , stage_names=self.stage_names )
| 347
| 1
|
'''simple docstring'''
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = R"""\w+[.]\d+"""
SCREAMING_SNAKE_CASE : int = re.findall(lowerCamelCase_ , lowerCamelCase_ )
for pat in pats:
SCREAMING_SNAKE_CASE : List[str] = key.replace(lowerCamelCase_ , """_""".join(pat.split(""".""" ) ) )
return key
def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = pt_tuple_key[:-1] + ("""scale""",)
if (
any("""norm""" in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
SCREAMING_SNAKE_CASE : List[str] = pt_tuple_key[:-1] + ("""scale""",)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
SCREAMING_SNAKE_CASE : Optional[Any] = pt_tuple_key[:-1] + ("""scale""",)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
SCREAMING_SNAKE_CASE : Optional[Any] = pt_tuple_key[:-1] + ("""embedding""",)
return renamed_pt_tuple_key, pt_tensor
# conv layer
SCREAMING_SNAKE_CASE : Dict = pt_tuple_key[:-1] + ("""kernel""",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
SCREAMING_SNAKE_CASE : Any = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
SCREAMING_SNAKE_CASE : Dict = pt_tuple_key[:-1] + ("""kernel""",)
if pt_tuple_key[-1] == "weight":
SCREAMING_SNAKE_CASE : Optional[Any] = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
SCREAMING_SNAKE_CASE : str = pt_tuple_key[:-1] + ("""weight""",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
SCREAMING_SNAKE_CASE : Optional[int] = pt_tuple_key[:-1] + ("""bias""",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=42 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
SCREAMING_SNAKE_CASE : Tuple = flax_model.init_weights(PRNGKey(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Any = flatten_dict(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
SCREAMING_SNAKE_CASE : Union[str, Any] = rename_key(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = tuple(renamed_pt_key.split(""".""" ) )
# Correctly rename weight parameters
SCREAMING_SNAKE_CASE : Tuple = rename_key_and_reshape_tensor(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
# also add unexpected weight so that warning is thrown
SCREAMING_SNAKE_CASE : List[Any] = jnp.asarray(lowerCamelCase_ )
return unflatten_dict(lowerCamelCase_ )
| 323
|
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
UpperCAmelCase_ : Any = logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE ( _a ):
def __init__( self : Optional[int] , **__lowerCamelCase : Optional[int] ):
requires_backends(self , ["""bs4"""] )
super().__init__(**__lowerCamelCase )
def _A ( self : List[str] , __lowerCamelCase : Any ):
UpperCamelCase :Optional[int] = []
UpperCamelCase :List[str] = []
UpperCamelCase :Union[str, Any] = element if element.name else element.parent
for parent in child.parents: # type: bs4.element.Tag
UpperCamelCase :Optional[Any] = parent.find_all(child.name , recursive=__lowerCamelCase )
xpath_tags.append(child.name )
xpath_subscripts.append(
0 if 1 == len(__lowerCamelCase ) else next(i for i, s in enumerate(__lowerCamelCase , 1 ) if s is child ) )
UpperCamelCase :Any = parent
xpath_tags.reverse()
xpath_subscripts.reverse()
return xpath_tags, xpath_subscripts
def _A ( self : Any , __lowerCamelCase : Tuple ):
UpperCamelCase :Any = BeautifulSoup(__lowerCamelCase , """html.parser""" )
UpperCamelCase :Union[str, Any] = []
UpperCamelCase :Tuple = []
UpperCamelCase :Tuple = []
for element in html_code.descendants:
if type(__lowerCamelCase ) == bsa.element.NavigableString:
if type(element.parent ) != bsa.element.Tag:
continue
UpperCamelCase :Any = html.unescape(__lowerCamelCase ).strip()
if not text_in_this_tag:
continue
all_doc_strings.append(__lowerCamelCase )
UpperCamelCase , UpperCamelCase :Optional[Any] = self.xpath_soup(__lowerCamelCase )
stringaxtag_seq.append(__lowerCamelCase )
stringaxsubs_seq.append(__lowerCamelCase )
if len(__lowerCamelCase ) != len(__lowerCamelCase ):
raise ValueError("""Number of doc strings and xtags does not correspond""" )
if len(__lowerCamelCase ) != len(__lowerCamelCase ):
raise ValueError("""Number of doc strings and xsubs does not correspond""" )
return all_doc_strings, stringaxtag_seq, stringaxsubs_seq
def _A ( self : int , __lowerCamelCase : List[Any] , __lowerCamelCase : List[str] ):
UpperCamelCase :Tuple = """"""
for tagname, subs in zip(__lowerCamelCase , __lowerCamelCase ):
xpath += F"""/{tagname}"""
if subs != 0:
xpath += F"""[{subs}]"""
return xpath
def __call__( self : Any , __lowerCamelCase : Dict ):
UpperCamelCase :Any = False
# Check that strings has a valid type
if isinstance(__lowerCamelCase , __lowerCamelCase ):
UpperCamelCase :List[Any] = True
elif isinstance(__lowerCamelCase , (list, tuple) ):
if len(__lowerCamelCase ) == 0 or isinstance(html_strings[0] , __lowerCamelCase ):
UpperCamelCase :Any = True
if not valid_strings:
raise ValueError(
"""HTML strings must of type `str`, `List[str]` (batch of examples), """
F"""but is of type {type(__lowerCamelCase )}.""" )
UpperCamelCase :str = bool(isinstance(__lowerCamelCase , (list, tuple) ) and (isinstance(html_strings[0] , __lowerCamelCase )) )
if not is_batched:
UpperCamelCase :Any = [html_strings]
# Get nodes + xpaths
UpperCamelCase :Union[str, Any] = []
UpperCamelCase :str = []
for html_string in html_strings:
UpperCamelCase , UpperCamelCase , UpperCamelCase :int = self.get_three_from_single(__lowerCamelCase )
nodes.append(__lowerCamelCase )
UpperCamelCase :int = []
for node, tag_list, sub_list in zip(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
UpperCamelCase :str = self.construct_xpath(__lowerCamelCase , __lowerCamelCase )
xpath_strings.append(__lowerCamelCase )
xpaths.append(__lowerCamelCase )
# return as Dict
UpperCamelCase :Optional[int] = {"""nodes""": nodes, """xpaths""": xpaths}
UpperCamelCase :Any = BatchFeature(data=__lowerCamelCase , tensor_type=__lowerCamelCase )
return encoded_inputs
| 38
| 0
|
'''simple docstring'''
def UpperCamelCase ( a , a ) -> bool:
'''simple docstring'''
__magic_name__ = len(a ) + 1
__magic_name__ = len(a ) + 1
# dp is a 2d matrix where dp[i][j] denotes whether prefix string of
# length i of input_string matches with prefix string of length j of
# given pattern.
# "dp" stands for dynamic programming.
__magic_name__ = [[0 for i in range(a )] for j in range(a )]
# since string of zero length match pattern of zero length
__magic_name__ = 1
# since pattern of zero length will never match with string of non-zero length
for i in range(1 , a ):
__magic_name__ = 0
# since string of zero length will match with pattern where there
# is at least one * alternatively
for j in range(1 , a ):
__magic_name__ = dp[0][j - 2] if pattern[j - 1] == '''*''' else 0
# now using bottom-up approach to find for all remaining lengths
for i in range(1 , a ):
for j in range(1 , a ):
if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
__magic_name__ = dp[i - 1][j - 1]
elif pattern[j - 1] == "*":
if dp[i][j - 2] == 1:
__magic_name__ = 1
elif pattern[j - 2] in (input_string[i - 1], "."):
__magic_name__ = dp[i - 1][j]
else:
__magic_name__ = 0
else:
__magic_name__ = 0
return bool(dp[-1][-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
_lowerCAmelCase = "aab"
_lowerCAmelCase = "c*a*b"
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(F'''{input_string} matches the given pattern {pattern}''')
else:
print(F'''{input_string} does not match with the given pattern {pattern}''')
| 98
|
'''simple docstring'''
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class _SCREAMING_SNAKE_CASE :
def __init__( self : Optional[Any] , a__ : Any , a__ : Tuple=sys.maxsize ):
__magic_name__ = '''bilinear'''
__magic_name__ = max_size
__magic_name__ = short_edge_length
def __call__( self : Tuple , a__ : List[str] ):
__magic_name__ = []
for img in imgs:
__magic_name__ , __magic_name__ = img.shape[:2]
# later: provide list and randomly choose index for resize
__magic_name__ = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
if size == 0:
return img
__magic_name__ = size * 1.0 / min(a__ , a__ )
if h < w:
__magic_name__ , __magic_name__ = size, scale * w
else:
__magic_name__ , __magic_name__ = scale * h, size
if max(a__ , a__ ) > self.max_size:
__magic_name__ = self.max_size * 1.0 / max(a__ , a__ )
__magic_name__ = newh * scale
__magic_name__ = neww * scale
__magic_name__ = int(neww + 0.5 )
__magic_name__ = int(newh + 0.5 )
if img.dtype == np.uinta:
__magic_name__ = Image.fromarray(a__ )
__magic_name__ = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
__magic_name__ = np.asarray(a__ )
else:
__magic_name__ = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw
__magic_name__ = nn.functional.interpolate(
a__ , (newh, neww) , mode=self.interp_method , align_corners=a__ ).squeeze(0 )
img_augs.append(a__ )
return img_augs
class _SCREAMING_SNAKE_CASE :
def __init__( self : List[Any] , a__ : Tuple ):
__magic_name__ = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
__magic_name__ = cfg.INPUT.FORMAT
__magic_name__ = cfg.SIZE_DIVISIBILITY
__magic_name__ = cfg.PAD_VALUE
__magic_name__ = cfg.INPUT.MAX_SIZE_TEST
__magic_name__ = cfg.MODEL.DEVICE
__magic_name__ = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
__magic_name__ = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
__magic_name__ = lambda a__ : (x - self.pixel_mean) / self.pixel_std
def snake_case__ ( self : Union[str, Any] , a__ : Dict ):
__magic_name__ = tuple(max(a__ ) for s in zip(*[img.shape for img in images] ) )
__magic_name__ = [im.shape[-2:] for im in images]
__magic_name__ = [
nn.functional.pad(
a__ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(a__ , a__ )
]
return torch.stack(a__ ), torch.tensor(a__ )
def __call__( self : Dict , a__ : Dict , a__ : List[str]=False ):
with torch.no_grad():
if not isinstance(a__ , a__ ):
__magic_name__ = [images]
if single_image:
assert len(a__ ) == 1
for i in range(len(a__ ) ):
if isinstance(images[i] , torch.Tensor ):
images.insert(a__ , images.pop(a__ ).to(self.device ).float() )
elif not isinstance(images[i] , torch.Tensor ):
images.insert(
a__ , torch.as_tensor(img_tensorize(images.pop(a__ ) , input_format=self.input_format ) )
.to(self.device )
.float() , )
# resize smallest edge
__magic_name__ = torch.tensor([im.shape[:2] for im in images] )
__magic_name__ = self.aug(a__ )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
__magic_name__ = [self.normalizer(a__ ) for x in images]
# now pad them to do the following operations
__magic_name__ , __magic_name__ = self.pad(a__ )
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
__magic_name__ = torch.true_divide(a__ , a__ )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def UpperCamelCase ( a , a ) -> List[Any]:
'''simple docstring'''
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def UpperCamelCase ( a , a ) -> Any:
'''simple docstring'''
assert torch.isfinite(a ).all(), "Box tensor contains infinite or NaN!"
__magic_name__ , __magic_name__ = box_size
tensor[:, 0].clamp_(min=0 , max=a )
tensor[:, 1].clamp_(min=0 , max=a )
tensor[:, 2].clamp_(min=0 , max=a )
tensor[:, 3].clamp_(min=0 , max=a )
| 98
| 1
|
"""simple docstring"""
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def __UpperCAmelCase ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any] = None ) -> str:
'''simple docstring'''
if version.parse(hfh.__version__ ).release < version.parse('0.11.0' ).release:
# old versions of hfh don't url-encode the file path
__snake_case : int = quote(__UpperCamelCase )
return hfh.hf_hub_url(__UpperCamelCase , __UpperCamelCase , repo_type='dataset' , revision=__UpperCamelCase )
| 172
|
'''simple docstring'''
def lowercase__ ( __UpperCamelCase = 2000000 )-> int:
UpperCamelCase = [0 for i in range(n + 1 )]
UpperCamelCase = 1
UpperCamelCase = 1
for i in range(2 , int(n**0.5 ) + 1 ):
if primality_list[i] == 0:
for j in range(i * i , n + 1 , __UpperCamelCase ):
UpperCamelCase = 1
UpperCamelCase = 0
for i in range(__UpperCamelCase ):
if primality_list[i] == 0:
sum_of_primes += i
return sum_of_primes
if __name__ == "__main__":
print(f'{solution() = }')
| 321
| 0
|
from math import pi, sqrt
def lowerCamelCase__ ( a ) -> float:
if num <= 0:
raise ValueError('''math domain error''' )
if num > 171.5:
raise OverflowError('''math range error''' )
elif num - int(a ) not in (0, 0.5):
raise NotImplementedError('''num must be an integer or a half-integer''' )
elif num == 0.5:
return sqrt(a )
else:
return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
def lowerCamelCase__ ( ) -> None:
assert gamma(0.5 ) == sqrt(a )
assert gamma(1 ) == 1.0
assert gamma(2 ) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
UpperCAmelCase__ : Dict = 1.0
while num:
UpperCAmelCase__ : Optional[Any] = float(input('Gamma of: '))
print(F"""gamma({num}) = {gamma(num)}""")
print('\nEnter 0 to exit...')
| 364
|
def lowerCamelCase__ ( a = 10 ) -> str:
if not isinstance(a , a ) or n < 0:
raise ValueError('''Invalid input''' )
_A: int = 10**n
_A: List[Any] = 2_84_33 * (pow(2 , 7_83_04_57 , a )) + 1
return str(number % modulus )
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"""{solution(10) = }""")
| 301
| 0
|
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
__lowerCamelCase : UNetaDModel
__lowerCamelCase : ScoreSdeVeScheduler
def __init__( self : Optional[Any] , __lowercase : UNetaDModel , __lowercase : ScoreSdeVeScheduler ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=__lowercase , scheduler=__lowercase )
@torch.no_grad()
def __call__( self : Tuple , __lowercase : int = 1 , __lowercase : int = 2000 , __lowercase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __lowercase : Optional[str] = "pil" , __lowercase : bool = True , **__lowercase : str , ):
'''simple docstring'''
__a = self.unet.config.sample_size
__a = (batch_size, 3, img_size, img_size)
__a = self.unet
__a = randn_tensor(__lowercase , generator=__lowercase ) * self.scheduler.init_noise_sigma
__a = sample.to(self.device )
self.scheduler.set_timesteps(__lowercase )
self.scheduler.set_sigmas(__lowercase )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
__a = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
__a = self.unet(__lowercase , __lowercase ).sample
__a = self.scheduler.step_correct(__lowercase , __lowercase , generator=__lowercase ).prev_sample
# prediction step
__a = model(__lowercase , __lowercase ).sample
__a = self.scheduler.step_pred(__lowercase , __lowercase , __lowercase , generator=__lowercase )
__a , __a = output.prev_sample, output.prev_sample_mean
__a = sample_mean.clamp(0 , 1 )
__a = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__a = self.numpy_to_pil(__lowercase )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=__lowercase )
| 302
|
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
__a = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__lowercase , """width_multiplier""" ) )
class SCREAMING_SNAKE_CASE :
def __init__( self : Dict , __lowercase : Union[str, Any] , __lowercase : Dict=13 , __lowercase : int=64 , __lowercase : Tuple=2 , __lowercase : Tuple=3 , __lowercase : Tuple="swish" , __lowercase : List[Any]=3 , __lowercase : List[str]=32 , __lowercase : int=0.1 , __lowercase : Union[str, Any]=0.02 , __lowercase : Optional[int]=True , __lowercase : Dict=True , __lowercase : Tuple=10 , __lowercase : str=None , __lowercase : Optional[Any]=0.25 , __lowercase : str=0.0 , __lowercase : Optional[Any]=0.0 , ):
'''simple docstring'''
__a = parent
__a = batch_size
__a = image_size
__a = patch_size
__a = num_channels
__a = make_divisible(512 * width_multiplier , divisor=8 )
__a = hidden_act
__a = conv_kernel_size
__a = output_stride
__a = classifier_dropout_prob
__a = use_labels
__a = is_training
__a = num_labels
__a = initializer_range
__a = scope
__a = width_multiplier
__a = ffn_dropout
__a = attn_dropout
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
__a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a = None
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.num_labels )
__a = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__a = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
def UpperCamelCase_ ( self : Tuple , __lowercase : Optional[Any] , __lowercase : int , __lowercase : Optional[Any] , __lowercase : Tuple ):
'''simple docstring'''
__a = MobileViTVaModel(config=__lowercase )
model.to(__lowercase )
model.eval()
__a = model(__lowercase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def UpperCamelCase_ ( self : Union[str, Any] , __lowercase : List[Any] , __lowercase : str , __lowercase : Optional[int] , __lowercase : Union[str, Any] ):
'''simple docstring'''
__a = self.num_labels
__a = MobileViTVaForImageClassification(__lowercase )
model.to(__lowercase )
model.eval()
__a = model(__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self : int , __lowercase : str , __lowercase : Any , __lowercase : int , __lowercase : List[str] ):
'''simple docstring'''
__a = self.num_labels
__a = MobileViTVaForSemanticSegmentation(__lowercase )
model.to(__lowercase )
model.eval()
__a = model(__lowercase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__a = model(__lowercase , labels=__lowercase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
__a = self.prepare_config_and_inputs()
__a , __a , __a , __a = config_and_inputs
__a = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase : List[Any] =(
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
__lowerCamelCase : Any =(
{
'feature-extraction': MobileViTVaModel,
'image-classification': MobileViTVaForImageClassification,
'image-segmentation': MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__lowerCamelCase : Dict =False
__lowerCamelCase : Optional[Any] =False
__lowerCamelCase : int =False
__lowerCamelCase : Any =False
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
__a = MobileViTVaModelTester(self )
__a = MobileViTVaConfigTester(self , config_class=__lowercase , has_text_modality=__lowercase )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileViTV2 does not use inputs_embeds""" )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="""MobileViTV2 does not support input and output embeddings""" )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
pass
@unittest.skip(reason="""MobileViTV2 does not output attentions""" )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="""Got `CUDA error: misaligned address` for tests after this one being run.""" )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(__lowercase )
__a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a = [*signature.parameters.keys()]
__a = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __lowercase )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
def check_hidden_states_output(__lowercase : List[str] , __lowercase : Optional[int] , __lowercase : List[str] ):
__a = model_class(__lowercase )
model.to(__lowercase )
model.eval()
with torch.no_grad():
__a = model(**self._prepare_for_class(__lowercase , __lowercase ) )
__a = outputs.hidden_states
__a = 5
self.assertEqual(len(__lowercase ) , __lowercase )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
__a = 2
for i in range(len(__lowercase ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = True
check_hidden_states_output(__lowercase , __lowercase , __lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a = True
check_hidden_states_output(__lowercase , __lowercase , __lowercase )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowercase )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__lowercase )
@slow
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a = MobileViTVaModel.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
def lowerCAmelCase__ ( ):
"""simple docstring"""
__a = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@cached_property
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
return (
MobileViTImageProcessor.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" )
if is_vision_available()
else None
)
@slow
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
__a = MobileViTVaForImageClassification.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" ).to(
__lowercase )
__a = self.default_image_processor
__a = prepare_img()
__a = image_processor(images=__lowercase , return_tensors="""pt""" ).to(__lowercase )
# forward pass
with torch.no_grad():
__a = model(**__lowercase )
# verify the logits
__a = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __lowercase )
__a = torch.tensor([-1.6336E00, -7.3204E-02, -5.1883E-01] ).to(__lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowercase , atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
__a = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
__a = model.to(__lowercase )
__a = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
__a = prepare_img()
__a = image_processor(images=__lowercase , return_tensors="""pt""" ).to(__lowercase )
# forward pass
with torch.no_grad():
__a = model(**__lowercase )
__a = outputs.logits
# verify the logits
__a = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , __lowercase )
__a = torch.tensor(
[
[[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
[[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
[[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
] , device=__lowercase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __lowercase , atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
__a = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
__a = model.to(__lowercase )
__a = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
__a = prepare_img()
__a = image_processor(images=__lowercase , return_tensors="""pt""" ).to(__lowercase )
# forward pass
with torch.no_grad():
__a = model(**__lowercase )
__a = outputs.logits.detach().cpu()
__a = image_processor.post_process_semantic_segmentation(outputs=__lowercase , target_sizes=[(50, 60)] )
__a = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , __lowercase )
__a = image_processor.post_process_semantic_segmentation(outputs=__lowercase )
__a = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , __lowercase )
| 302
| 1
|
"""simple docstring"""
class UpperCamelCase :
"""simple docstring"""
def __init__( self ,UpperCAmelCase_ ):
_lowercase : Optional[int] = set_counts
_lowercase : str = max(UpperCAmelCase_ )
_lowercase : str = len(UpperCAmelCase_ )
_lowercase : Any = [1] * num_sets
_lowercase : str = list(range(UpperCAmelCase_ ) )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ):
_lowercase : Any = self.get_parent(UpperCAmelCase_ )
_lowercase : str = self.get_parent(UpperCAmelCase_ )
if src_parent == dst_parent:
return False
if self.ranks[dst_parent] >= self.ranks[src_parent]:
self.set_counts[dst_parent] += self.set_counts[src_parent]
_lowercase : Tuple = 0
_lowercase : List[str] = dst_parent
if self.ranks[dst_parent] == self.ranks[src_parent]:
self.ranks[dst_parent] += 1
_lowercase : List[Any] = self.set_counts[dst_parent]
else:
self.set_counts[src_parent] += self.set_counts[dst_parent]
_lowercase : List[Any] = 0
_lowercase : List[str] = src_parent
_lowercase : Union[str, Any] = self.set_counts[src_parent]
_lowercase : Optional[int] = max(self.max_set ,UpperCAmelCase_ )
return True
def lowerCamelCase__ ( self ,UpperCAmelCase_ ):
if self.parents[disj_set] == disj_set:
return disj_set
_lowercase : Dict = self.get_parent(self.parents[disj_set] )
return self.parents[disj_set]
| 336
|
"""simple docstring"""
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
UpperCAmelCase: List[Any] = logging.get_logger(__name__)
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
def constraint_to_multiple_of(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=0 , __UpperCAmelCase=None ):
_lowercase : Union[str, Any] = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
_lowercase : str = math.floor(val / multiple ) * multiple
if x < min_val:
_lowercase : Dict = math.ceil(val / multiple ) * multiple
return x
_lowercase : List[str] = (output_size, output_size) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else output_size
_lowercase , _lowercase : List[Any] = get_image_size(__UpperCAmelCase )
_lowercase , _lowercase : Union[str, Any] = output_size
# determine new height and width
_lowercase : str = output_height / input_height
_lowercase : List[Any] = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
_lowercase : str = scale_width
else:
# fit height
_lowercase : int = scale_height
_lowercase : List[Any] = constraint_to_multiple_of(scale_height * input_height , multiple=__UpperCAmelCase )
_lowercase : Optional[Any] = constraint_to_multiple_of(scale_width * input_width , multiple=__UpperCAmelCase )
return (new_height, new_width)
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = ["pixel_values"]
def __init__( self ,UpperCAmelCase_ = True ,UpperCAmelCase_ = None ,UpperCAmelCase_ = PILImageResampling.BILINEAR ,UpperCAmelCase_ = False ,UpperCAmelCase_ = 1 ,UpperCAmelCase_ = True ,UpperCAmelCase_ = 1 / 2_55 ,UpperCAmelCase_ = True ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
super().__init__(**UpperCAmelCase_ )
_lowercase : List[Any] = size if size is not None else {"""height""": 3_84, """width""": 3_84}
_lowercase : str = get_size_dict(UpperCAmelCase_ )
_lowercase : Tuple = do_resize
_lowercase : Any = size
_lowercase : List[Any] = keep_aspect_ratio
_lowercase : Any = ensure_multiple_of
_lowercase : str = resample
_lowercase : Optional[Any] = do_rescale
_lowercase : List[Any] = rescale_factor
_lowercase : Union[str, Any] = do_normalize
_lowercase : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_lowercase : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = False ,UpperCAmelCase_ = 1 ,UpperCAmelCase_ = PILImageResampling.BICUBIC ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
_lowercase : Optional[Any] = get_size_dict(UpperCAmelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
_lowercase : Dict = get_resize_output_image_size(
UpperCAmelCase_ ,output_size=(size["""height"""], size["""width"""]) ,keep_aspect_ratio=UpperCAmelCase_ ,multiple=UpperCAmelCase_ ,)
return resize(UpperCAmelCase_ ,size=UpperCAmelCase_ ,resample=UpperCAmelCase_ ,data_format=UpperCAmelCase_ ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
return rescale(UpperCAmelCase_ ,scale=UpperCAmelCase_ ,data_format=UpperCAmelCase_ ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,**UpperCAmelCase_ ,):
return normalize(UpperCAmelCase_ ,mean=UpperCAmelCase_ ,std=UpperCAmelCase_ ,data_format=UpperCAmelCase_ ,**UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,UpperCAmelCase_ = ChannelDimension.FIRST ,**UpperCAmelCase_ ,):
_lowercase : Any = do_resize if do_resize is not None else self.do_resize
_lowercase : List[str] = size if size is not None else self.size
_lowercase : int = get_size_dict(UpperCAmelCase_ )
_lowercase : Any = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
_lowercase : List[str] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
_lowercase : List[str] = resample if resample is not None else self.resample
_lowercase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
_lowercase : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowercase : str = do_normalize if do_normalize is not None else self.do_normalize
_lowercase : Optional[int] = image_mean if image_mean is not None else self.image_mean
_lowercase : int = image_std if image_std is not None else self.image_std
_lowercase : Union[str, Any] = make_list_of_images(UpperCAmelCase_ )
if not valid_images(UpperCAmelCase_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None or resample is None:
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
_lowercase : int = [to_numpy_array(UpperCAmelCase_ ) for image in images]
if do_resize:
_lowercase : Union[str, Any] = [self.resize(image=UpperCAmelCase_ ,size=UpperCAmelCase_ ,resample=UpperCAmelCase_ ) for image in images]
if do_rescale:
_lowercase : int = [self.rescale(image=UpperCAmelCase_ ,scale=UpperCAmelCase_ ) for image in images]
if do_normalize:
_lowercase : str = [self.normalize(image=UpperCAmelCase_ ,mean=UpperCAmelCase_ ,std=UpperCAmelCase_ ) for image in images]
_lowercase : Tuple = [to_channel_dimension_format(UpperCAmelCase_ ,UpperCAmelCase_ ) for image in images]
_lowercase : int = {"""pixel_values""": images}
return BatchFeature(data=UpperCAmelCase_ ,tensor_type=UpperCAmelCase_ )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ = None ):
_lowercase : Union[str, Any] = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(UpperCAmelCase_ ) != len(UpperCAmelCase_ ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(UpperCAmelCase_ ):
_lowercase : Tuple = target_sizes.numpy()
_lowercase : Optional[Any] = []
for idx in range(len(UpperCAmelCase_ ) ):
_lowercase : Dict = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) ,size=target_sizes[idx] ,mode="""bilinear""" ,align_corners=UpperCAmelCase_ )
_lowercase : Optional[int] = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(UpperCAmelCase_ )
else:
_lowercase : Union[str, Any] = logits.argmax(dim=1 )
_lowercase : Optional[Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 336
| 1
|
"""simple docstring"""
from __future__ import annotations
def _a ( _SCREAMING_SNAKE_CASE ) -> list[int]:
snake_case_ = 2
snake_case_ = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(_SCREAMING_SNAKE_CASE )
if n > 1:
factors.append(_SCREAMING_SNAKE_CASE )
return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
| 347
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: List[Any] = ["""sentencepiece"""]
def __init__( self : int , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[str] ) ->List[Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Optional[int] = ["""sentencepiece"""]
def __init__( self : Optional[int] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Tuple ) ->Dict:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Any = ["""sentencepiece"""]
def __init__( self : Any , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : List[Any] ) ->List[Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Dict = ["""sentencepiece"""]
def __init__( self : List[str] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : int ) ->Optional[int]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: List[str] = ["""sentencepiece"""]
def __init__( self : Dict , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : int ) ->List[str]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: int = ["""sentencepiece"""]
def __init__( self : Tuple , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Dict ) ->Optional[int]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: int = ["""sentencepiece"""]
def __init__( self : int , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Union[str, Any] ) ->Dict:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Optional[Any] = ["""sentencepiece"""]
def __init__( self : List[str] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Optional[Any] ) ->Union[str, Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Optional[int] = ["""sentencepiece"""]
def __init__( self : Optional[Any] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : List[Any] ) ->Tuple:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Any = ["""sentencepiece"""]
def __init__( self : List[Any] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : int ) ->List[Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Any = ["""sentencepiece"""]
def __init__( self : int , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : List[Any] ) ->Tuple:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: int = ["""sentencepiece"""]
def __init__( self : Optional[int] , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Union[str, Any] ) ->Union[str, Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Tuple = ["""sentencepiece"""]
def __init__( self : Dict , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : List[Any] ) ->Dict:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Union[str, Any] = ["""sentencepiece"""]
def __init__( self : List[Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : List[Any] ) ->Optional[int]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: int = ["""sentencepiece"""]
def __init__( self : int , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : List[str] ) ->Union[str, Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Tuple = ["""sentencepiece"""]
def __init__( self : int , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Optional[Any] ) ->str:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: List[Any] = ["""sentencepiece"""]
def __init__( self : Dict , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Optional[Any] ) ->int:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Union[str, Any] = ["""sentencepiece"""]
def __init__( self : Optional[Any] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Optional[int] ) ->Optional[int]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Any = ["""sentencepiece"""]
def __init__( self : Optional[Any] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Dict ) ->Dict:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Any = ["""sentencepiece"""]
def __init__( self : List[Any] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : List[str] ) ->List[Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: int = ["""sentencepiece"""]
def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Optional[Any] ) ->List[str]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: int = ["""sentencepiece"""]
def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : List[Any] ) ->Union[str, Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: List[Any] = ["""sentencepiece"""]
def __init__( self : Any , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Optional[Any] ) ->List[str]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Dict = ["""sentencepiece"""]
def __init__( self : int , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Union[str, Any] ) ->List[Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: List[Any] = ["""sentencepiece"""]
def __init__( self : List[Any] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Optional[int] ) ->Optional[Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Union[str, Any] = ["""sentencepiece"""]
def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : str ) ->Optional[int]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Optional[int] = ["""sentencepiece"""]
def __init__( self : Tuple , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Optional[int] ) ->Dict:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Dict = ["""sentencepiece"""]
def __init__( self : Optional[int] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : List[str] ) ->Optional[Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: int = ["""sentencepiece"""]
def __init__( self : Dict , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Optional[int] ) ->Any:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: List[str] = ["""sentencepiece"""]
def __init__( self : List[str] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Union[str, Any] ) ->Optional[Any]:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
class __A (metaclass=snake_case__):
'''simple docstring'''
__lowercase: Any = ["""sentencepiece"""]
def __init__( self : List[str] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Optional[int] ) ->str:
"""simple docstring"""
requires_backends(self , ["""sentencepiece"""] )
| 347
| 1
|
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=UpperCamelCase )
class A__ ( UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ : str = field(default='''image-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
UpperCamelCase_ : ClassVar[Features] = Features({'''image''': Image()} )
UpperCamelCase_ : ClassVar[Features] = Features({'''labels''': ClassLabel} )
UpperCamelCase_ : str = "image"
UpperCamelCase_ : str = "labels"
def _lowerCAmelCase ( self : Dict , lowerCAmelCase__ : Tuple ) -> Dict:
"""simple docstring"""
if self.label_column not in features:
raise ValueError(F"""Column {self.label_column} is not present in features.""" )
if not isinstance(features[self.label_column] , lowerCAmelCase__ ):
raise ValueError(F"""Column {self.label_column} is not a ClassLabel.""" )
_UpperCAmelCase : Optional[int] = copy.deepcopy(self )
_UpperCAmelCase : Optional[int] = self.label_schema.copy()
_UpperCAmelCase : List[str] = features[self.label_column]
_UpperCAmelCase : Optional[Any] = label_schema
return task_template
@property
def _lowerCAmelCase ( self : str ) -> Dict[str, str]:
"""simple docstring"""
return {
self.image_column: "image",
self.label_column: "labels",
}
| 17
|
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__a = logging.get_logger(__name__)
def __UpperCAmelCase ( a_: List[str] ):
_UpperCAmelCase : Union[str, Any] = OrderedDict()
for key, value in state_dict.items():
if key.startswith("module.encoder" ):
_UpperCAmelCase : Optional[int] = key.replace("module.encoder", "glpn.encoder" )
if key.startswith("module.decoder" ):
_UpperCAmelCase : List[Any] = key.replace("module.decoder", "decoder.stages" )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
_UpperCAmelCase : int = key[key.find("patch_embed" ) + len("patch_embed" )]
_UpperCAmelCase : Union[str, Any] = key.replace(f"""patch_embed{idx}""", f"""patch_embeddings.{int(a_ )-1}""" )
if "norm" in key:
_UpperCAmelCase : Union[str, Any] = key.replace("norm", "layer_norm" )
if "glpn.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
_UpperCAmelCase : str = key[key.find("glpn.encoder.layer_norm" ) + len("glpn.encoder.layer_norm" )]
_UpperCAmelCase : Optional[Any] = key.replace(f"""layer_norm{idx}""", f"""layer_norm.{int(a_ )-1}""" )
if "layer_norm1" in key:
_UpperCAmelCase : Union[str, Any] = key.replace("layer_norm1", "layer_norm_1" )
if "layer_norm2" in key:
_UpperCAmelCase : List[Any] = key.replace("layer_norm2", "layer_norm_2" )
if "block" in key:
# replace for example block1 by block.0
_UpperCAmelCase : Optional[Any] = key[key.find("block" ) + len("block" )]
_UpperCAmelCase : List[str] = key.replace(f"""block{idx}""", f"""block.{int(a_ )-1}""" )
if "attn.q" in key:
_UpperCAmelCase : Optional[int] = key.replace("attn.q", "attention.self.query" )
if "attn.proj" in key:
_UpperCAmelCase : List[str] = key.replace("attn.proj", "attention.output.dense" )
if "attn" in key:
_UpperCAmelCase : Dict = key.replace("attn", "attention.self" )
if "fc1" in key:
_UpperCAmelCase : List[Any] = key.replace("fc1", "dense1" )
if "fc2" in key:
_UpperCAmelCase : List[Any] = key.replace("fc2", "dense2" )
if "linear_pred" in key:
_UpperCAmelCase : Any = key.replace("linear_pred", "classifier" )
if "linear_fuse" in key:
_UpperCAmelCase : Dict = key.replace("linear_fuse.conv", "linear_fuse" )
_UpperCAmelCase : List[str] = key.replace("linear_fuse.bn", "batch_norm" )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
_UpperCAmelCase : List[Any] = key[key.find("linear_c" ) + len("linear_c" )]
_UpperCAmelCase : Tuple = key.replace(f"""linear_c{idx}""", f"""linear_c.{int(a_ )-1}""" )
if "bot_conv" in key:
_UpperCAmelCase : Union[str, Any] = key.replace("bot_conv", "0.convolution" )
if "skip_conv1" in key:
_UpperCAmelCase : Optional[int] = key.replace("skip_conv1", "1.convolution" )
if "skip_conv2" in key:
_UpperCAmelCase : Optional[int] = key.replace("skip_conv2", "2.convolution" )
if "fusion1" in key:
_UpperCAmelCase : List[str] = key.replace("fusion1", "1.fusion" )
if "fusion2" in key:
_UpperCAmelCase : List[str] = key.replace("fusion2", "2.fusion" )
if "fusion3" in key:
_UpperCAmelCase : Optional[Any] = key.replace("fusion3", "3.fusion" )
if "fusion" in key and "conv" in key:
_UpperCAmelCase : List[Any] = key.replace("conv", "convolutional_layer" )
if key.startswith("module.last_layer_depth" ):
_UpperCAmelCase : Optional[int] = key.replace("module.last_layer_depth", "head.head" )
_UpperCAmelCase : int = value
return new_state_dict
def __UpperCAmelCase ( a_: str, a_: List[Any] ):
# for each of the encoder blocks:
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
_UpperCAmelCase : Tuple = state_dict.pop(f"""glpn.encoder.block.{i}.{j}.attention.self.kv.weight""" )
_UpperCAmelCase : Union[str, Any] = state_dict.pop(f"""glpn.encoder.block.{i}.{j}.attention.self.kv.bias""" )
# next, add keys and values (in that order) to the state dict
_UpperCAmelCase : Optional[int] = kv_weight[
: config.hidden_sizes[i], :
]
_UpperCAmelCase : Dict = kv_bias[: config.hidden_sizes[i]]
_UpperCAmelCase : Optional[int] = kv_weight[
config.hidden_sizes[i] :, :
]
_UpperCAmelCase : Optional[Any] = kv_bias[config.hidden_sizes[i] :]
def __UpperCAmelCase ( ):
_UpperCAmelCase : Optional[int] = "http://images.cocodataset.org/val2017/000000039769.jpg"
_UpperCAmelCase : List[Any] = Image.open(requests.get(a_, stream=a_ ).raw )
return image
@torch.no_grad()
def __UpperCAmelCase ( a_: Tuple, a_: Any, a_: Optional[Any]=False, a_: List[Any]=None ):
_UpperCAmelCase : Optional[Any] = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3] )
# load image processor (only resize + rescale)
_UpperCAmelCase : Dict = GLPNImageProcessor()
# prepare image
_UpperCAmelCase : List[Any] = prepare_img()
_UpperCAmelCase : Optional[int] = image_processor(images=a_, return_tensors="pt" ).pixel_values
logger.info("Converting model..." )
# load original state dict
_UpperCAmelCase : Union[str, Any] = torch.load(a_, map_location=torch.device("cpu" ) )
# rename keys
_UpperCAmelCase : List[str] = rename_keys(a_ )
# key and value matrices need special treatment
read_in_k_v(a_, a_ )
# create HuggingFace model and load state dict
_UpperCAmelCase : List[str] = GLPNForDepthEstimation(a_ )
model.load_state_dict(a_ )
model.eval()
# forward pass
_UpperCAmelCase : Dict = model(a_ )
_UpperCAmelCase : List[str] = outputs.predicted_depth
# verify output
if model_name is not None:
if "nyu" in model_name:
_UpperCAmelCase : Optional[Any] = torch.tensor(
[[4.41_47, 4.08_73, 4.06_73], [3.78_90, 3.28_81, 3.15_25], [3.76_74, 3.54_23, 3.49_13]] )
elif "kitti" in model_name:
_UpperCAmelCase : Tuple = torch.tensor(
[[3.42_91, 2.78_65, 2.51_51], [3.28_41, 2.70_21, 2.35_02], [3.11_47, 2.46_25, 2.24_81]] )
else:
raise ValueError(f"""Unknown model name: {model_name}""" )
_UpperCAmelCase : Dict = torch.Size([1, 480, 640] )
assert predicted_depth.shape == expected_shape
assert torch.allclose(predicted_depth[0, :3, :3], a_, atol=1e-4 )
print("Looks ok!" )
# finally, push to hub if required
if push_to_hub:
logger.info("Pushing model and image processor to the hub..." )
model.push_to_hub(
repo_path_or_name=Path(a_, a_ ), organization="nielsr", commit_message="Add model", use_temp_dir=a_, )
image_processor.push_to_hub(
repo_path_or_name=Path(a_, a_ ), organization="nielsr", commit_message="Add image processor", use_temp_dir=a_, )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path',
default=None,
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
)
parser.add_argument(
'--model_name',
default='glpn-kitti',
type=str,
help='Name of the model in case you\'re pushing to the hub.',
)
__a = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 17
| 1
|
"""simple docstring"""
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
lowerCAmelCase__ : Optional[int] = importlib.util.find_spec('s3fs') is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
lowerCAmelCase__ : List[compression.BaseCompressedFileFileSystem] = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F"""A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.""")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def a_ ( lowerCamelCase ):
if "://" in dataset_path:
UpperCAmelCase__ = dataset_path.split('://' )[1]
return dataset_path
def a_ ( lowerCamelCase ):
if fs is not None and fs.protocol != "file":
return True
else:
return False
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = not is_remote_filesystem(lowerCamelCase )
if is_local:
# LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(lowerCamelCase ) , fs._strip_protocol(lowerCamelCase ) )
else:
fs.mv(lowerCamelCase , lowerCamelCase , recursive=lowerCamelCase )
def a_ ( ):
if hasattr(fsspec.asyn , 'reset_lock' ):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
UpperCAmelCase__ = None
UpperCAmelCase__ = None
UpperCAmelCase__ = threading.Lock()
| 98
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ : str = {
'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'],
'feature_extraction_mctct': ['MCTCTFeatureExtractor'],
'processing_mctct': ['MCTCTProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : Any = [
'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MCTCTForCTC',
'MCTCTModel',
'MCTCTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
lowerCAmelCase__ : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 98
| 1
|
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def lowerCAmelCase_ ( __a ) -> int:
"""simple docstring"""
return ConvertCommand(
args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
__A = "\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n"
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@staticmethod
def SCREAMING_SNAKE_CASE_ (UpperCAmelCase_ : ArgumentParser) ->str:
'''simple docstring'''
lowerCamelCase__: Dict =parser.add_parser(
"convert" , help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints." , )
train_parser.add_argument("--model_type" , type=UpperCAmelCase_ , required=UpperCAmelCase_ , help="Model's type.")
train_parser.add_argument(
"--tf_checkpoint" , type=UpperCAmelCase_ , required=UpperCAmelCase_ , help="TensorFlow checkpoint path or folder.")
train_parser.add_argument(
"--pytorch_dump_output" , type=UpperCAmelCase_ , required=UpperCAmelCase_ , help="Path to the PyTorch saved model output.")
train_parser.add_argument("--config" , type=UpperCAmelCase_ , default="" , help="Configuration file path or folder.")
train_parser.add_argument(
"--finetuning_task_name" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , help="Optional fine-tuning task name if the TF model was a finetuned model." , )
train_parser.set_defaults(func=UpperCAmelCase_)
def __init__(self : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : str , *UpperCAmelCase_ : Optional[int] , ) ->List[str]:
'''simple docstring'''
lowerCamelCase__: Dict =logging.get_logger("transformers-cli/converting")
self._logger.info(F"""Loading model {model_type}""")
lowerCamelCase__: Any =model_type
lowerCamelCase__: Optional[int] =tf_checkpoint
lowerCamelCase__: Any =pytorch_dump_output
lowerCamelCase__: Union[str, Any] =config
lowerCamelCase__: str =finetuning_task_name
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Tuple:
'''simple docstring'''
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(UpperCAmelCase_)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(UpperCAmelCase_)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(UpperCAmelCase_)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(UpperCAmelCase_)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(UpperCAmelCase_)
if "ckpt" in self._tf_checkpoint.lower():
lowerCamelCase__: Tuple =self._tf_checkpoint
lowerCamelCase__: List[str] =""
else:
lowerCamelCase__: Any =self._tf_checkpoint
lowerCamelCase__: Dict =""
convert_transfo_xl_checkpoint_to_pytorch(
UpperCAmelCase_ , self._config , self._pytorch_dump_output , UpperCAmelCase_)
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(UpperCAmelCase_)
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(UpperCAmelCase_)
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name)
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output)
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output)
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
else:
raise ValueError(
"--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]")
| 273
|
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = CTRLTokenizer
lowercase_ = False
lowercase_ = False
def SCREAMING_SNAKE_CASE_ (self : str) ->Union[str, Any]:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCamelCase__: Optional[Any] =["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
lowerCamelCase__: Optional[Any] =dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_))))
lowerCamelCase__: Union[str, Any] =["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
lowerCamelCase__: Union[str, Any] ={"unk_token": "<unk>"}
lowerCamelCase__: Union[str, Any] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
lowerCamelCase__: Optional[Any] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as fp:
fp.write(json.dumps(UpperCAmelCase_) + "\n")
with open(self.merges_file , "w" , encoding="utf-8") as fp:
fp.write("\n".join(UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ (self : List[str] , **UpperCAmelCase_ : str) ->List[Any]:
'''simple docstring'''
kwargs.update(self.special_tokens_map)
return CTRLTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : List[Any] , UpperCAmelCase_ : Optional[int]) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: int ="adapt react readapt apt"
lowerCamelCase__: int ="adapt react readapt apt"
return input_text, output_text
def SCREAMING_SNAKE_CASE_ (self : str) ->Union[str, Any]:
'''simple docstring'''
lowerCamelCase__: List[str] =CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map)
lowerCamelCase__: Union[str, Any] ="adapt react readapt apt"
lowerCamelCase__: Tuple ="adapt re@@ a@@ c@@ t re@@ adapt apt".split()
lowerCamelCase__: Dict =tokenizer.tokenize(UpperCAmelCase_)
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__: Optional[int] =tokens + [tokenizer.unk_token]
lowerCamelCase__: List[str] =[0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_) , UpperCAmelCase_)
| 273
| 1
|
"""simple docstring"""
import os
import posixpath
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
lowerCamelCase_ = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class UpperCamelCase_ (datasets.BuilderConfig ):
__magic_name__ = None
def snake_case ( A__ ,A__ ,):
import pyspark
def generate_fn():
UpperCAmelCase_ : Optional[int] = df.select("*" ,pyspark.sql.functions.spark_partition_id().alias("part_id" ) )
for partition_id in partition_order:
UpperCAmelCase_ : Union[str, Any] = df_with_partition_id.select("*" ).where(F"""part_id = {partition_id}""" ).drop("part_id" )
UpperCAmelCase_ : Dict = partition_df.collect()
UpperCAmelCase_ : Tuple = 0
for row in rows:
yield F"""{partition_id}_{row_id}""", row.asDict()
row_id += 1
return generate_fn
class UpperCamelCase_ (_BaseExamplesIterable ):
def __init__( self : List[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : List[Any]=None , ) -> Any:
UpperCAmelCase_ : Tuple = df
UpperCAmelCase_ : Any = partition_order or range(self.df.rdd.getNumPartitions() )
UpperCAmelCase_ : List[Any] = _generate_iterable_examples(self.df , self.partition_order )
def __iter__( self : Optional[int] ) -> str:
yield from self.generate_examples_fn()
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : str ) -> "SparkExamplesIterable":
UpperCAmelCase_ : str = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(snake_case_ )
return SparkExamplesIterable(self.df , partition_order=snake_case_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : str ) -> "SparkExamplesIterable":
UpperCAmelCase_ : str = self.split_shard_indices_by_worker(snake_case_ , snake_case_ )
return SparkExamplesIterable(self.df , partition_order=snake_case_ )
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
return len(self.partition_order )
class UpperCamelCase_ (datasets.DatasetBuilder ):
__magic_name__ = SparkConfig
def __init__( self : List[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any = None , lowerCAmelCase_ : List[Any] = None , **lowerCAmelCase_ : Union[str, Any] , ) -> Optional[int]:
import pyspark
UpperCAmelCase_ : List[Any] = pyspark.sql.SparkSession.builder.getOrCreate()
UpperCAmelCase_ : Union[str, Any] = df
UpperCAmelCase_ : Optional[int] = working_dir
super().__init__(
cache_dir=snake_case_ , config_name=str(self.df.semanticHash() ) , **snake_case_ , )
def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
# Returns the path of the created file.
def create_cache_and_write_probe(lowerCAmelCase_ : str ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=snake_case_ )
UpperCAmelCase_ : Tuple = os.path.join(self._cache_dir , "fs_test" + uuid.uuida().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(snake_case_ , "a" )
return [probe_file]
if self._spark.conf.get("spark.master" , "" ).startswith("local" ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
UpperCAmelCase_ : Tuple = (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(snake_case_ ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
"When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir" )
def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
return datasets.DatasetInfo(features=self.config.features )
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : str ) -> Tuple:
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : Dict ) -> List[str]:
import pyspark
def get_arrow_batch_size(lowerCAmelCase_ : Optional[Any] ):
for batch in it:
yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]} )
UpperCAmelCase_ : Tuple = self.df.count()
UpperCAmelCase_ : Optional[Any] = df_num_rows if df_num_rows <= 100 else 100
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
UpperCAmelCase_ : Optional[Any] = (
self.df.limit(snake_case_ )
.repartition(1 )
.mapInArrow(snake_case_ , "batch_bytes: long" )
.agg(pyspark.sql.functions.sum("batch_bytes" ).alias("sample_bytes" ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
UpperCAmelCase_ : List[Any] = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
UpperCAmelCase_ : List[Any] = min(snake_case_ , int(approx_total_size / max_shard_size ) )
UpperCAmelCase_ : List[Any] = self.df.repartition(snake_case_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str] , ) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
import pyspark
UpperCAmelCase_ : List[Any] = ParquetWriter if file_format == "parquet" else ArrowWriter
UpperCAmelCase_ : Any = os.path.join(self._working_dir , os.path.basename(snake_case_ ) ) if self._working_dir else fpath
UpperCAmelCase_ : Optional[int] = file_format == "parquet"
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
UpperCAmelCase_ : str = self.config.features
UpperCAmelCase_ : Optional[Any] = self._writer_batch_size
UpperCAmelCase_ : List[Any] = self._fs.storage_options
def write_arrow(lowerCAmelCase_ : Union[str, Any] ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
UpperCAmelCase_ : Union[str, Any] = pyspark.TaskContext().taskAttemptId()
UpperCAmelCase_ : Any = next(snake_case_ , snake_case_ )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=["task_id", "num_examples", "num_bytes"] , )
UpperCAmelCase_ : Dict = 0
UpperCAmelCase_ : List[Any] = writer_class(
features=snake_case_ , path=working_fpath.replace("SSSSS" , f"""{shard_id:05d}""" ).replace("TTTTT" , f"""{task_id:05d}""" ) , writer_batch_size=snake_case_ , storage_options=snake_case_ , embed_local_files=snake_case_ , )
UpperCAmelCase_ : Optional[int] = pa.Table.from_batches([first_batch] )
writer.write_table(snake_case_ )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
UpperCAmelCase_ , UpperCAmelCase_ : str = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , )
shard_id += 1
UpperCAmelCase_ : Optional[int] = writer_class(
features=writer._features , path=working_fpath.replace("SSSSS" , f"""{shard_id:05d}""" ).replace("TTTTT" , f"""{task_id:05d}""" ) , writer_batch_size=snake_case_ , storage_options=snake_case_ , embed_local_files=snake_case_ , )
UpperCAmelCase_ : str = pa.Table.from_batches([batch] )
writer.write_table(snake_case_ )
if writer._num_bytes > 0:
UpperCAmelCase_ , UpperCAmelCase_ : int = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(snake_case_ ) ):
UpperCAmelCase_ : List[Any] = os.path.join(os.path.dirname(snake_case_ ) , os.path.basename(snake_case_ ) )
shutil.move(snake_case_ , snake_case_ )
UpperCAmelCase_ : Union[str, Any] = (
self.df.mapInArrow(snake_case_ , "task_id: long, num_examples: long, num_bytes: long" )
.groupBy("task_id" )
.agg(
pyspark.sql.functions.sum("num_examples" ).alias("total_num_examples" ) , pyspark.sql.functions.sum("num_bytes" ).alias("total_num_bytes" ) , pyspark.sql.functions.count("num_bytes" ).alias("num_shards" ) , pyspark.sql.functions.collect_list("num_examples" ).alias("shard_lengths" ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : str = "arrow" , lowerCAmelCase_ : List[str] = None , lowerCAmelCase_ : Union[str, Any] = None , **lowerCAmelCase_ : List[Any] , ) -> List[Any]:
self._validate_cache_dir()
UpperCAmelCase_ : Any = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(snake_case_ )
UpperCAmelCase_ : Any = not is_remote_filesystem(self._fs )
UpperCAmelCase_ : str = os.path.join if is_local else posixpath.join
UpperCAmelCase_ : Optional[Any] = "-TTTTT-SSSSS-of-NNNNN"
UpperCAmelCase_ : Union[str, Any] = f"""{self.name}-{split_generator.name}{SUFFIX}.{file_format}"""
UpperCAmelCase_ : List[Any] = path_join(self._output_dir , snake_case_ )
UpperCAmelCase_ : Optional[int] = 0
UpperCAmelCase_ : Optional[Any] = 0
UpperCAmelCase_ : Any = 0
UpperCAmelCase_ : Any = []
UpperCAmelCase_ : List[Any] = []
for task_id, content in self._prepare_split_single(snake_case_ , snake_case_ , snake_case_ ):
(
(
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) ,
) : List[str] = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(snake_case_ )
UpperCAmelCase_ : Dict = total_num_examples
UpperCAmelCase_ : List[str] = total_num_bytes
# should rename everything at the end
logger.debug(f"""Renaming {total_shards} shards.""" )
if total_shards > 1:
UpperCAmelCase_ : Optional[Any] = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
UpperCAmelCase_ : Optional[Any] = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
lowerCAmelCase_ : str , lowerCAmelCase_ : Any , lowerCAmelCase_ : Tuple , ):
rename(
snake_case_ , fpath.replace("SSSSS" , f"""{shard_id:05d}""" ).replace("TTTTT" , f"""{task_id:05d}""" ) , fpath.replace("TTTTT-SSSSS" , f"""{global_shard_id:05d}""" ).replace("NNNNN" , f"""{total_shards:05d}""" ) , )
UpperCAmelCase_ : Any = []
UpperCAmelCase_ : int = 0
for i in range(len(snake_case_ ) ):
UpperCAmelCase_ , UpperCAmelCase_ : Any = task_id_and_num_shards[i]
for shard_id in range(snake_case_ ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(snake_case_ , len(snake_case_ ) ).map(lambda lowerCAmelCase_ : _rename_shard(*snake_case_ ) ).collect()
else:
# don't use any pattern
UpperCAmelCase_ : List[Any] = 0
UpperCAmelCase_ : Union[str, Any] = task_id_and_num_shards[0][0]
self._rename(
fpath.replace("SSSSS" , f"""{shard_id:05d}""" ).replace("TTTTT" , f"""{task_id:05d}""" ) , fpath.replace(snake_case_ , "" ) , )
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : List[Any] , ) -> SparkExamplesIterable:
return SparkExamplesIterable(self.df )
| 268
|
"""simple docstring"""
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
SCREAMING_SNAKE_CASE_ = 1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_=16 , snake_case_=13 , snake_case_=7 , snake_case_=14 , snake_case_=10 , snake_case_=19 , snake_case_=5 , snake_case_=4 , snake_case_=True , snake_case_=16 , snake_case_=2 , snake_case_=4 , snake_case_=4 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=[1, 2, 3, 4, 5] , snake_case_=25 , snake_case_=5 , ) -> Tuple:
__lowerCAmelCase = d_model
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = prediction_length
__lowerCAmelCase = context_length
__lowerCAmelCase = cardinality
__lowerCAmelCase = num_time_features
__lowerCAmelCase = lags_sequence
__lowerCAmelCase = embedding_dimension
__lowerCAmelCase = is_training
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = context_length
__lowerCAmelCase = prediction_length + label_length
__lowerCAmelCase = label_length
__lowerCAmelCase = moving_average
__lowerCAmelCase = autocorrelation_factor
def A__ ( self ) -> List[Any]:
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def A__ ( self , snake_case_ ) -> Any:
__lowerCAmelCase = config.context_length + max(config.lags_sequence )
__lowerCAmelCase = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
__lowerCAmelCase = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
__lowerCAmelCase = floats_tensor([self.batch_size, _past_length] )
__lowerCAmelCase = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
__lowerCAmelCase = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
__lowerCAmelCase = floats_tensor([self.batch_size, config.prediction_length] )
__lowerCAmelCase = {
"""past_values""": past_values,
"""static_categorical_features""": static_categorical_features,
"""past_time_features""": past_time_features,
"""past_observed_mask""": past_observed_mask,
"""future_time_features""": future_time_features,
"""future_values""": future_values,
}
return inputs_dict
def A__ ( self ) -> Union[str, Any]:
__lowerCAmelCase = self.get_config()
__lowerCAmelCase = self.prepare_autoformer_inputs_dict(snake_case_ )
return config, inputs_dict
def A__ ( self ) -> int:
__lowerCAmelCase , __lowerCAmelCase = self.prepare_config_and_inputs()
return config, inputs_dict
def A__ ( self , snake_case_ , snake_case_ ) -> int:
__lowerCAmelCase = AutoformerModel(config=snake_case_ ).to(snake_case_ ).eval()
__lowerCAmelCase = model(**snake_case_ )
__lowerCAmelCase = outputs.encoder_last_hidden_state
__lowerCAmelCase = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCAmelCase = model.get_encoder()
encoder.save_pretrained(snake_case_ )
__lowerCAmelCase = AutoformerEncoder.from_pretrained(snake_case_ ).to(snake_case_ )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = model.create_network_inputs(**snake_case_ )
__lowerCAmelCase , __lowerCAmelCase = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
__lowerCAmelCase = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
__lowerCAmelCase = encoder(inputs_embeds=snake_case_ )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
__lowerCAmelCase = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
__lowerCAmelCase = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
__lowerCAmelCase = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
__lowerCAmelCase = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCAmelCase = model.get_decoder()
decoder.save_pretrained(snake_case_ )
__lowerCAmelCase = AutoformerDecoder.from_pretrained(snake_case_ ).to(snake_case_ )
__lowerCAmelCase = decoder(
trend=snake_case_ , inputs_embeds=snake_case_ , encoder_hidden_states=snake_case_ , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class lowerCAmelCase_ ( A__ , A__ , unittest.TestCase ):
'''simple docstring'''
_snake_case = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
_snake_case = (AutoformerForPrediction,) if is_torch_available() else ()
_snake_case = {'''feature-extraction''': AutoformerModel} if is_torch_available() else {}
_snake_case = False
_snake_case = False
_snake_case = False
_snake_case = False
_snake_case = False
_snake_case = False
def A__ ( self ) -> Optional[int]:
__lowerCAmelCase = AutoformerModelTester(self )
__lowerCAmelCase = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ )
def A__ ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
def A__ ( self ) -> Dict:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(snake_case_ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case_ )
__lowerCAmelCase , __lowerCAmelCase = model_class.from_pretrained(snake_case_ , output_loading_info=snake_case_ )
self.assertEqual(info["""missing_keys"""] , [] )
def A__ ( self ) -> Optional[Any]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*snake_case_ )
@unittest.skip(reason="""Model has no tokens embeddings""" )
def A__ ( self ) -> Any:
pass
def A__ ( self ) -> str:
__lowerCAmelCase = inspect.signature(getattr(snake_case_ , """forward""" ) )
# The main input is the name of the argument after `self`
__lowerCAmelCase = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , snake_case_ )
def A__ ( self ) -> Any:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(snake_case_ )
__lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase = [*signature.parameters.keys()]
__lowerCAmelCase = [
"""past_values""",
"""past_time_features""",
"""past_observed_mask""",
"""static_categorical_features""",
"""static_real_features""",
"""future_values""",
"""future_time_features""",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("""future_observed_mask""" )
expected_arg_names.extend(
[
"""decoder_attention_mask""",
"""head_mask""",
"""decoder_head_mask""",
"""cross_attn_head_mask""",
"""encoder_outputs""",
"""past_key_values""",
"""output_hidden_states""",
"""output_attentions""",
"""use_cache""",
"""return_dict""",
] )
self.assertListEqual(arg_names[: len(snake_case_ )] , snake_case_ )
def A__ ( self ) -> Optional[int]:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = True
__lowerCAmelCase = getattr(self.model_tester , """seq_length""" , snake_case_ )
__lowerCAmelCase = getattr(self.model_tester , """decoder_seq_length""" , snake_case_ )
__lowerCAmelCase = getattr(self.model_tester , """encoder_seq_length""" , snake_case_ )
__lowerCAmelCase = getattr(self.model_tester , """d_model""" , snake_case_ )
__lowerCAmelCase = getattr(self.model_tester , """num_attention_heads""" , snake_case_ )
__lowerCAmelCase = d_model // num_attention_heads
for model_class in self.all_model_classes:
__lowerCAmelCase = True
__lowerCAmelCase = False
__lowerCAmelCase = True
__lowerCAmelCase = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
__lowerCAmelCase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__lowerCAmelCase = True
__lowerCAmelCase = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
__lowerCAmelCase = outputs.encoder_attentions
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
__lowerCAmelCase = len(snake_case_ )
__lowerCAmelCase = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(snake_case_ , snake_case_ )
# decoder attentions
__lowerCAmelCase = outputs.decoder_attentions
self.assertIsInstance(snake_case_ , (list, tuple) )
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
__lowerCAmelCase = outputs.cross_attentions
self.assertIsInstance(snake_case_ , (list, tuple) )
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
__lowerCAmelCase = True
__lowerCAmelCase = True
__lowerCAmelCase = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(out_len + 2 , len(snake_case_ ) )
__lowerCAmelCase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def A__ ( self ) -> int:
super().test_retain_grad_hidden_states_attentions()
def lowercase (_lowerCAmelCase="train-batch.pt" ):
__lowerCAmelCase = hf_hub_download(repo_id="""hf-internal-testing/tourism-monthly-batch""" , filename=_lowerCAmelCase , repo_type="""dataset""" )
__lowerCAmelCase = torch.load(_lowerCAmelCase , map_location=_lowerCAmelCase )
return batch
@require_torch
@slow
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self ) -> int:
__lowerCAmelCase = AutoformerModel.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(snake_case_ )
__lowerCAmelCase = prepare_batch()
with torch.no_grad():
__lowerCAmelCase = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , future_values=batch["""future_values"""] , future_time_features=batch["""future_time_features"""] , )[0]
__lowerCAmelCase = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , snake_case_ )
__lowerCAmelCase = torch.tensor(
[[0.3_593, -1.3_398, 0.6_330], [0.2_279, 1.5_396, -0.1_792], [0.0_450, 1.3_225, -0.2_335]] , device=snake_case_ )
self.assertTrue(torch.allclose(output[0, :3, :3] , snake_case_ , atol=snake_case_ ) )
def A__ ( self ) -> List[str]:
__lowerCAmelCase = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(snake_case_ )
__lowerCAmelCase = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
__lowerCAmelCase = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , ).encoder_last_hidden_state
__lowerCAmelCase = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , snake_case_ )
__lowerCAmelCase = torch.tensor(
[[-0.0_734, -0.9_036, 0.8_358], [4.7_186, 2.4_113, 1.9_581], [1.7_953, 2.3_558, 1.2_970]] , device=snake_case_ )
self.assertTrue(torch.allclose(output[0, :3, :3] , snake_case_ , atol=snake_case_ ) )
def A__ ( self ) -> Any:
__lowerCAmelCase = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(snake_case_ )
__lowerCAmelCase = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
__lowerCAmelCase = model.generate(
static_categorical_features=batch["""static_categorical_features"""] , past_time_features=batch["""past_time_features"""] , past_values=batch["""past_values"""] , future_time_features=batch["""future_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , )
__lowerCAmelCase = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , snake_case_ )
__lowerCAmelCase = torch.tensor([3_130.6_763, 4_056.5_293, 7_053.0_786] , device=snake_case_ )
__lowerCAmelCase = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , snake_case_ , rtol=1e-1 ) )
| 301
| 0
|
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
_lowercase : List[str] = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
_a = None
@experimental
def snake_case__ ( __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] ):
"""simple docstring"""
if ParallelBackendConfig.backend_name is None:
return _map_with_multiprocessing_pool(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
return _map_with_joblib(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def snake_case__ ( __lowerCamelCase : str , __lowerCamelCase : Dict , __lowerCamelCase : int , __lowerCamelCase : List[str] , __lowerCamelCase : int , __lowerCamelCase : Any , __lowerCamelCase : List[Any] ):
"""simple docstring"""
lowerCamelCase__ : Any =num_proc if num_proc <= len(__lowerCamelCase ) else len(__lowerCamelCase )
lowerCamelCase__ : Any =[] # We organize the splits ourselve (contiguous splits)
for index in range(__lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] =len(__lowerCamelCase ) // num_proc
lowerCamelCase__ : Union[str, Any] =len(__lowerCamelCase ) % num_proc
lowerCamelCase__ : Dict =div * index + min(__lowerCamelCase , __lowerCamelCase )
lowerCamelCase__ : str =start + div + (1 if index < mod else 0)
split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc) )
if len(__lowerCamelCase ) != sum(len(i[1] ) for i in split_kwds ):
raise ValueError(
f'''Error dividing inputs iterable among processes. '''
f'''Total number of objects {len(__lowerCamelCase )}, '''
f'''length: {sum(len(i[1] ) for i in split_kwds )}''' )
logger.info(
f'''Spawning {num_proc} processes for {len(__lowerCamelCase )} objects in slices of {[len(i[1] ) for i in split_kwds]}''' )
lowerCamelCase__ : Optional[Any] =None, None
if not disable_tqdm:
lowerCamelCase__ : Optional[Any] =(RLock(),), tqdm.set_lock
with Pool(__lowerCamelCase , initargs=__lowerCamelCase , initializer=__lowerCamelCase ) as pool:
lowerCamelCase__ : Any =pool.map(__lowerCamelCase , __lowerCamelCase )
logger.info(f'''Finished {num_proc} processes''' )
lowerCamelCase__ : Tuple =[obj for proc_res in mapped for obj in proc_res]
logger.info(f'''Unpacked {len(__lowerCamelCase )} objects''' )
return mapped
def snake_case__ ( __lowerCamelCase : int , __lowerCamelCase : Dict , __lowerCamelCase : Dict , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : List[str] ):
"""simple docstring"""
import joblib
with joblib.parallel_backend(ParallelBackendConfig.backend_name , n_jobs=__lowerCamelCase ):
return joblib.Parallel()(
joblib.delayed(__lowerCamelCase )((function, obj, types, None, True, None) ) for obj in iterable )
@experimental
@contextlib.contextmanager
def snake_case__ ( __lowerCamelCase : str ):
"""simple docstring"""
lowerCamelCase__ : Optional[int] =backend_name
if backend_name == "spark":
from joblibspark import register_spark
register_spark()
# TODO: call create_cache_and_write_probe if "download" in steps
# TODO: raise NotImplementedError when Dataset.map etc is called
try:
yield
finally:
lowerCamelCase__ : Any =None
| 367
|
"""simple docstring"""
import numpy as np
from PIL import Image
def snake_case__ ( __lowerCamelCase : np.ndarray , __lowerCamelCase : int , __lowerCamelCase : int ):
"""simple docstring"""
lowerCamelCase__ : List[Any] =np.array(__lowerCamelCase )
if arr.shape[0] != arr.shape[1]:
raise ValueError('''The input array is not a square matrix''' )
lowerCamelCase__ : int =0
lowerCamelCase__ : int =0
lowerCamelCase__ : Optional[int] =0
lowerCamelCase__ : List[Any] =0
# compute the shape of the output matrix
lowerCamelCase__ : Union[str, Any] =(arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape maxpool_shape
lowerCamelCase__ : Union[str, Any] =np.zeros((maxpool_shape, maxpool_shape) )
while i < arr.shape[0]:
if i + size > arr.shape[0]:
# if the end of the matrix is reached, break
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the maximum of the pooling matrix
lowerCamelCase__ : str =np.max(arr[i : i + size, j : j + size] )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
lowerCamelCase__ : Optional[int] =0
lowerCamelCase__ : Optional[int] =0
return updated_arr
def snake_case__ ( __lowerCamelCase : np.ndarray , __lowerCamelCase : int , __lowerCamelCase : int ):
"""simple docstring"""
lowerCamelCase__ : List[Any] =np.array(__lowerCamelCase )
if arr.shape[0] != arr.shape[1]:
raise ValueError('''The input array is not a square matrix''' )
lowerCamelCase__ : str =0
lowerCamelCase__ : List[Any] =0
lowerCamelCase__ : Optional[int] =0
lowerCamelCase__ : List[Any] =0
# compute the shape of the output matrix
lowerCamelCase__ : Dict =(arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape avgpool_shape
lowerCamelCase__ : Optional[Any] =np.zeros((avgpool_shape, avgpool_shape) )
while i < arr.shape[0]:
# if the end of the matrix is reached, break
if i + size > arr.shape[0]:
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the average of the pooling matrix
lowerCamelCase__ : Optional[int] =int(np.average(arr[i : i + size, j : j + size] ) )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
lowerCamelCase__ : Optional[Any] =0
lowerCamelCase__ : int =0
return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name="avgpooling", verbose=True)
# Loading the image
_lowercase : int = Image.open("path_to_image")
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 272
| 0
|
class __UpperCAmelCase :
def __init__( self : List[Any], __A : list ):
UpperCAmelCase : Tuple = set_counts
UpperCAmelCase : Union[str, Any] = max(__A )
UpperCAmelCase : Optional[Any] = len(__A )
UpperCAmelCase : List[Any] = [1] * num_sets
UpperCAmelCase : List[Any] = list(range(__A ) )
def __magic_name__ ( self : List[Any], __A : int, __A : int ):
UpperCAmelCase : Any = self.get_parent(__A )
UpperCAmelCase : Dict = self.get_parent(__A )
if src_parent == dst_parent:
return False
if self.ranks[dst_parent] >= self.ranks[src_parent]:
self.set_counts[dst_parent] += self.set_counts[src_parent]
UpperCAmelCase : Tuple = 0
UpperCAmelCase : List[str] = dst_parent
if self.ranks[dst_parent] == self.ranks[src_parent]:
self.ranks[dst_parent] += 1
UpperCAmelCase : Optional[int] = self.set_counts[dst_parent]
else:
self.set_counts[src_parent] += self.set_counts[dst_parent]
UpperCAmelCase : str = 0
UpperCAmelCase : Optional[Any] = src_parent
UpperCAmelCase : Any = self.set_counts[src_parent]
UpperCAmelCase : Tuple = max(self.max_set, __A )
return True
def __magic_name__ ( self : List[Any], __A : int ):
if self.parents[disj_set] == disj_set:
return disj_set
UpperCAmelCase : List[str] = self.get_parent(self.parents[disj_set] )
return self.parents[disj_set]
| 336
|
def a__ ( UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] ) -> Optional[Any]:
UpperCAmelCase : List[str] = 0
UpperCAmelCase : List[Any] = len(UpperCAmelCase ) - 1
while left <= right:
# avoid divided by 0 during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
UpperCAmelCase : Optional[int] = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(UpperCAmelCase ):
return None
UpperCAmelCase : Optional[Any] = sorted_collection[point]
if current_item == item:
return point
else:
if point < left:
UpperCAmelCase : Any = left
UpperCAmelCase : List[str] = point
elif point > right:
UpperCAmelCase : Any = right
UpperCAmelCase : List[str] = point
else:
if item < current_item:
UpperCAmelCase : Optional[int] = point - 1
else:
UpperCAmelCase : str = point + 1
return None
def a__ ( UpperCAmelCase : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : str , UpperCAmelCase : Union[str, Any] ) -> Dict:
# avoid divided by 0 during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
UpperCAmelCase : List[str] = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(UpperCAmelCase ):
return None
if sorted_collection[point] == item:
return point
elif point < left:
return interpolation_search_by_recursion(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
elif point > right:
return interpolation_search_by_recursion(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
else:
if sorted_collection[point] > item:
return interpolation_search_by_recursion(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , point - 1 )
else:
return interpolation_search_by_recursion(
UpperCAmelCase , UpperCAmelCase , point + 1 , UpperCAmelCase )
def a__ ( UpperCAmelCase : Union[str, Any] ) -> int:
if collection != sorted(UpperCAmelCase ):
raise ValueError('''Collection must be ascending sorted''' )
return True
if __name__ == "__main__":
import sys
_lowerCamelCase : Optional[int] = 0
if debug == 1:
_lowerCamelCase : Dict = [1_0, 3_0, 4_0, 4_5, 5_0, 6_6, 7_7, 9_3]
try:
__assert_sorted(collection)
except ValueError:
sys.exit("Sequence must be ascending sorted to apply interpolation search")
_lowerCamelCase : List[Any] = 6_7
_lowerCamelCase : Optional[Any] = interpolation_search(collection, target)
if result is not None:
print(f"""{target} found at positions: {result}""")
else:
print("Not found")
| 336
| 1
|
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def lowerCAmelCase_ ( __lowerCamelCase=None ):
if subparsers is not None:
__snake_case : Tuple = subparsers.add_parser("env" )
else:
__snake_case : List[str] = argparse.ArgumentParser("Accelerate env command" )
parser.add_argument(
"--config_file" , default=_UpperCAmelCase , help="The config file to use for the default values in the launching script." )
if subparsers is not None:
parser.set_defaults(func=_UpperCAmelCase )
return parser
def lowerCAmelCase_ ( __lowerCamelCase ):
__snake_case : str = torch.__version__
__snake_case : int = torch.cuda.is_available()
__snake_case : int = is_xpu_available()
__snake_case : Tuple = is_npu_available()
__snake_case : List[Any] = 'Not found'
# Get the default from the config file.
if args.config_file is not None or os.path.isfile(_UpperCAmelCase ):
__snake_case : Tuple = load_config_from_file(args.config_file ).to_dict()
__snake_case : Dict = {
'`Accelerate` version': version,
'Platform': platform.platform(),
'Python version': platform.python_version(),
'Numpy version': np.__version__,
'PyTorch version (GPU?)': F'{pt_version} ({pt_cuda_available})',
'PyTorch XPU available': str(_UpperCAmelCase ),
'PyTorch NPU available': str(_UpperCAmelCase ),
'System RAM': F'{psutil.virtual_memory().total / 1_0_2_4 ** 3:.2f} GB',
}
if pt_cuda_available:
__snake_case : Optional[int] = torch.cuda.get_device_name()
print("\nCopy-and-paste the text below in your GitHub issue\n" )
print("\n".join([F'- {prop}: {val}' for prop, val in info.items()] ) )
print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:" )
__snake_case : str = (
'\n'.join([F'\t- {prop}: {val}' for prop, val in accelerate_config.items()] )
if isinstance(_UpperCAmelCase , _UpperCAmelCase )
else F'\t{accelerate_config}'
)
print(_UpperCAmelCase )
__snake_case : Dict = accelerate_config
return info
def lowerCAmelCase_ ( ):
__snake_case : int = env_command_parser()
__snake_case : Optional[int] = parser.parse_args()
env_command(_UpperCAmelCase )
return 0
if __name__ == "__main__":
raise SystemExit(main())
| 359
|
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
_snake_case : Union[str, Any] = datasets.load_iris()
_snake_case : Tuple = np.array(data["data"])
_snake_case : int = np.array(data["target"])
_snake_case : int = data["target_names"]
_snake_case , _snake_case , _snake_case , _snake_case : Any = train_test_split(X, y)
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase ):
return np.linalg.norm(np.array(__lowerCamelCase ) - np.array(__lowerCamelCase ) )
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=5 ):
__snake_case : Optional[Any] = zip(__lowerCamelCase , __lowerCamelCase )
# List of distances of all points from the point to be classified
__snake_case : Optional[int] = []
for data_point in data:
__snake_case : Union[str, Any] = euclidean_distance(data_point[0] , __lowerCamelCase )
distances.append((distance, data_point[1]) )
# Choosing 'k' points with the least distances.
__snake_case : Dict = [i[1] for i in sorted(__lowerCamelCase )[:k]]
# Most commonly occurring class among them
# is the class into which the point is classified
__snake_case : Any = Counter(__lowerCamelCase ).most_common(1 )[0][0]
return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 134
| 0
|
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=lowercase )
class _lowerCAmelCase ( lowercase ):
"""simple docstring"""
__UpperCAmelCase : str = field(default="image-classification" ,metadata={"include_in_asdict_even_if_is_default": True} )
__UpperCAmelCase : ClassVar[Features] = Features({"image": Image()} )
__UpperCAmelCase : ClassVar[Features] = Features({"labels": ClassLabel} )
__UpperCAmelCase : str = "image"
__UpperCAmelCase : str = "labels"
def _lowercase ( self : str, UpperCAmelCase__ : Optional[int] ):
if self.label_column not in features:
raise ValueError(F"""Column {self.label_column} is not present in features.""" )
if not isinstance(features[self.label_column], UpperCAmelCase__ ):
raise ValueError(F"""Column {self.label_column} is not a ClassLabel.""" )
__lowercase = copy.deepcopy(self )
__lowercase = self.label_schema.copy()
__lowercase = features[self.label_column]
__lowercase = label_schema
return task_template
@property
def _lowercase ( self : Optional[Any] ):
return {
self.image_column: "image",
self.label_column: "labels",
}
| 17
|
"""simple docstring"""
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class _lowerCAmelCase ( pl.LightningModule ):
"""simple docstring"""
def __init__( self : Optional[Any], UpperCAmelCase__ : str ):
super().__init__()
__lowercase = model
__lowercase = 2
__lowercase = nn.Linear(self.model.config.hidden_size, self.num_labels )
def _lowercase ( self : Optional[int] ):
pass
def _A ( UpperCamelCase_ : str, UpperCamelCase_ : str, UpperCamelCase_ : str) -> str:
'''simple docstring'''
__lowercase = LongformerModel.from_pretrained(UpperCamelCase_)
__lowercase = LightningModel(UpperCamelCase_)
__lowercase = torch.load(UpperCamelCase_, map_location=torch.device("cpu"))
lightning_model.load_state_dict(ckpt["state_dict"])
# init longformer question answering model
__lowercase = LongformerForQuestionAnswering.from_pretrained(UpperCamelCase_)
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(UpperCamelCase_)
print(F"""Conversion successful. Model saved under {pytorch_dump_folder_path}""")
if __name__ == "__main__":
_a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--longformer_model',
default=None,
type=str,
required=True,
help='model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.',
)
parser.add_argument(
'--longformer_question_answering_ckpt_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch Lightning Checkpoint.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
_a = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 17
| 1
|
'''simple docstring'''
# limitations under the License.
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class __SCREAMING_SNAKE_CASE ( lowerCamelCase_ ):
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_ ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=snake_case_ , scheduler=snake_case_ )
@torch.no_grad()
def __call__( self , snake_case_ = 1 , snake_case_ = None , snake_case_ = 5_0 , snake_case_ = "pil" , snake_case_ = True , **snake_case_ , ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = torch.randn(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=snake_case_ , )
UpperCAmelCase_ : List[str] = image.to(self.device )
# set step values
self.scheduler.set_timesteps(snake_case_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
UpperCAmelCase_ : List[Any] = self.unet(snake_case_ , snake_case_ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
UpperCAmelCase_ : Optional[int] = self.scheduler.step(snake_case_ , snake_case_ , snake_case_ ).prev_sample
UpperCAmelCase_ : int = (image / 2 + 0.5).clamp(0 , 1 )
UpperCAmelCase_ : Tuple = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCAmelCase_ : Tuple = self.numpy_to_pil(snake_case_ )
if not return_dict:
return (image,), "This is a local test"
return ImagePipelineOutput(images=snake_case_ ), "This is a local test"
| 274
|
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __SCREAMING_SNAKE_CASE ( lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase_ :Tuple = ['''image_processor''', '''tokenizer''']
lowerCamelCase_ :Optional[Any] = '''ViTImageProcessor'''
lowerCamelCase_ :int = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
def __init__( self , snake_case_=None , snake_case_=None , **snake_case_ ):
'''simple docstring'''
UpperCAmelCase_ : Dict = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , snake_case_ , )
UpperCAmelCase_ : int = kwargs.pop('feature_extractor' )
UpperCAmelCase_ : str = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(snake_case_ , snake_case_ )
def __call__( self , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , **snake_case_ ):
'''simple docstring'''
if text is None and visual_prompt is None and images is None:
raise ValueError('You have to specify either text, visual prompt or images.' )
if text is not None and visual_prompt is not None:
raise ValueError('You have to specify exactly one type of prompt. Either text or visual prompt.' )
if text is not None:
UpperCAmelCase_ : Optional[int] = self.tokenizer(snake_case_ , return_tensors=snake_case_ , **snake_case_ )
if visual_prompt is not None:
UpperCAmelCase_ : Optional[Any] = self.image_processor(snake_case_ , return_tensors=snake_case_ , **snake_case_ )
if images is not None:
UpperCAmelCase_ : int = self.image_processor(snake_case_ , return_tensors=snake_case_ , **snake_case_ )
if visual_prompt is not None and images is not None:
UpperCAmelCase_ : Tuple = {
'pixel_values': image_features.pixel_values,
'conditional_pixel_values': prompt_features.pixel_values,
}
return encoding
elif text is not None and images is not None:
UpperCAmelCase_ : Optional[int] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
elif visual_prompt is not None:
UpperCAmelCase_ : Dict = {
'conditional_pixel_values': prompt_features.pixel_values,
}
return encoding
else:
return BatchEncoding(data=dict(**snake_case_ ) , tensor_type=snake_case_ )
def _UpperCamelCase ( self , *snake_case_ , **snake_case_ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*snake_case_ , **snake_case_ )
def _UpperCamelCase ( self , *snake_case_ , **snake_case_ ):
'''simple docstring'''
return self.tokenizer.decode(*snake_case_ , **snake_case_ )
@property
def _UpperCamelCase ( self ):
'''simple docstring'''
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , snake_case_ , )
return self.image_processor_class
@property
def _UpperCamelCase ( self ):
'''simple docstring'''
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , snake_case_ , )
return self.image_processor
| 274
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : List[Any] = logging.get_logger(__name__)
__A : Optional[int] = {
"facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class A_ (a_ ):
UpperCAmelCase__ = '''vit_mae'''
def __init__( self , _A=7_6_8 , _A=1_2 , _A=1_2 , _A=3_0_7_2 , _A="gelu" , _A=0.0 , _A=0.0 , _A=0.02 , _A=1E-12 , _A=2_2_4 , _A=1_6 , _A=3 , _A=True , _A=1_6 , _A=5_1_2 , _A=8 , _A=2_0_4_8 , _A=0.75 , _A=False , **_A , ):
'''simple docstring'''
super().__init__(**_A )
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = initializer_range
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = image_size
UpperCAmelCase = patch_size
UpperCAmelCase = num_channels
UpperCAmelCase = qkv_bias
UpperCAmelCase = decoder_num_attention_heads
UpperCAmelCase = decoder_hidden_size
UpperCAmelCase = decoder_num_hidden_layers
UpperCAmelCase = decoder_intermediate_size
UpperCAmelCase = mask_ratio
UpperCAmelCase = norm_pix_loss
| 273
|
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class A_ :
def __init__( self , _A , _A=1_4 , _A=7 , _A=True , _A=True , _A=True , _A=True , _A=True , _A=9_9 , _A=3_2 , _A=5 , _A=4 , _A=3_7 , _A="gelu" , _A=0.1 , _A=0.1 , _A=5_1_2 , _A=1_6 , _A=2 , _A=0.02 , _A=3 , _A=4 , _A=None , ):
'''simple docstring'''
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = seq_length
UpperCAmelCase = is_training
UpperCAmelCase = use_token_type_ids
UpperCAmelCase = use_input_mask
UpperCAmelCase = use_labels
UpperCAmelCase = use_mc_token_ids
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = type_sequence_label_size
UpperCAmelCase = initializer_range
UpperCAmelCase = num_labels
UpperCAmelCase = num_choices
UpperCAmelCase = scope
UpperCAmelCase = self.vocab_size - 1
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase = None
if self.use_input_mask:
UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase = None
if self.use_token_type_ids:
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase = None
if self.use_mc_token_ids:
UpperCAmelCase = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase = self.get_config()
UpperCAmelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _lowercase ( self ):
'''simple docstring'''
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def _lowercase ( self , _A , _A , _A , _A , _A , *_A ):
'''simple docstring'''
UpperCAmelCase = CTRLModel(config=_A )
model.to(_A )
model.eval()
model(_A , token_type_ids=_A , head_mask=_A )
model(_A , token_type_ids=_A )
UpperCAmelCase = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
def _lowercase ( self , _A , _A , _A , _A , _A , *_A ):
'''simple docstring'''
UpperCAmelCase = CTRLLMHeadModel(_A )
model.to(_A )
model.eval()
UpperCAmelCase = model(_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.prepare_config_and_inputs()
(
(
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) ,
) = config_and_inputs
UpperCAmelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''head_mask''': head_mask}
return config, inputs_dict
def _lowercase ( self , _A , _A , _A , _A , *_A ):
'''simple docstring'''
UpperCAmelCase = self.num_labels
UpperCAmelCase = CTRLForSequenceClassification(_A )
model.to(_A )
model.eval()
UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase = model(_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class A_ (a_ , a_ , a_ , unittest.TestCase ):
UpperCAmelCase__ = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
UpperCAmelCase__ = (CTRLLMHeadModel,) if is_torch_available() else ()
UpperCAmelCase__ = (
{
'''feature-extraction''': CTRLModel,
'''text-classification''': CTRLForSequenceClassification,
'''text-generation''': CTRLLMHeadModel,
'''zero-shot''': CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ = True
UpperCAmelCase__ = False
UpperCAmelCase__ = False
def _lowercase ( self , _A , _A , _A , _A , _A ):
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = CTRLModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=_A , n_embd=3_7 )
def _lowercase ( self ):
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*_A )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*_A )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def _lowercase ( self ):
'''simple docstring'''
pass
@slow
def _lowercase ( self ):
'''simple docstring'''
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase = CTRLModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
def _lowercase ( self ):
'''simple docstring'''
pass
@require_torch
class A_ (unittest.TestCase ):
def _lowercase ( self ):
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = CTRLLMHeadModel.from_pretrained('''ctrl''' )
model.to(_A )
UpperCAmelCase = torch.tensor(
[[1_1_8_5_9, 0, 1_6_1_1, 8]] , dtype=torch.long , device=_A ) # Legal the president is
UpperCAmelCase = [
1_1_8_5_9,
0,
1_6_1_1,
8,
5,
1_5_0,
2_6_4_4_9,
2,
1_9,
3_4_8,
4_6_9,
3,
2_5_9_5,
4_8,
2_0_7_4_0,
2_4_6_5_3_3,
2_4_6_5_3_3,
1_9,
3_0,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
UpperCAmelCase = model.generate(_A , do_sample=_A )
self.assertListEqual(output_ids[0].tolist() , _A )
| 273
| 1
|
"""simple docstring"""
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCamelCase__ :
def __init__( self ,A ,A=13 ,A=[30, 30] ,A=2 ,A=3 ,A=True ,A=True ,A=32 ,A=5 ,A=4 ,A=37 ,A="gelu" ,A=0.1 ,A=0.1 ,A=10 ,A=0.02 ,A=3 ,A=None ,A=8 ,A=10 ,):
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = image_size
UpperCAmelCase = patch_size
UpperCAmelCase = num_channels
UpperCAmelCase = is_training
UpperCAmelCase = use_labels
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = type_sequence_label_size
UpperCAmelCase = initializer_range
UpperCAmelCase = num_labels
UpperCAmelCase = scope
UpperCAmelCase = n_targets
UpperCAmelCase = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
UpperCAmelCase = (image_size[1] // patch_size) * (image_size[0] // patch_size)
UpperCAmelCase = num_patches + 1 + self.num_detection_tokens
def _UpperCamelCase ( self ):
UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
UpperCAmelCase = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
UpperCAmelCase = []
for i in range(self.batch_size ):
UpperCAmelCase = {}
UpperCAmelCase = torch.randint(
high=self.num_labels ,size=(self.n_targets,) ,device=A )
UpperCAmelCase = torch.rand(self.n_targets ,4 ,device=A )
labels.append(A )
UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def _UpperCamelCase ( self ):
return YolosConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=A ,initializer_range=self.initializer_range ,num_detection_tokens=self.num_detection_tokens ,num_labels=self.num_labels ,)
def _UpperCamelCase ( self ,A ,A ,A ):
UpperCAmelCase = YolosModel(config=A )
model.to(A )
model.eval()
UpperCAmelCase = model(A )
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.expected_seq_len, self.hidden_size) )
def _UpperCamelCase ( self ,A ,A ,A ):
UpperCAmelCase = YolosForObjectDetection(A )
model.to(A )
model.eval()
UpperCAmelCase = model(pixel_values=A )
UpperCAmelCase = model(A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape ,(self.batch_size, self.num_detection_tokens, 4) )
UpperCAmelCase = model(pixel_values=A ,labels=A )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape ,(self.batch_size, self.num_detection_tokens, 4) )
def _UpperCamelCase ( self ):
UpperCAmelCase = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = config_and_inputs
UpperCAmelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase__ ( snake_case , snake_case , unittest.TestCase ):
SCREAMING_SNAKE_CASE = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
SCREAMING_SNAKE_CASE = (
{'''feature-extraction''': YolosModel, '''object-detection''': YolosForObjectDetection} if is_torch_available() else {}
)
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
def _UpperCamelCase ( self ,A ,A ,A=False ):
UpperCAmelCase = super()._prepare_for_class(A ,A ,return_labels=A )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
UpperCAmelCase = []
for i in range(self.model_tester.batch_size ):
UpperCAmelCase = {}
UpperCAmelCase = torch.ones(
size=(self.model_tester.n_targets,) ,device=A ,dtype=torch.long )
UpperCAmelCase = torch.ones(
self.model_tester.n_targets ,4 ,device=A ,dtype=torch.float )
labels.append(A )
UpperCAmelCase = labels
return inputs_dict
def _UpperCamelCase ( self ):
UpperCAmelCase = YolosModelTester(self )
UpperCAmelCase = ConfigTester(self ,config_class=A ,has_text_modality=A ,hidden_size=37 )
def _UpperCamelCase ( self ):
self.config_tester.run_common_tests()
def _UpperCamelCase ( self ):
# YOLOS does not use inputs_embeds
pass
def _UpperCamelCase ( self ):
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(A )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A ,nn.Linear ) )
def _UpperCamelCase ( self ):
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(A )
UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase = [*signature.parameters.keys()]
UpperCAmelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,A )
def _UpperCamelCase ( self ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def _UpperCamelCase ( self ):
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase = True
# in YOLOS, the seq_len is different
UpperCAmelCase = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
UpperCAmelCase = True
UpperCAmelCase = False
UpperCAmelCase = True
UpperCAmelCase = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
UpperCAmelCase = model(**self._prepare_for_class(A ,A ) )
UpperCAmelCase = outputs.attentions
self.assertEqual(len(A ) ,self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
UpperCAmelCase = True
UpperCAmelCase = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
UpperCAmelCase = model(**self._prepare_for_class(A ,A ) )
UpperCAmelCase = outputs.attentions
self.assertEqual(len(A ) ,self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, seq_len, seq_len] ,)
UpperCAmelCase = len(A )
# Check attention is always last and order is fine
UpperCAmelCase = True
UpperCAmelCase = True
UpperCAmelCase = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
UpperCAmelCase = model(**self._prepare_for_class(A ,A ) )
UpperCAmelCase = 1
self.assertEqual(out_len + added_hidden_states ,len(A ) )
UpperCAmelCase = outputs.attentions
self.assertEqual(len(A ) ,self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, seq_len, seq_len] ,)
def _UpperCamelCase ( self ):
def check_hidden_states_output(A ,A ,A ):
UpperCAmelCase = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
UpperCAmelCase = model(**self._prepare_for_class(A ,A ) )
UpperCAmelCase = outputs.hidden_states
UpperCAmelCase = getattr(
self.model_tester ,"""expected_num_hidden_layers""" ,self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(A ) ,A )
# YOLOS has a different seq_length
UpperCAmelCase = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[seq_length, self.model_tester.hidden_size] ,)
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = True
check_hidden_states_output(A ,A ,A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase = True
check_hidden_states_output(A ,A ,A )
def _UpperCamelCase ( self ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*A )
@slow
def _UpperCamelCase ( self ):
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase = YolosModel.from_pretrained(A )
self.assertIsNotNone(A )
def _a ( ):
"""simple docstring"""
UpperCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowerCamelCase__ ( unittest.TestCase ):
@cached_property
def _UpperCamelCase ( self ):
return AutoImageProcessor.from_pretrained("""hustvl/yolos-small""" ) if is_vision_available() else None
@slow
def _UpperCamelCase ( self ):
UpperCAmelCase = YolosForObjectDetection.from_pretrained("""hustvl/yolos-small""" ).to(A )
UpperCAmelCase = self.default_image_processor
UpperCAmelCase = prepare_img()
UpperCAmelCase = image_processor(images=A ,return_tensors="""pt""" ).to(A )
# forward pass
with torch.no_grad():
UpperCAmelCase = model(inputs.pixel_values )
# verify outputs
UpperCAmelCase = torch.Size((1, 100, 92) )
self.assertEqual(outputs.logits.shape ,A )
UpperCAmelCase = torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] ,device=A ,)
UpperCAmelCase = torch.tensor(
[[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] ,device=A )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] ,A ,atol=1e-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] ,A ,atol=1e-4 ) )
# verify postprocessing
UpperCAmelCase = image_processor.post_process_object_detection(
A ,threshold=0.3 ,target_sizes=[image.size[::-1]] )[0]
UpperCAmelCase = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861] ).to(A )
UpperCAmelCase = [75, 75, 17, 63, 17]
UpperCAmelCase = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495] ).to(A )
self.assertEqual(len(results["""scores"""] ) ,5 )
self.assertTrue(torch.allclose(results["""scores"""] ,A ,atol=1e-4 ) )
self.assertSequenceEqual(results["""labels"""].tolist() ,A )
self.assertTrue(torch.allclose(results["""boxes"""][0, :] ,A ) )
| 234
|
"""simple docstring"""
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
_UpperCamelCase = {
"""text_branch""": """text_model""",
"""audio_branch""": """audio_model.audio_encoder""",
"""attn""": """attention.self""",
"""self.proj""": """output.dense""",
"""attention.self_mask""": """attn_mask""",
"""mlp.fc1""": """intermediate.dense""",
"""mlp.fc2""": """output.dense""",
"""norm1""": """layernorm_before""",
"""norm2""": """layernorm_after""",
"""bn0""": """batch_norm""",
}
_UpperCamelCase = AutoFeatureExtractor.from_pretrained("""laion/clap-htsat-unfused""", truncation="""rand_trunc""")
def _a ( _snake_case , _snake_case=False ):
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = create_model(
"""HTSAT-tiny""" , """roberta""" , _snake_case , precision="""fp32""" , device="""cuda:0""" if torch.cuda.is_available() else """cpu""" , enable_fusion=_snake_case , fusion_type="""aff_2d""" if enable_fusion else None , )
return model, model_cfg
def _a ( _snake_case ):
"""simple docstring"""
UpperCAmelCase = {}
UpperCAmelCase = R""".*sequential.(\d+).*"""
UpperCAmelCase = R""".*_projection.(\d+).*"""
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
UpperCAmelCase = key.replace(_snake_case , _snake_case )
if re.match(_snake_case , _snake_case ):
# replace sequential layers with list
UpperCAmelCase = re.match(_snake_case , _snake_case ).group(1 )
UpperCAmelCase = key.replace(F'''sequential.{sequential_layer}.''' , F'''layers.{int(_snake_case )//3}.linear.''' )
elif re.match(_snake_case , _snake_case ):
UpperCAmelCase = int(re.match(_snake_case , _snake_case ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
UpperCAmelCase = 1 if projecton_layer == 0 else 2
UpperCAmelCase = key.replace(F'''_projection.{projecton_layer}.''' , F'''_projection.linear{transformers_projection_layer}.''' )
if "audio" and "qkv" in key:
# split qkv into query key and value
UpperCAmelCase = value
UpperCAmelCase = mixed_qkv.size(0 ) // 3
UpperCAmelCase = mixed_qkv[:qkv_dim]
UpperCAmelCase = mixed_qkv[qkv_dim : qkv_dim * 2]
UpperCAmelCase = mixed_qkv[qkv_dim * 2 :]
UpperCAmelCase = query_layer
UpperCAmelCase = key_layer
UpperCAmelCase = value_layer
else:
UpperCAmelCase = value
return model_state_dict
def _a ( _snake_case , _snake_case , _snake_case , _snake_case=False ):
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = init_clap(_snake_case , enable_fusion=_snake_case )
clap_model.eval()
UpperCAmelCase = clap_model.state_dict()
UpperCAmelCase = rename_state_dict(_snake_case )
UpperCAmelCase = ClapConfig()
UpperCAmelCase = enable_fusion
UpperCAmelCase = ClapModel(_snake_case )
# ignore the spectrogram embedding layer
model.load_state_dict(_snake_case , strict=_snake_case )
model.save_pretrained(_snake_case )
transformers_config.save_pretrained(_snake_case )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument("""--enable_fusion""", action="""store_true""", help="""Whether to enable fusion or not""")
_UpperCamelCase = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 234
| 1
|
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
__snake_case = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def _A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Any:
"""simple docstring"""
for attribute in key.split('.' ):
__UpperCamelCase = getattr(_A , _A )
if weight_type is not None:
__UpperCamelCase = getattr(_A , _A ).shape
else:
__UpperCamelCase = hf_pointer.shape
assert hf_shape == value.shape, (
f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
__UpperCamelCase = value
elif weight_type == "weight_g":
__UpperCamelCase = value
elif weight_type == "weight_v":
__UpperCamelCase = value
elif weight_type == "bias":
__UpperCamelCase = value
else:
__UpperCamelCase = value
logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def _A ( _lowercase , _lowercase ) -> str:
"""simple docstring"""
__UpperCamelCase = []
__UpperCamelCase = fairseq_model.state_dict()
__UpperCamelCase = hf_model.feature_extractor
# if encoder has different dim to decoder -> use proj_weight
__UpperCamelCase = None
for name, value in fairseq_dict.items():
__UpperCamelCase = False
if "conv_layers" in name:
load_conv_layer(
_A , _A , _A , _A , hf_model.config.feat_extract_norm == 'group' , )
__UpperCamelCase = True
elif name.split('.' )[0] == "proj":
__UpperCamelCase = fairseq_model.proj
__UpperCamelCase = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
__UpperCamelCase = True
if "*" in mapped_key:
__UpperCamelCase = name.split(_A )[0].split('.' )[-2]
__UpperCamelCase = mapped_key.replace('*' , _A )
if "weight_g" in name:
__UpperCamelCase = 'weight_g'
elif "weight_v" in name:
__UpperCamelCase = 'weight_v'
elif "bias" in name:
__UpperCamelCase = 'bias'
elif "weight" in name:
__UpperCamelCase = 'weight'
else:
__UpperCamelCase = None
set_recursively(_A , _A , _A , _A , _A )
continue
if not is_used:
unused_weights.append(_A )
logger.warning(f'''Unused weights: {unused_weights}''' )
return proj_weight
def _A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Any:
"""simple docstring"""
__UpperCamelCase = full_name.split('conv_layers.' )[-1]
__UpperCamelCase = name.split('.' )
__UpperCamelCase = int(items[0] )
__UpperCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
__UpperCamelCase = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
__UpperCamelCase = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
__UpperCamelCase = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
__UpperCamelCase = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(_A )
def _A ( _lowercase ) -> List[str]:
"""simple docstring"""
__UpperCamelCase, __UpperCamelCase = emb.weight.shape
__UpperCamelCase = nn.Linear(_A , _A , bias=_A )
__UpperCamelCase = emb.weight.data
return lin_layer
def _A ( _lowercase ) -> List[str]:
"""simple docstring"""
with open(_A , 'r' , encoding='utf-8' ) as f:
__UpperCamelCase = f.readlines()
__UpperCamelCase = [line.split(' ' )[0] for line in lines]
__UpperCamelCase = len(_A )
__UpperCamelCase = {
'<s>': 0,
'<pad>': 1,
'</s>': 2,
'<unk>': 3,
}
vocab_dict.update(dict(zip(_A , range(4 , num_words + 4 ) ) ) )
return vocab_dict
@torch.no_grad()
def _A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , ) -> Union[str, Any]:
"""simple docstring"""
__UpperCamelCase = WavaVecaConfig.from_pretrained(_A )
__UpperCamelCase = SpeechaTextaConfig.from_pretrained(
_A , vocab_size=_A , decoder_layers=_A , do_stable_layer_norm=_A )
__UpperCamelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=_A , return_attention_mask=_A , )
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
__UpperCamelCase = model[0].eval()
# set weights for wav2vec2 encoder
__UpperCamelCase = WavaVecaModel(_A )
__UpperCamelCase = recursively_load_weights_wavaveca(model.encoder , _A )
__UpperCamelCase = SpeechaTextaForCausalLM(_A )
__UpperCamelCase, __UpperCamelCase = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=_A )
# set output linear layer
unexpected_keys.remove('embed_out' )
__UpperCamelCase = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(f'''The following keys are missing when loading the decoder weights: {missing_keys}''' )
logger.warning(f'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' )
__UpperCamelCase = SpeechEncoderDecoderModel(encoder=_A , decoder=_A )
__UpperCamelCase = False
# add projection layer
__UpperCamelCase = nn.Parameter(projection_layer.weight )
__UpperCamelCase = nn.Parameter(projection_layer.bias )
__UpperCamelCase = create_vocab_dict(_A )
with open(os.path.join(_A , 'vocab.json' ) , 'w' ) as fp:
json.dump(_A , _A )
__UpperCamelCase = SpeechaTextaTokenizer(os.path.join(_A , 'vocab.json' ) )
tokenizer.save_pretrained(_A )
__UpperCamelCase = hf_wavavec.config.to_dict()
__UpperCamelCase = tokenizer.pad_token_id
__UpperCamelCase = tokenizer.bos_token_id
__UpperCamelCase = tokenizer.eos_token_id
__UpperCamelCase = 'speech_to_text_2'
__UpperCamelCase = 'wav2vec2'
__UpperCamelCase = SpeechEncoderDecoderConfig.from_dict(_A )
hf_wavavec.save_pretrained(_A )
feature_extractor.save_pretrained(_A )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument(
'''--encoder_config_path''',
default='''facebook/wav2vec2-large-lv60''',
type=str,
help='''Path to hf encoder wav2vec2 checkpoint config''',
)
parser.add_argument(
'''--decoder_config_path''',
default='''facebook/s2t-small-mustc-en-fr-st''',
type=str,
help='''Path to hf decoder s2t checkpoint config''',
)
parser.add_argument('''--vocab_size''', default=1_0_2_2_4, type=int, help='''Vocab size of decoder''')
parser.add_argument('''--num_decoder_layers''', default=7, type=int, help='''Number of decoder layers''')
__snake_case = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 310
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__lowercase = {
'''configuration_nezha''': ['''NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''NezhaConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
'''NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''NezhaForNextSentencePrediction''',
'''NezhaForMaskedLM''',
'''NezhaForPreTraining''',
'''NezhaForMultipleChoice''',
'''NezhaForQuestionAnswering''',
'''NezhaForSequenceClassification''',
'''NezhaForTokenClassification''',
'''NezhaModel''',
'''NezhaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
__lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 272
| 0
|
import os
def A_ ( ):
with open(os.path.dirname(_UpperCAmelCase ) + "/p022_names.txt" ) as file:
SCREAMING_SNAKE_CASE_: List[str] = str(file.readlines()[0] )
SCREAMING_SNAKE_CASE_: Optional[int] = names.replace("\"" , "" ).split("," )
names.sort()
SCREAMING_SNAKE_CASE_: Dict = 0
SCREAMING_SNAKE_CASE_: List[str] = 0
for i, name in enumerate(_UpperCAmelCase ):
for letter in name:
name_score += ord(_UpperCAmelCase ) - 64
total_score += (i + 1) * name_score
SCREAMING_SNAKE_CASE_: Optional[int] = 0
return total_score
if __name__ == "__main__":
print(solution())
| 127
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase : Optional[int] = {
"""configuration_nllb_moe""": [
"""NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""NllbMoeConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Optional[int] = [
"""NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NllbMoeForConditionalGeneration""",
"""NllbMoeModel""",
"""NllbMoePreTrainedModel""",
"""NllbMoeTop2Router""",
"""NllbMoeSparseMLP""",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
NllbMoeTopaRouter,
)
else:
import sys
lowerCAmelCase : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 127
| 1
|
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class lowerCAmelCase :
UpperCAmelCase__ = 42
UpperCAmelCase__ = 42
class lowerCAmelCase :
def __init__( self : List[str] , UpperCAmelCase : int ) -> Dict:
lowerCamelCase__ : list[list[Edge]] = [[] for _ in range(UpperCAmelCase )]
lowerCamelCase__ : str = size
def __getitem__( self : List[str] , UpperCAmelCase : int ) -> Iterator[Edge]:
return iter(self._graph[vertex] )
@property
def A_ ( self : Dict ) -> List[Any]:
return self._size
def A_ ( self : Tuple , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : int ) -> List[Any]:
if weight not in (0, 1):
raise ValueError('Edge weight must be either 0 or 1.' )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError('Vertex indexes must be in [0; size).' )
self._graph[from_vertex].append(Edge(UpperCAmelCase , UpperCAmelCase ) )
def A_ ( self : Union[str, Any] , UpperCAmelCase : int , UpperCAmelCase : int ) -> int | None:
lowerCamelCase__ : Dict = deque([start_vertex] )
lowerCamelCase__ : list[int | None] = [None] * self.size
lowerCamelCase__ : int = 0
while queue:
lowerCamelCase__ : str = queue.popleft()
lowerCamelCase__ : List[Any] = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
lowerCamelCase__ : Any = current_distance + edge.weight
lowerCamelCase__ : List[Any] = distances[edge.destination_vertex]
if (
isinstance(UpperCAmelCase , UpperCAmelCase )
and new_distance >= dest_vertex_distance
):
continue
lowerCamelCase__ : List[Any] = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError('No path from start_vertex to finish_vertex.' )
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 50
|
'''simple docstring'''
__snake_case : Tuple = '\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
__snake_case : List[str] = [{'type': 'code', 'content': INSTALL_CONTENT}]
__snake_case : Any = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 134
| 0
|
'''simple docstring'''
def _a ( _lowerCamelCase ) -> list[list[int]]:
"""simple docstring"""
__snake_case : List[Any] = []
if len(_lowerCamelCase ) == 1:
return [nums.copy()]
for _ in range(len(_lowerCamelCase ) ):
__snake_case : int = nums.pop(0 )
__snake_case : Optional[int] = permute(_lowerCamelCase )
for perm in permutations:
perm.append(_lowerCamelCase )
result.extend(_lowerCamelCase )
nums.append(_lowerCamelCase )
return result
def _a ( _lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
def backtrack(_lowerCamelCase ):
if start == len(_lowerCamelCase ) - 1:
output.append(nums[:] )
else:
for i in range(_lowerCamelCase , len(_lowerCamelCase ) ):
__snake_case , __snake_case : Tuple = nums[i], nums[start]
backtrack(start + 1 )
__snake_case , __snake_case : Any = nums[i], nums[start] # backtrack
__snake_case : str = []
backtrack(0 )
return output
if __name__ == "__main__":
import doctest
# use res to print the data in permute2 function
__UpperCamelCase = permutea([1, 2, 3])
print(res)
doctest.testmod()
| 13
|
'''simple docstring'''
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 13
| 1
|
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A : Any = logging.get_logger(__name__)
A : Optional[int] = {
'''huggingface/autoformer-tourism-monthly''': '''https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json''',
}
class A (SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCamelCase : List[str] = '''autoformer'''
__lowerCamelCase : List[str] = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
'''num_hidden_layers''': '''encoder_layers''',
}
def __init__( self : List[Any] , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : str = "student_t" , __lowerCAmelCase : str = "nll" , __lowerCAmelCase : int = 1 , __lowerCAmelCase : List[int] = [1, 2, 3, 4, 5, 6, 7] , __lowerCAmelCase : bool = True , __lowerCAmelCase : int = 0 , __lowerCAmelCase : int = 0 , __lowerCAmelCase : int = 0 , __lowerCAmelCase : int = 0 , __lowerCAmelCase : Optional[List[int]] = None , __lowerCAmelCase : Optional[List[int]] = None , __lowerCAmelCase : int = 64 , __lowerCAmelCase : int = 2 , __lowerCAmelCase : int = 2 , __lowerCAmelCase : int = 2 , __lowerCAmelCase : int = 2 , __lowerCAmelCase : int = 32 , __lowerCAmelCase : int = 32 , __lowerCAmelCase : str = "gelu" , __lowerCAmelCase : float = 0.1 , __lowerCAmelCase : float = 0.1 , __lowerCAmelCase : float = 0.1 , __lowerCAmelCase : float = 0.1 , __lowerCAmelCase : float = 0.1 , __lowerCAmelCase : int = 1_00 , __lowerCAmelCase : float = 0.0_2 , __lowerCAmelCase : bool = True , __lowerCAmelCase : str=True , __lowerCAmelCase : int = 10 , __lowerCAmelCase : int = 25 , __lowerCAmelCase : int = 3 , **__lowerCAmelCase : Optional[int] , ) -> Union[str, Any]:
"""simple docstring"""
A__ = prediction_length
A__ = context_length if context_length is not None else prediction_length
A__ = distribution_output
A__ = loss
A__ = input_size
A__ = num_time_features
A__ = lags_sequence
A__ = scaling
A__ = num_dynamic_real_features
A__ = num_static_real_features
A__ = num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(__lowerCAmelCase ) != num_static_categorical_features:
raise ValueError(
"""The cardinality should be a list of the same length as `num_static_categorical_features`""" )
A__ = cardinality
else:
A__ = [0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(__lowerCAmelCase ) != num_static_categorical_features:
raise ValueError(
"""The embedding dimension should be a list of the same length as `num_static_categorical_features`""" )
A__ = embedding_dimension
else:
A__ = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
A__ = num_parallel_samples
# Transformer architecture configuration
A__ = input_size * len(self.lags_sequence ) + self._number_of_features
A__ = d_model
A__ = encoder_attention_heads
A__ = decoder_attention_heads
A__ = encoder_ffn_dim
A__ = decoder_ffn_dim
A__ = encoder_layers
A__ = decoder_layers
A__ = dropout
A__ = attention_dropout
A__ = activation_dropout
A__ = encoder_layerdrop
A__ = decoder_layerdrop
A__ = activation_function
A__ = init_std
A__ = use_cache
# Autoformer
A__ = label_length
A__ = moving_average
A__ = autocorrelation_factor
super().__init__(is_encoder_decoder=__lowerCAmelCase , **__lowerCAmelCase )
@property
def a_ ( self : Optional[int] ) -> int:
"""simple docstring"""
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 274
|
class A (SCREAMING_SNAKE_CASE ):
'''simple docstring'''
pass
class A (SCREAMING_SNAKE_CASE ):
'''simple docstring'''
pass
class A :
'''simple docstring'''
def __init__( self : List[Any] ) -> str:
"""simple docstring"""
A__ = [
[],
[],
[],
]
def a_ ( self : Dict , __lowerCAmelCase : int , __lowerCAmelCase : int ) -> None:
"""simple docstring"""
try:
if len(self.queues[priority] ) >= 1_00:
raise OverflowError("""Maximum queue size is 100""" )
self.queues[priority].append(__lowerCAmelCase )
except IndexError:
raise ValueError("""Valid priorities are 0, 1, and 2""" )
def a_ ( self : Optional[Any] ) -> int:
"""simple docstring"""
for queue in self.queues:
if queue:
return queue.pop(0 )
raise UnderFlowError("""All queues are empty""" )
def __str__( self : Tuple ) -> str:
"""simple docstring"""
return "\n".join(f'Priority {i}: {q}' for i, q in enumerate(self.queues ) )
class A :
'''simple docstring'''
def __init__( self : int ) -> str:
"""simple docstring"""
A__ = []
def a_ ( self : int , __lowerCAmelCase : int ) -> None:
"""simple docstring"""
if len(self.queue ) == 1_00:
raise OverFlowError("""Maximum queue size is 100""" )
self.queue.append(__lowerCAmelCase )
def a_ ( self : List[str] ) -> int:
"""simple docstring"""
if not self.queue:
raise UnderFlowError("""The queue is empty""" )
else:
A__ = min(self.queue )
self.queue.remove(__lowerCAmelCase )
return data
def __str__( self : List[Any] ) -> str:
"""simple docstring"""
return str(self.queue )
def __lowerCamelCase ( ) -> Optional[Any]:
"""simple docstring"""
A__ = FixedPriorityQueue()
fpq.enqueue(0 , 1_0 )
fpq.enqueue(1 , 7_0 )
fpq.enqueue(0 , 1_0_0 )
fpq.enqueue(2 , 1 )
fpq.enqueue(2 , 5 )
fpq.enqueue(1 , 7 )
fpq.enqueue(2 , 4 )
fpq.enqueue(1 , 6_4 )
fpq.enqueue(0 , 1_2_8 )
print(__a )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(__a )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def __lowerCamelCase ( ) -> int:
"""simple docstring"""
A__ = ElementPriorityQueue()
epq.enqueue(1_0 )
epq.enqueue(7_0 )
epq.enqueue(1_0_0 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(6_4 )
epq.enqueue(1_2_8 )
print(__a )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(__a )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 274
| 1
|
"""simple docstring"""
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
A_ : Any =False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class __a ( unittest.TestCase ):
def __init__( self , a__ , a__=7 , a__=3 , a__=18 , a__=30 , a__=4_00 , a__=None , a__=True , a__=True , a__=None , ):
_lowerCamelCase = size if size is not None else {'height': 20, 'width': 20}
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = num_channels
_lowerCamelCase = image_size
_lowerCamelCase = min_resolution
_lowerCamelCase = max_resolution
_lowerCamelCase = size
_lowerCamelCase = do_normalize
_lowerCamelCase = do_convert_rgb
_lowerCamelCase = [5_12, 10_24, 20_48, 40_96]
_lowerCamelCase = patch_size if patch_size is not None else {'height': 16, 'width': 16}
def snake_case_ ( self ):
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def snake_case_ ( self ):
_lowerCamelCase = 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg'
_lowerCamelCase = Image.open(requests.get(a__ , stream=a__ ).raw ).convert('RGB' )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`." , )
@require_torch
@require_vision
class __a ( lowerCAmelCase__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE__ : Tuple = PixaStructImageProcessor if is_vision_available() else None
def snake_case_ ( self ):
_lowerCamelCase = PixaStructImageProcessingTester(self )
@property
def snake_case_ ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case_ ( self ):
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a__ , 'do_normalize' ) )
self.assertTrue(hasattr(a__ , 'do_convert_rgb' ) )
def snake_case_ ( self ):
_lowerCamelCase = self.image_processor_tester.prepare_dummy_image()
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
_lowerCamelCase = 20_48
_lowerCamelCase = image_processor(a__ , return_tensors='pt' , max_patches=a__ )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1e-3 , rtol=1e-3 ) )
def snake_case_ ( self ):
# Initialize image_processor
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ )
for image in image_inputs:
self.assertIsInstance(a__ , Image.Image )
# Test not batched input
_lowerCamelCase = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_lowerCamelCase = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=a__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_lowerCamelCase = image_processor(
a__ , return_tensors='pt' , max_patches=a__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def snake_case_ ( self ):
# Initialize image_processor
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ )
for image in image_inputs:
self.assertIsInstance(a__ , Image.Image )
# Test not batched input
_lowerCamelCase = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
_lowerCamelCase = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(a__ ):
_lowerCamelCase = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=a__ ).flattened_patches
_lowerCamelCase = 'Hello'
_lowerCamelCase = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=a__ , header_text=a__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_lowerCamelCase = image_processor(
a__ , return_tensors='pt' , max_patches=a__ , header_text=a__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def snake_case_ ( self ):
# Initialize image_processor
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ , numpify=a__ )
for image in image_inputs:
self.assertIsInstance(a__ , np.ndarray )
_lowerCamelCase = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_lowerCamelCase = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=a__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_lowerCamelCase = image_processor(
a__ , return_tensors='pt' , max_patches=a__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def snake_case_ ( self ):
# Initialize image_processor
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ , torchify=a__ )
for image in image_inputs:
self.assertIsInstance(a__ , torch.Tensor )
# Test not batched input
_lowerCamelCase = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_lowerCamelCase = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=a__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_lowerCamelCase = image_processor(
a__ , return_tensors='pt' , max_patches=a__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`." , )
@require_torch
@require_vision
class __a ( lowerCAmelCase__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE__ : str = PixaStructImageProcessor if is_vision_available() else None
def snake_case_ ( self ):
_lowerCamelCase = PixaStructImageProcessingTester(self , num_channels=4 )
_lowerCamelCase = 3
@property
def snake_case_ ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case_ ( self ):
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a__ , 'do_normalize' ) )
self.assertTrue(hasattr(a__ , 'do_convert_rgb' ) )
def snake_case_ ( self ):
# Initialize image_processor
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ )
for image in image_inputs:
self.assertIsInstance(a__ , Image.Image )
# Test not batched input
_lowerCamelCase = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_lowerCamelCase = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=a__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_lowerCamelCase = image_processor(
a__ , return_tensors='pt' , max_patches=a__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
| 80
|
"""simple docstring"""
import numpy as np
def SCREAMING_SNAKE_CASE_ ( snake_case : np.ndarray )-> np.ndarray:
return 1 / (1 + np.exp(-vector ))
def SCREAMING_SNAKE_CASE_ ( snake_case : np.ndarray )-> np.ndarray:
return vector * sigmoid(snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 80
| 1
|
'''simple docstring'''
import os
import zipfile
import pytest
from datasets.utils.extract import (
BzipaExtractor,
Extractor,
GzipExtractor,
LzaExtractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lza, require_pyazr, require_zstandard
@pytest.mark.parametrize(
"compression_format, is_archive" , [
("7z", True),
("bz2", False),
("gzip", False),
("lz4", False),
("tar", True),
("xz", False),
("zip", True),
("zstd", False),
] , )
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ):
_UpperCAmelCase : List[str] = {
"7z": (seven_zip_file, SevenZipExtractor),
"bz2": (bza_file, BzipaExtractor),
"gzip": (gz_file, GzipExtractor),
"lz4": (lza_file, LzaExtractor),
"tar": (tar_file, TarExtractor),
"xz": (xz_file, XzExtractor),
"zip": (zip_file, ZipExtractor),
"zstd": (zstd_file, ZstdExtractor),
}
_UpperCAmelCase , _UpperCAmelCase : Tuple = input_paths_and_base_extractors[compression_format]
if input_path is None:
_UpperCAmelCase : Any = F"""for '{compression_format}' compression_format, """
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(__lowerCAmelCase )
assert base_extractor.is_extractable(__lowerCAmelCase )
_UpperCAmelCase : Union[str, Any] = tmp_path / ("extracted" if is_archive else "extracted.txt")
base_extractor.extract(__lowerCAmelCase , __lowerCAmelCase )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
_UpperCAmelCase : List[Any] = file_path.read_text(encoding="utf-8" )
else:
_UpperCAmelCase : Optional[Any] = output_path.read_text(encoding="utf-8" )
_UpperCAmelCase : str = text_file.read_text(encoding="utf-8" )
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
"compression_format, is_archive" , [
("7z", True),
("bz2", False),
("gzip", False),
("lz4", False),
("tar", True),
("xz", False),
("zip", True),
("zstd", False),
] , )
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ):
_UpperCAmelCase : Tuple = {
"7z": seven_zip_file,
"bz2": bza_file,
"gzip": gz_file,
"lz4": lza_file,
"tar": tar_file,
"xz": xz_file,
"zip": zip_file,
"zstd": zstd_file,
}
_UpperCAmelCase : List[Any] = input_paths[compression_format]
if input_path is None:
_UpperCAmelCase : Optional[int] = F"""for '{compression_format}' compression_format, """
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(__lowerCAmelCase )
_UpperCAmelCase : Optional[int] = Extractor.infer_extractor_format(__lowerCAmelCase )
assert extractor_format is not None
_UpperCAmelCase : Dict = tmp_path / ("extracted" if is_archive else "extracted.txt")
Extractor.extract(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
_UpperCAmelCase : Any = file_path.read_text(encoding="utf-8" )
else:
_UpperCAmelCase : Dict = output_path.read_text(encoding="utf-8" )
_UpperCAmelCase : Dict = text_file.read_text(encoding="utf-8" )
assert extracted_file_content == expected_file_content
@pytest.fixture
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
import tarfile
_UpperCAmelCase : Dict = tmp_path / "data_dot_dot"
directory.mkdir()
_UpperCAmelCase : Any = directory / "tar_file_with_dot_dot.tar"
with tarfile.TarFile(__lowerCAmelCase , "w" ) as f:
f.add(__lowerCAmelCase , arcname=os.path.join(".." , text_file.name ) )
return path
@pytest.fixture
def __lowerCAmelCase (__lowerCAmelCase ):
import tarfile
_UpperCAmelCase : Optional[Any] = tmp_path / "data_sym_link"
directory.mkdir()
_UpperCAmelCase : List[Any] = directory / "tar_file_with_sym_link.tar"
os.symlink(".." , directory / "subdir" , target_is_directory=__lowerCAmelCase )
with tarfile.TarFile(__lowerCAmelCase , "w" ) as f:
f.add(str(directory / "subdir" ) , arcname="subdir" ) # str required by os.readlink on Windows and Python < 3.8
return path
@pytest.mark.parametrize(
"insecure_tar_file, error_log" , [("tar_file_with_dot_dot", "illegal path"), ("tar_file_with_sym_link", "Symlink")] , )
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : List[Any] = {
"tar_file_with_dot_dot": tar_file_with_dot_dot,
"tar_file_with_sym_link": tar_file_with_sym_link,
}
_UpperCAmelCase : Any = insecure_tar_files[insecure_tar_file]
_UpperCAmelCase : Dict = tmp_path / "extracted"
TarExtractor.extract(__lowerCAmelCase , __lowerCAmelCase )
assert caplog.text
for record in caplog.records:
assert record.levelname == "ERROR"
assert error_log in record.msg
def __lowerCAmelCase (__lowerCAmelCase ):
# We should have less false positives than zipfile.is_zipfile
# We do that by checking only the magic number
_UpperCAmelCase : Union[str, Any] = tmpdir / "not_a_zip_file"
# From: https://github.com/python/cpython/pull/5053
_UpperCAmelCase : Optional[Any] = (
B"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
B"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
B"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
B"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
)
with not_a_zip_file.open("wb" ) as f:
f.write(__lowerCAmelCase )
assert zipfile.is_zipfile(str(__lowerCAmelCase ) ) # is a false positive for `zipfile`
assert not ZipExtractor.is_extractable(__lowerCAmelCase ) # but we're right
| 234
|
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : Dict = filter(lambda __lowerCAmelCase : p.requires_grad , model.parameters() )
_UpperCAmelCase : Optional[Any] = sum([np.prod(p.size() ) for p in model_parameters] )
return params
lowerCamelCase__ = logging.getLogger(__name__)
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
if metric == "rouge2":
_UpperCAmelCase : List[str] = "{val_avg_rouge2:.4f}-{step_count}"
elif metric == "bleu":
_UpperCAmelCase : int = "{val_avg_bleu:.4f}-{step_count}"
elif metric == "em":
_UpperCAmelCase : str = "{val_avg_em:.4f}-{step_count}"
elif metric == "loss":
_UpperCAmelCase : List[Any] = "{val_avg_loss:.4f}-{step_count}"
else:
raise NotImplementedError(
F"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"""
" function." )
_UpperCAmelCase : Any = ModelCheckpoint(
dirpath=__lowerCAmelCase , filename=__lowerCAmelCase , monitor=F"""val_{metric}""" , mode="max" , save_top_k=1 , every_n_epochs=1 , )
return checkpoint_callback
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
return EarlyStopping(
monitor=F"""val_{metric}""" , mode="min" if "loss" in metric else "max" , patience=__lowerCAmelCase , verbose=__lowerCAmelCase , )
class lowerCAmelCase__ ( pl.Callback ):
def lowerCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[int] ) ->int:
'''simple docstring'''
_UpperCAmelCase : Dict = {F"""lr_group_{i}""": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(lowerCamelCase__ )
@rank_zero_only
def lowerCAmelCase__ ( self : Optional[Any] , lowerCamelCase__ : pl.Trainer , lowerCamelCase__ : pl.LightningModule , lowerCamelCase__ : str , lowerCamelCase__ : Union[str, Any]=True ) ->None:
'''simple docstring'''
logger.info(F"""***** {type_path} results at step {trainer.global_step:05d} *****""" )
_UpperCAmelCase : str = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]} )
# Log results
_UpperCAmelCase : Any = Path(pl_module.hparams.output_dir )
if type_path == "test":
_UpperCAmelCase : List[Any] = od / "test_results.txt"
_UpperCAmelCase : Union[str, Any] = od / "test_generations.txt"
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
_UpperCAmelCase : Optional[Any] = od / F"""{type_path}_results/{trainer.global_step:05d}.txt"""
_UpperCAmelCase : Dict = od / F"""{type_path}_generations/{trainer.global_step:05d}.txt"""
results_file.parent.mkdir(exist_ok=lowerCamelCase__ )
generations_file.parent.mkdir(exist_ok=lowerCamelCase__ )
with open(lowerCamelCase__ , "a+" ) as writer:
for key in sorted(lowerCamelCase__ ):
if key in ["log", "progress_bar", "preds"]:
continue
_UpperCAmelCase : Dict = metrics[key]
if isinstance(lowerCamelCase__ , torch.Tensor ):
_UpperCAmelCase : Tuple = val.item()
_UpperCAmelCase : str = F"""{key}: {val:.6f}\n"""
writer.write(lowerCamelCase__ )
if not save_generations:
return
if "preds" in metrics:
_UpperCAmelCase : Optional[int] = "\n".join(metrics["preds"] )
generations_file.open("w+" ).write(lowerCamelCase__ )
@rank_zero_only
def lowerCAmelCase__ ( self : Dict , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[int] ) ->Any:
'''simple docstring'''
try:
_UpperCAmelCase : Tuple = pl_module.model.model.num_parameters()
except AttributeError:
_UpperCAmelCase : int = pl_module.model.num_parameters()
_UpperCAmelCase : int = count_trainable_parameters(lowerCamelCase__ )
# mp stands for million parameters
trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1E6, "grad_mp": n_trainable_pars / 1E6} )
@rank_zero_only
def lowerCAmelCase__ ( self : Dict , lowerCamelCase__ : pl.Trainer , lowerCamelCase__ : pl.LightningModule ) ->int:
'''simple docstring'''
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(lowerCamelCase__ , lowerCamelCase__ , "test" )
@rank_zero_only
def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : pl.Trainer , lowerCamelCase__ : Union[str, Any] ) ->Union[str, Any]:
'''simple docstring'''
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 234
| 1
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE :int = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE :Tuple = torch.device("""cpu""")
def lowerCAmelCase( )-> int:
"""simple docstring"""
UpperCamelCase_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCamelCase_ = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw )
return im
def lowerCAmelCase( SCREAMING_SNAKE_CASE_ )-> List[Any]:
"""simple docstring"""
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.17_03E00, 2.11_07E00, -2.08_11E00, 8.86_85E-01, 2.43_60E-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.96_36E-01, 2.34_78E-01, -1.69_63E00, -1.73_81E00, -8.63_37E-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.27_68E-01, -4.74_29E-01, -1.08_97E00, -1.02_48E00, 3.55_23E-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.53_30E-01, 2.42_11E-01, -6.01_85E-01, -8.27_89E-01, -6.04_46E-02] )
def lowerCAmelCase( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> Tuple:
"""simple docstring"""
UpperCamelCase_ = dct.pop(SCREAMING_SNAKE_CASE_ )
UpperCamelCase_ = val
def lowerCAmelCase( SCREAMING_SNAKE_CASE_ )-> Any:
"""simple docstring"""
UpperCamelCase_ = []
for k in state_dict.keys():
UpperCamelCase_ = k
if ".pwconv" in k:
UpperCamelCase_ = k_new.replace(".pwconv" , ".point_wise_conv" )
if ".dwconv" in k:
UpperCamelCase_ = k_new.replace(".dwconv" , ".depth_wise_conv" )
if ".Proj." in k:
UpperCamelCase_ = k_new.replace(".Proj." , ".proj." )
if "patch_embed" in k_new:
UpperCamelCase_ = k_new.replace("patch_embed" , "swiftformer.patch_embed.patch_embedding" )
if "network" in k_new:
UpperCamelCase_ = k_new.split("." )
if ls[2].isdigit():
UpperCamelCase_ = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:] )
else:
UpperCamelCase_ = k_new.replace("network" , "swiftformer.encoder.network" )
rename_keys.append((k, k_new) )
return rename_keys
@torch.no_grad()
def lowerCAmelCase( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ = SwiftFormerConfig()
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
UpperCamelCase_ = 1_0_0_0
UpperCamelCase_ = "huggingface/label-files"
UpperCamelCase_ = "imagenet-1k-id2label.json"
UpperCamelCase_ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type="dataset" ) , "r" ) )
UpperCamelCase_ = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()}
UpperCamelCase_ = idalabel
UpperCamelCase_ = {v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
UpperCamelCase_ = [3, 3, 6, 4]
UpperCamelCase_ = [4_8, 5_6, 1_1_2, 2_2_0]
elif swiftformer_name == "swiftformer_s":
UpperCamelCase_ = [3, 3, 9, 6]
UpperCamelCase_ = [4_8, 6_4, 1_6_8, 2_2_4]
elif swiftformer_name == "swiftformer_l1":
UpperCamelCase_ = [4, 3, 1_0, 5]
UpperCamelCase_ = [4_8, 9_6, 1_9_2, 3_8_4]
elif swiftformer_name == "swiftformer_l3":
UpperCamelCase_ = [4, 4, 1_2, 6]
UpperCamelCase_ = [6_4, 1_2_8, 3_2_0, 5_1_2]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith("https" ):
UpperCamelCase_ = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE_ , map_location="cpu" , check_hash=SCREAMING_SNAKE_CASE_ )
else:
UpperCamelCase_ = torch.load(SCREAMING_SNAKE_CASE_ , map_location="cpu" )
UpperCamelCase_ = checkpoint
UpperCamelCase_ = create_rename_keys(SCREAMING_SNAKE_CASE_ )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# load HuggingFace model
UpperCamelCase_ = SwiftFormerForImageClassification(SCREAMING_SNAKE_CASE_ ).eval()
hf_model.load_state_dict(SCREAMING_SNAKE_CASE_ )
# prepare test inputs
UpperCamelCase_ = prepare_img()
UpperCamelCase_ = ViTImageProcessor.from_pretrained("preprocessor_config" )
UpperCamelCase_ = processor(images=SCREAMING_SNAKE_CASE_ , return_tensors="pt" )
# compare outputs from both models
UpperCamelCase_ = get_expected_output(SCREAMING_SNAKE_CASE_ )
UpperCamelCase_ = hf_model(inputs["pixel_values"] ).logits
assert hf_logits.shape == torch.Size([1, 1_0_0_0] )
assert torch.allclose(hf_logits[0, 0:5] , SCREAMING_SNAKE_CASE_ , atol=1E-3 )
Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}" )
hf_model.save_pretrained(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE :Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swiftformer_name""",
default="""swiftformer_xs""",
choices=["""swiftformer_xs""", """swiftformer_s""", """swiftformer_l1""", """swiftformer_l3"""],
type=str,
help="""Name of the SwiftFormer model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""./converted_outputs/""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--original_ckpt""", default=None, type=str, help="""Path to the original model checkpoint.""")
SCREAMING_SNAKE_CASE :List[str] = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 358
|
from ...processing_utils import ProcessorMixin
class __magic_name__ ( snake_case ):
UpperCamelCase_ :str = """SpeechT5FeatureExtractor"""
UpperCamelCase_ :Optional[int] = """SpeechT5Tokenizer"""
def __init__( self , _lowercase , _lowercase )-> Union[str, Any]:
super().__init__(_lowercase , _lowercase )
def __call__( self , *_lowercase , **_lowercase )-> Tuple:
UpperCamelCase_ = kwargs.pop("audio" , _lowercase )
UpperCamelCase_ = kwargs.pop("text" , _lowercase )
UpperCamelCase_ = kwargs.pop("text_target" , _lowercase )
UpperCamelCase_ = kwargs.pop("audio_target" , _lowercase )
UpperCamelCase_ = kwargs.pop("sampling_rate" , _lowercase )
if audio is not None and text is not None:
raise ValueError(
"Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?" )
if audio_target is not None and text_target is not None:
raise ValueError(
"Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?" )
if audio is None and audio_target is None and text is None and text_target is None:
raise ValueError(
"You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process." )
if audio is not None:
UpperCamelCase_ = self.feature_extractor(_lowercase , *_lowercase , sampling_rate=_lowercase , **_lowercase )
elif text is not None:
UpperCamelCase_ = self.tokenizer(_lowercase , **_lowercase )
else:
UpperCamelCase_ = None
if audio_target is not None:
UpperCamelCase_ = self.feature_extractor(audio_target=_lowercase , *_lowercase , sampling_rate=_lowercase , **_lowercase )
UpperCamelCase_ = targets["input_values"]
elif text_target is not None:
UpperCamelCase_ = self.tokenizer(_lowercase , **_lowercase )
UpperCamelCase_ = targets["input_ids"]
else:
UpperCamelCase_ = None
if inputs is None:
return targets
if targets is not None:
UpperCamelCase_ = labels
UpperCamelCase_ = targets.get("attention_mask" )
if decoder_attention_mask is not None:
UpperCamelCase_ = decoder_attention_mask
return inputs
def UpperCAmelCase_ ( self , *_lowercase , **_lowercase )-> Optional[int]:
UpperCamelCase_ = kwargs.pop("input_values" , _lowercase )
UpperCamelCase_ = kwargs.pop("input_ids" , _lowercase )
UpperCamelCase_ = kwargs.pop("labels" , _lowercase )
if input_values is not None and input_ids is not None:
raise ValueError("Cannot process both `input_values` and `input_ids` inputs." )
if input_values is None and input_ids is None and labels is None:
raise ValueError(
"You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded." )
if input_values is not None:
UpperCamelCase_ = self.feature_extractor.pad(_lowercase , *_lowercase , **_lowercase )
elif input_ids is not None:
UpperCamelCase_ = self.tokenizer.pad(_lowercase , **_lowercase )
else:
UpperCamelCase_ = None
if labels is not None:
if "input_ids" in labels or (isinstance(_lowercase , _lowercase ) and "input_ids" in labels[0]):
UpperCamelCase_ = self.tokenizer.pad(_lowercase , **_lowercase )
UpperCamelCase_ = targets["input_ids"]
else:
UpperCamelCase_ = self.feature_extractor.feature_size
UpperCamelCase_ = self.feature_extractor.num_mel_bins
UpperCamelCase_ = self.feature_extractor.pad(_lowercase , *_lowercase , **_lowercase )
UpperCamelCase_ = feature_size_hack
UpperCamelCase_ = targets["input_values"]
else:
UpperCamelCase_ = None
if inputs is None:
return targets
if targets is not None:
UpperCamelCase_ = labels
UpperCamelCase_ = targets.get("attention_mask" )
if decoder_attention_mask is not None:
UpperCamelCase_ = decoder_attention_mask
return inputs
def UpperCAmelCase_ ( self , *_lowercase , **_lowercase )-> int:
return self.tokenizer.batch_decode(*_lowercase , **_lowercase )
def UpperCAmelCase_ ( self , *_lowercase , **_lowercase )-> int:
return self.tokenizer.decode(*_lowercase , **_lowercase )
| 60
| 0
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Optional[int] = {
"ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}
class A__ ( snake_case__ ):
"""simple docstring"""
__magic_name__ = 'deta'
__magic_name__ = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self , __snake_case=None , __snake_case=9_0_0 , __snake_case=2_0_4_8 , __snake_case=6 , __snake_case=2_0_4_8 , __snake_case=8 , __snake_case=6 , __snake_case=1_0_2_4 , __snake_case=8 , __snake_case=0.0 , __snake_case=True , __snake_case="relu" , __snake_case=2_5_6 , __snake_case=0.1 , __snake_case=0.0 , __snake_case=0.0 , __snake_case=0.02 , __snake_case=1.0 , __snake_case=True , __snake_case=False , __snake_case="sine" , __snake_case=5 , __snake_case=4 , __snake_case=4 , __snake_case=True , __snake_case=3_0_0 , __snake_case=True , __snake_case=True , __snake_case=1 , __snake_case=5 , __snake_case=2 , __snake_case=1 , __snake_case=1 , __snake_case=5 , __snake_case=2 , __snake_case=0.1 , __snake_case=0.25 , **__snake_case , ):
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
snake_case = CONFIG_MAPPING['''resnet'''](out_features=['''stage2''', '''stage3''', '''stage4'''] )
else:
if isinstance(__snake_case , __snake_case ):
snake_case = backbone_config.pop('''model_type''' )
snake_case = CONFIG_MAPPING[backbone_model_type]
snake_case = config_class.from_dict(__snake_case )
snake_case = backbone_config
snake_case = num_queries
snake_case = max_position_embeddings
snake_case = d_model
snake_case = encoder_ffn_dim
snake_case = encoder_layers
snake_case = encoder_attention_heads
snake_case = decoder_ffn_dim
snake_case = decoder_layers
snake_case = decoder_attention_heads
snake_case = dropout
snake_case = attention_dropout
snake_case = activation_dropout
snake_case = activation_function
snake_case = init_std
snake_case = init_xavier_std
snake_case = encoder_layerdrop
snake_case = auxiliary_loss
snake_case = position_embedding_type
# deformable attributes
snake_case = num_feature_levels
snake_case = encoder_n_points
snake_case = decoder_n_points
snake_case = two_stage
snake_case = two_stage_num_proposals
snake_case = with_box_refine
snake_case = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError('''If two_stage is True, with_box_refine must be True.''' )
# Hungarian matcher
snake_case = class_cost
snake_case = bbox_cost
snake_case = giou_cost
# Loss coefficients
snake_case = mask_loss_coefficient
snake_case = dice_loss_coefficient
snake_case = bbox_loss_coefficient
snake_case = giou_loss_coefficient
snake_case = eos_coefficient
snake_case = focal_alpha
super().__init__(is_encoder_decoder=__snake_case , **__snake_case )
@property
def a_ ( self ):
return self.encoder_attention_heads
@property
def a_ ( self ):
return self.d_model
def a_ ( self ):
snake_case = copy.deepcopy(self.__dict__ )
snake_case = self.backbone_config.to_dict()
snake_case = self.__class__.model_type
return output
| 127
|
from __future__ import annotations
_SCREAMING_SNAKE_CASE : Optional[int] = []
def UpperCAmelCase__ (UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ):
"""simple docstring"""
for i in range(len(UpperCamelCase_ ) ):
if board[row][i] == 1:
return False
for i in range(len(UpperCamelCase_ ) ):
if board[i][column] == 1:
return False
for i, j in zip(range(UpperCamelCase_ ,-1 ,-1 ) ,range(UpperCamelCase_ ,-1 ,-1 ) ):
if board[i][j] == 1:
return False
for i, j in zip(range(UpperCamelCase_ ,-1 ,-1 ) ,range(UpperCamelCase_ ,len(UpperCamelCase_ ) ) ):
if board[i][j] == 1:
return False
return True
def UpperCAmelCase__ (UpperCamelCase_ ,UpperCamelCase_ ):
"""simple docstring"""
if row >= len(UpperCamelCase_ ):
solution.append(UpperCamelCase_ )
printboard(UpperCamelCase_ )
print()
return True
for i in range(len(UpperCamelCase_ ) ):
if is_safe(UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ):
snake_case = 1
solve(UpperCamelCase_ ,row + 1 )
snake_case = 0
return False
def UpperCAmelCase__ (UpperCamelCase_ ):
"""simple docstring"""
for i in range(len(UpperCamelCase_ ) ):
for j in range(len(UpperCamelCase_ ) ):
if board[i][j] == 1:
print('''Q''' ,end=''' ''' )
else:
print('''.''' ,end=''' ''' )
print()
# n=int(input("The no. of queens"))
_SCREAMING_SNAKE_CASE : Tuple = 8
_SCREAMING_SNAKE_CASE : List[Any] = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
| 127
| 1
|
'''simple docstring'''
from __future__ import annotations
from typing import TypedDict
class _a ( __lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : str
SCREAMING_SNAKE_CASE_ : int
def __a ( _UpperCamelCase: str ) -> list[str]:
"""simple docstring"""
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
raise TypeError("The parameter s type must be str." )
return [s[i:] + s[:i] for i in range(len(_UpperCamelCase ) )]
def __a ( _UpperCamelCase: str ) -> BWTTransformDict:
"""simple docstring"""
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
raise TypeError("The parameter s type must be str." )
if not s:
raise ValueError("The parameter s must not be empty." )
_snake_case = all_rotations(_UpperCamelCase )
rotations.sort() # sort the list of rotations in alphabetically order
# make a string composed of the last char of each rotation
_snake_case = {
"bwt_string": "".join([word[-1] for word in rotations] ),
"idx_original_string": rotations.index(_UpperCamelCase ),
}
return response
def __a ( _UpperCamelCase: str , _UpperCamelCase: int ) -> str:
"""simple docstring"""
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
raise TypeError("The parameter bwt_string type must be str." )
if not bwt_string:
raise ValueError("The parameter bwt_string must not be empty." )
try:
_snake_case = int(_UpperCamelCase )
except ValueError:
raise TypeError(
"The parameter idx_original_string type must be int or passive"
" of cast to int." )
if idx_original_string < 0:
raise ValueError("The parameter idx_original_string must not be lower than 0." )
if idx_original_string >= len(_UpperCamelCase ):
raise ValueError(
"The parameter idx_original_string must be lower than" " len(bwt_string)." )
_snake_case = [""] * len(_UpperCamelCase )
for _ in range(len(_UpperCamelCase ) ):
for i in range(len(_UpperCamelCase ) ):
_snake_case = bwt_string[i] + ordered_rotations[i]
ordered_rotations.sort()
return ordered_rotations[idx_original_string]
if __name__ == "__main__":
UpperCamelCase_ : Any = '''Provide a string that I will generate its BWT transform: '''
UpperCamelCase_ : Optional[Any] = input(entry_msg).strip()
UpperCamelCase_ : Union[str, Any] = bwt_transform(s)
print(
F'Burrows Wheeler transform for string \'{s}\' results '
F'in \'{result["bwt_string"]}\''
)
UpperCamelCase_ : List[Any] = reverse_bwt(result['''bwt_string'''], result['''idx_original_string'''])
print(
F'Reversing Burrows Wheeler transform for entry \'{result["bwt_string"]}\' '
F'we get original string \'{original_string}\''
)
| 142
|
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def __a ( _UpperCamelCase: Tuple ) -> Union[str, Any]:
"""simple docstring"""
_snake_case = os.path.join(args.tf_model_dir , "parameters.json" )
_snake_case = json.loads(open(_UpperCamelCase ).read() )
if not params:
raise ValueError(
F"""It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.""" )
if not args.output.endswith(".pt" ):
_snake_case = args.output + ".pt"
_snake_case = OrderedDict()
with tf.device("/CPU:0" ):
_snake_case = tf.train.load_checkpoint(args.tf_model_dir )
_snake_case = reader.get_variable_to_shape_map()
for key_name in shapes.keys():
_snake_case = reader.get_tensor(_UpperCamelCase ).astype(np.floataa )
if key_name.endswith("/adam_m" ) or key_name.endswith("/adam_v" ):
continue
if key_name.startswith("pasts/" ):
if key_name.startswith("pasts/mlp" ):
_snake_case = int(key_name[9] )
elif key_name.startswith("pasts/out" ):
_snake_case = 8
_snake_case = "model.sqout.%d.weight" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
_snake_case = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_snake_case = torch.tensor(_UpperCamelCase )
elif key_name.startswith("model/moe" ):
_snake_case = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/switch_gating/kernel" ):
_snake_case = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
_snake_case = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_snake_case = torch.tensor(_UpperCamelCase )
elif key_name.endswith("/softmlp/kernel" ):
_snake_case = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
_snake_case = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_snake_case = torch.tensor(_UpperCamelCase )
elif key_name.endswith("/wo/kernel" ) or key_name.endswith("/wi/kernel" ):
_snake_case = key_name[-9:-7]
for i in range(16 ):
_snake_case = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
_snake_case = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
_snake_case = torch.tensor(_UpperCamelCase )
elif key_name.startswith("model/mlp" ):
_snake_case = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/p1/kernel" ):
_snake_case = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
_snake_case = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_snake_case = torch.tensor(_UpperCamelCase )
elif key_name.endswith("/p1/bias" ):
_snake_case = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
_snake_case = vnp.copy() # same because it is one dimensional
_snake_case = torch.tensor(_UpperCamelCase )
elif key_name.endswith("/p2/kernel" ):
_snake_case = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
_snake_case = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_snake_case = torch.tensor(_UpperCamelCase )
elif key_name.endswith("/p2/bias" ):
_snake_case = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
_snake_case = vnp.copy() # same because it is one dimensional
_snake_case = torch.tensor(_UpperCamelCase )
elif key_name.startswith("model/ln" ):
_snake_case = int(key_name[8:].split("/" )[0] )
if key_name.endswith("/b" ):
_snake_case = "model.blocks.%d.feed_forward.norm.bias" % player
_snake_case = vnp.copy() # same because it is one dimensional
_snake_case = torch.tensor(_UpperCamelCase )
elif key_name.endswith("/g" ):
_snake_case = "model.blocks.%d.feed_forward.norm.weight" % player
_snake_case = vnp.copy() # same because it is one dimensional
_snake_case = torch.tensor(_UpperCamelCase )
elif key_name.startswith("model/att" ):
_snake_case = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/qkv/kernel" ):
_snake_case = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
_snake_case = state[:, 0, :, :]
_snake_case = state[:, 1, :, :]
_snake_case = state[:, 2, :, :]
_snake_case = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_snake_case = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_snake_case = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_snake_case = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
_snake_case = torch.tensor(_UpperCamelCase )
_snake_case = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
_snake_case = torch.tensor(_UpperCamelCase )
_snake_case = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
_snake_case = torch.tensor(_UpperCamelCase )
elif key_name.endswith("/o/kernel" ):
_snake_case = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
_snake_case = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
_snake_case = torch.tensor(_UpperCamelCase )
elif key_name.startswith("model/an" ):
_snake_case = int(key_name[8:].split("/" )[0] )
if key_name.endswith("/b" ):
_snake_case = "model.blocks.%d.self_attn.norm.bias" % player
_snake_case = vnp.copy() # same because it is one dimensional
_snake_case = torch.tensor(_UpperCamelCase )
elif key_name.endswith("/g" ):
_snake_case = "model.blocks.%d.self_attn.norm.weight" % player
_snake_case = vnp.copy() # same because it is one dimensional
_snake_case = torch.tensor(_UpperCamelCase )
elif (
key_name.startswith("model/wte" )
or key_name.startswith("model/wpe" )
or key_name.startswith("model/ete" )
):
_snake_case = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
key_name[-3:]
]
_snake_case = "model.%s.weight" % nlayer
_snake_case = vnp.copy() # same in embedded
_snake_case = torch.tensor(_UpperCamelCase )
if key_name.startswith("model/wte" ):
_snake_case = "lm_head.weight"
_snake_case = vnp.copy() # same in embedded
_snake_case = torch.tensor(_UpperCamelCase )
elif key_name.startswith("model/wob" ):
_snake_case = "final_logits_bias"
_snake_case = vnp.copy() # same in embedded
_snake_case = state.reshape((1, -1) )
_snake_case = torch.tensor(_UpperCamelCase )
elif key_name == "model/dense/kernel":
_snake_case = "model.last_project.weight"
_snake_case = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_snake_case = torch.tensor(_UpperCamelCase )
elif key_name == "model/dense_1/bias":
_snake_case = "model.last_project.bias"
_snake_case = vnp.copy() # same because it is one dimensional
_snake_case = torch.tensor(_UpperCamelCase )
torch.save(_UpperCamelCase , args.output )
if __name__ == "__main__":
UpperCamelCase_ : Tuple = argparse.ArgumentParser(
description='''model converter.''', formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('''--tf_model_dir''', metavar='''PATH''', type=str, required=True, help='''import model''')
parser.add_argument('''--output''', metavar='''PATH''', type=str, required=True, help='''output model''')
UpperCamelCase_ : Any = parser.parse_args()
convert_tf_gptsan_to_pt(args)
| 142
| 1
|
def A_ ( _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: List[Any] = []
if len(_UpperCAmelCase ) == 1:
return [nums.copy()]
for _ in range(len(_UpperCAmelCase ) ):
SCREAMING_SNAKE_CASE_: Any = nums.pop(0 )
SCREAMING_SNAKE_CASE_: List[str] = permute(_UpperCAmelCase )
for perm in permutations:
perm.append(_UpperCAmelCase )
result.extend(_UpperCAmelCase )
nums.append(_UpperCAmelCase )
return result
def A_ ( _UpperCAmelCase ):
def backtrack(_UpperCAmelCase ):
if start == len(_UpperCAmelCase ) - 1:
output.append(nums[:] )
else:
for i in range(_UpperCAmelCase , len(_UpperCAmelCase ) ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] = nums[i], nums[start]
backtrack(start + 1 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Any = nums[i], nums[start] # backtrack
SCREAMING_SNAKE_CASE_: Optional[Any] = []
backtrack(0 )
return output
if __name__ == "__main__":
import doctest
# use res to print the data in permute2 function
lowerCAmelCase : List[Any] = permutea([1, 2, 3])
print(res)
doctest.testmod()
| 13
|
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase : Optional[Any] = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class __lowercase ( UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase : Optional[Any] = XGLMTokenizer
_UpperCAmelCase : List[Any] = XGLMTokenizerFast
_UpperCAmelCase : Optional[int] = True
_UpperCAmelCase : Tuple = True
def _SCREAMING_SNAKE_CASE ( self : Tuple):
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE_: List[Any] = XGLMTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__)
tokenizer.save_pretrained(self.tmpdirname)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
SCREAMING_SNAKE_CASE_: Optional[Any] = "<pad>"
SCREAMING_SNAKE_CASE_: int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__) , lowerCAmelCase__)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__) , lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : List[str]):
SCREAMING_SNAKE_CASE_: Optional[int] = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , "<s>")
self.assertEqual(vocab_keys[1] , "<pad>")
self.assertEqual(len(lowerCAmelCase__) , 1008)
def _SCREAMING_SNAKE_CASE ( self : Any):
self.assertEqual(self.get_tokenizer().vocab_size , 1008)
def _SCREAMING_SNAKE_CASE ( self : Tuple):
SCREAMING_SNAKE_CASE_: Optional[int] = XGLMTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = tokenizer.tokenize("This is a test")
self.assertListEqual(lowerCAmelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase__) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
SCREAMING_SNAKE_CASE_: List[str] = tokenizer.tokenize("I was born in 92000, and this is falsé.")
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
SCREAMING_SNAKE_CASE_: Optional[Any] = tokenizer.convert_tokens_to_ids(lowerCAmelCase__)
self.assertListEqual(
lowerCAmelCase__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
SCREAMING_SNAKE_CASE_: List[Any] = tokenizer.convert_ids_to_tokens(lowerCAmelCase__)
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Any):
return XGLMTokenizer.from_pretrained("facebook/xglm-564M")
def _SCREAMING_SNAKE_CASE ( self : str):
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(lowerCAmelCase__ , f.name)
SCREAMING_SNAKE_CASE_: Tuple = XGLMTokenizer(f.name , keep_accents=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = pickle.dumps(lowerCAmelCase__)
pickle.loads(lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : str):
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE_: Dict = self.get_tokenizer()
SCREAMING_SNAKE_CASE_: List[str] = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_: Any = "I was born in 92000, and this is falsé."
SCREAMING_SNAKE_CASE_: Union[str, Any] = tokenizer.tokenize(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = rust_tokenizer.tokenize(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_: str = tokenizer.encode(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = rust_tokenizer.encode(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
SCREAMING_SNAKE_CASE_: Dict = "Hello World!"
SCREAMING_SNAKE_CASE_: Union[str, Any] = [2, 3_1227, 4447, 35]
self.assertListEqual(lowerCAmelCase__ , self.big_tokenizer.encode(lowerCAmelCase__))
@slow
def _SCREAMING_SNAKE_CASE ( self : int):
SCREAMING_SNAKE_CASE_: Union[str, Any] = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
)
# fmt: off
SCREAMING_SNAKE_CASE_: Optional[Any] = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 7_1630, 2_8085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 1_3675, 377, 652, 7580, 1_0341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 20_2277, 1_7892, 33, 60, 87, 4, 3234, 157, 61, 2667, 5_2376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(lowerCAmelCase__ , self.big_tokenizer.encode(lowerCAmelCase__))
@slow
def _SCREAMING_SNAKE_CASE ( self : int):
# fmt: off
SCREAMING_SNAKE_CASE_: str = {
"input_ids": [[2, 10_8825, 1163, 15, 8_8010, 473, 1_5898, 157, 1_3672, 1857, 312, 8, 23_8021, 1163, 53, 1_3672, 1857, 312, 8, 5_3283, 18_2396, 8, 1_8566, 16, 3_6733, 4101, 8, 230, 24_4017, 12_2553, 7, 15, 13_2597, 4, 293, 1_2511, 7610, 4, 3414, 13_2597, 9, 4, 3_2361, 362, 4, 734, 2_8512, 3_2569, 18, 4, 3_2361, 2_6096, 1_4982, 73, 1_8715, 2_1433, 23_5261, 15, 492, 1_2427, 16, 53, 1_8715, 2_1433, 6_5454, 15, 2_3659, 563, 16, 278, 597, 2843, 595, 7931, 18_2396, 6_4186, 22, 886, 595, 13_2981, 53, 2_5540, 3449, 4_3982, 3_9901, 5951, 878, 330, 4, 2_7694, 8_0269, 312, 53, 6517, 1_1780, 611, 2_0408, 5], [2, 6, 13_2597, 67, 4_2897, 33, 592, 8, 16_3729, 2_5540, 361, 13_6997, 10_9514, 17_3230, 7, 501, 60, 10_2913, 196, 5631, 235, 6_3243, 473, 6, 23_1757, 74, 5277, 7905, 53, 3095, 3_7317, 22, 454, 18_3874, 5], [2, 268, 3_1298, 4_6530, 6, 13_2935, 4_3831, 7, 597, 32, 24, 3688, 9865, 5]],
"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__ , model_name="facebook/xglm-564M" , padding=lowerCAmelCase__ , )
| 13
| 1
|
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__magic_name__ :torch.FloatTensor
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
@register_to_config
def __init__( self , __UpperCAmelCase = 3_2 , __UpperCAmelCase = 6_4 , __UpperCAmelCase = 2_0 , __UpperCAmelCase = 7_6_8 , __UpperCAmelCase=7_7 , __UpperCAmelCase=4 , __UpperCAmelCase = 0.0 , __UpperCAmelCase = "silu" , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = "linear" , __UpperCAmelCase = "prd" , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , ):
'''simple docstring'''
super().__init__()
lowerCAmelCase__ :Optional[Any] = num_attention_heads
lowerCAmelCase__ :List[str] = attention_head_dim
lowerCAmelCase__ :List[str] = num_attention_heads * attention_head_dim
lowerCAmelCase__ :List[Any] = additional_embeddings
lowerCAmelCase__ :Union[str, Any] = time_embed_dim or inner_dim
lowerCAmelCase__ :Tuple = embedding_proj_dim or embedding_dim
lowerCAmelCase__ :Dict = clip_embed_dim or embedding_dim
lowerCAmelCase__ :Dict = Timesteps(_snake_case , _snake_case , 0 )
lowerCAmelCase__ :Optional[Any] = TimestepEmbedding(_snake_case , _snake_case , out_dim=_snake_case , act_fn=_snake_case )
lowerCAmelCase__ :Tuple = nn.Linear(_snake_case , _snake_case )
if embedding_proj_norm_type is None:
lowerCAmelCase__ :Any = None
elif embedding_proj_norm_type == "layer":
lowerCAmelCase__ :Optional[Any] = nn.LayerNorm(_snake_case )
else:
raise ValueError(F"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}" )
lowerCAmelCase__ :List[str] = nn.Linear(_snake_case , _snake_case )
if encoder_hid_proj_type is None:
lowerCAmelCase__ :List[Any] = None
elif encoder_hid_proj_type == "linear":
lowerCAmelCase__ :str = nn.Linear(_snake_case , _snake_case )
else:
raise ValueError(F"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}" )
lowerCAmelCase__ :Tuple = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , _snake_case ) )
if added_emb_type == "prd":
lowerCAmelCase__ :int = nn.Parameter(torch.zeros(1 , 1 , _snake_case ) )
elif added_emb_type is None:
lowerCAmelCase__ :Union[str, Any] = None
else:
raise ValueError(
F"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`." )
lowerCAmelCase__ :Optional[int] = nn.ModuleList(
[
BasicTransformerBlock(
_snake_case , _snake_case , _snake_case , dropout=_snake_case , activation_fn='gelu' , attention_bias=_snake_case , )
for d in range(_snake_case )
] )
if norm_in_type == "layer":
lowerCAmelCase__ :Dict = nn.LayerNorm(_snake_case )
elif norm_in_type is None:
lowerCAmelCase__ :List[str] = None
else:
raise ValueError(F"Unsupported norm_in_type: {norm_in_type}." )
lowerCAmelCase__ :int = nn.LayerNorm(_snake_case )
lowerCAmelCase__ :int = nn.Linear(_snake_case , _snake_case )
lowerCAmelCase__ :Tuple = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_00_00.0 )
causal_attention_mask.triu_(1 )
lowerCAmelCase__ :List[str] = causal_attention_mask[None, ...]
self.register_buffer('causal_attention_mask' , _snake_case , persistent=_snake_case )
lowerCAmelCase__ :Any = nn.Parameter(torch.zeros(1 , _snake_case ) )
lowerCAmelCase__ :str = nn.Parameter(torch.zeros(1 , _snake_case ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Union[str, Any] = {}
def fn_recursive_add_processors(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
if hasattr(_snake_case , 'set_processor' ):
lowerCAmelCase__ :Any = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F"{name}.{sub_name}" , _snake_case , _snake_case )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_snake_case , _snake_case , _snake_case )
return processors
def snake_case ( self , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Union[str, Any] = len(self.attn_processors.keys() )
if isinstance(_snake_case , _snake_case ) and len(_snake_case ) != count:
raise ValueError(
F"A dict of processors was passed, but the number of processors {len(_snake_case )} does not match the"
F" number of attention layers: {count}. Please make sure to pass {count} processor classes." )
def fn_recursive_attn_processor(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
if hasattr(_snake_case , 'set_processor' ):
if not isinstance(_snake_case , _snake_case ):
module.set_processor(_snake_case )
else:
module.set_processor(processor.pop(F"{name}.processor" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F"{name}.{sub_name}" , _snake_case , _snake_case )
for name, module in self.named_children():
fn_recursive_attn_processor(_snake_case , _snake_case , _snake_case )
def snake_case ( self ):
'''simple docstring'''
self.set_attn_processor(AttnProcessor() )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = True , ):
'''simple docstring'''
lowerCAmelCase__ :str = hidden_states.shape[0]
lowerCAmelCase__ :Optional[Any] = timestep
if not torch.is_tensor(_snake_case ):
lowerCAmelCase__ :Union[str, Any] = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(_snake_case ) and len(timesteps.shape ) == 0:
lowerCAmelCase__ :Union[str, Any] = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
lowerCAmelCase__ :Optional[Any] = timesteps * torch.ones(_snake_case , dtype=timesteps.dtype , device=timesteps.device )
lowerCAmelCase__ :int = self.time_proj(_snake_case )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
lowerCAmelCase__ :str = timesteps_projected.to(dtype=self.dtype )
lowerCAmelCase__ :Optional[int] = self.time_embedding(_snake_case )
if self.embedding_proj_norm is not None:
lowerCAmelCase__ :Optional[int] = self.embedding_proj_norm(_snake_case )
lowerCAmelCase__ :Optional[Any] = self.embedding_proj(_snake_case )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
lowerCAmelCase__ :Union[str, Any] = self.encoder_hidden_states_proj(_snake_case )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' )
lowerCAmelCase__ :str = self.proj_in(_snake_case )
lowerCAmelCase__ :Dict = self.positional_embedding.to(hidden_states.dtype )
lowerCAmelCase__ :Tuple = []
lowerCAmelCase__ :List[str] = 0
if encoder_hidden_states is not None:
additional_embeds.append(_snake_case )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
lowerCAmelCase__ :Dict = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
lowerCAmelCase__ :Optional[int] = hidden_states[:, None, :]
lowerCAmelCase__ :Tuple = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
lowerCAmelCase__ :Tuple = self.prd_embedding.to(hidden_states.dtype ).expand(_snake_case , -1 , -1 )
additional_embeds.append(_snake_case )
lowerCAmelCase__ :str = torch.cat(
_snake_case , dim=1 , )
# Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
lowerCAmelCase__ :Tuple = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
lowerCAmelCase__ :Tuple = F.pad(
_snake_case , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
lowerCAmelCase__ :List[str] = hidden_states + positional_embeddings
if attention_mask is not None:
lowerCAmelCase__ :int = (1 - attention_mask.to(hidden_states.dtype )) * -1_00_00.0
lowerCAmelCase__ :Optional[int] = F.pad(_snake_case , (0, self.additional_embeddings) , value=0.0 )
lowerCAmelCase__ :Any = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
lowerCAmelCase__ :List[str] = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
lowerCAmelCase__ :Union[str, Any] = self.norm_in(_snake_case )
for block in self.transformer_blocks:
lowerCAmelCase__ :str = block(_snake_case , attention_mask=_snake_case )
lowerCAmelCase__ :str = self.norm_out(_snake_case )
if self.prd_embedding is not None:
lowerCAmelCase__ :Tuple = hidden_states[:, -1]
else:
lowerCAmelCase__ :Any = hidden_states[:, additional_embeddings_len:]
lowerCAmelCase__ :Tuple = self.proj_to_clip_embeddings(_snake_case )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=_snake_case )
def snake_case ( self , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Union[str, Any] = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
| 351
|
"""simple docstring"""
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , ) ->str:
"""simple docstring"""
if attention_mask is None:
lowerCAmelCase__ :List[str] = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
lowerCAmelCase__ :Tuple = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
lowerCAmelCase__ :Union[str, Any] = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=_SCREAMING_SNAKE_CASE )
if decoder_head_mask is None:
lowerCAmelCase__ :List[str] = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=_SCREAMING_SNAKE_CASE )
if cross_attn_head_mask is None:
lowerCAmelCase__ :List[str] = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=_SCREAMING_SNAKE_CASE )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=1_3 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=9_9 , __UpperCAmelCase=1_6 , __UpperCAmelCase=2 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase="relu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=2_0 , __UpperCAmelCase=2 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , ):
'''simple docstring'''
lowerCAmelCase__ :List[Any] = parent
lowerCAmelCase__ :Any = batch_size
lowerCAmelCase__ :Optional[Any] = seq_length
lowerCAmelCase__ :int = is_training
lowerCAmelCase__ :Tuple = use_labels
lowerCAmelCase__ :Union[str, Any] = vocab_size
lowerCAmelCase__ :Tuple = hidden_size
lowerCAmelCase__ :Tuple = num_hidden_layers
lowerCAmelCase__ :Tuple = num_attention_heads
lowerCAmelCase__ :Dict = intermediate_size
lowerCAmelCase__ :Optional[int] = hidden_act
lowerCAmelCase__ :Any = hidden_dropout_prob
lowerCAmelCase__ :Dict = attention_probs_dropout_prob
lowerCAmelCase__ :Tuple = encoder_layerdrop
lowerCAmelCase__ :Tuple = decoder_layerdrop
lowerCAmelCase__ :Tuple = max_position_embeddings
lowerCAmelCase__ :Any = eos_token_id
lowerCAmelCase__ :str = pad_token_id
lowerCAmelCase__ :Tuple = bos_token_id
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ :Tuple = self.eos_token_id # Eos Token
lowerCAmelCase__ :Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for M2M100 the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
lowerCAmelCase__ :List[Any] = input_ids.clamp(self.pad_token_id + 1 )
lowerCAmelCase__ :Dict = decoder_input_ids.clamp(self.pad_token_id + 1 )
lowerCAmelCase__ :Optional[Any] = self.get_config()
lowerCAmelCase__ :Any = prepare_mam_aaa_inputs_dict(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
return config, inputs_dict
def snake_case ( self ):
'''simple docstring'''
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ :List[Any] = self.prepare_config_and_inputs()
return config, inputs_dict
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :List[Any] = MaMaaaModel(config=__UpperCAmelCase ).get_decoder().to(__UpperCAmelCase ).eval()
lowerCAmelCase__ :Optional[int] = inputs_dict['input_ids']
lowerCAmelCase__ :Any = inputs_dict['attention_mask']
lowerCAmelCase__ :Tuple = inputs_dict['head_mask']
# first forward pass
lowerCAmelCase__ :int = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , head_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase )
lowerCAmelCase__ , lowerCAmelCase__ :Union[str, Any] = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
lowerCAmelCase__ :Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase__ :int = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
lowerCAmelCase__ :Tuple = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase__ :Union[str, Any] = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
lowerCAmelCase__ :Dict = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )['last_hidden_state']
lowerCAmelCase__ :Optional[Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase )[
'last_hidden_state'
]
# select random slice
lowerCAmelCase__ :Any = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase__ :List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCAmelCase__ :Union[str, Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-2 ) )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = MaMaaaModel(config=__UpperCAmelCase ).to(__UpperCAmelCase ).eval()
lowerCAmelCase__ :List[Any] = model(**__UpperCAmelCase )
lowerCAmelCase__ :int = outputs.encoder_last_hidden_state
lowerCAmelCase__ :Any = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ :Union[str, Any] = model.get_encoder()
encoder.save_pretrained(__UpperCAmelCase )
lowerCAmelCase__ :Any = MaMaaaEncoder.from_pretrained(__UpperCAmelCase ).to(__UpperCAmelCase )
lowerCAmelCase__ :Any = encoder(inputs_dict['input_ids'] , attention_mask=inputs_dict['attention_mask'] )[
0
]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ :Optional[int] = model.get_decoder()
decoder.save_pretrained(__UpperCAmelCase )
lowerCAmelCase__ :Dict = MaMaaaDecoder.from_pretrained(__UpperCAmelCase ).to(__UpperCAmelCase )
lowerCAmelCase__ :int = decoder(
input_ids=inputs_dict['decoder_input_ids'] , attention_mask=inputs_dict['decoder_attention_mask'] , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=inputs_dict['attention_mask'] , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class _lowerCAmelCase ( a , a , a , unittest.TestCase ):
"""simple docstring"""
__magic_name__ :Optional[int] = (
(
MaMaaaModel,
MaMaaaForConditionalGeneration,
)
if is_torch_available()
else ()
)
__magic_name__ :str = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
__magic_name__ :str = (
{
"""conversational""": MaMaaaForConditionalGeneration,
"""feature-extraction""": MaMaaaModel,
"""summarization""": MaMaaaForConditionalGeneration,
"""text2text-generation""": MaMaaaForConditionalGeneration,
"""translation""": MaMaaaForConditionalGeneration,
}
if is_torch_available()
else {}
)
__magic_name__ :Any = True
__magic_name__ :Union[str, Any] = True
__magic_name__ :Tuple = False
__magic_name__ :List[str] = False
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
if pipeline_test_casse_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :int = MaMaaaModelTester(self )
lowerCAmelCase__ :Tuple = ConfigTester(self , config_class=__UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ :str = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
lowerCAmelCase__ :str = model_class(__UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__UpperCAmelCase )
lowerCAmelCase__ , lowerCAmelCase__ :List[Any] = model_class.from_pretrained(__UpperCAmelCase , output_loading_info=__UpperCAmelCase )
self.assertEqual(info['missing_keys'] , [] )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*__UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :int = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*__UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
lowerCAmelCase__ :Optional[int] = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ :List[Any] = copy.deepcopy(self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) )
if not self.is_encoder_decoder:
lowerCAmelCase__ :List[str] = inputs['input_ids']
del inputs["input_ids"]
else:
lowerCAmelCase__ :int = inputs['input_ids']
lowerCAmelCase__ :str = inputs.get('decoder_input_ids' , __UpperCAmelCase )
del inputs["input_ids"]
inputs.pop('decoder_input_ids' , __UpperCAmelCase )
lowerCAmelCase__ :List[Any] = model.get_input_embeddings()
if not self.is_encoder_decoder:
lowerCAmelCase__ :Tuple = wte(__UpperCAmelCase )
else:
lowerCAmelCase__ :List[Any] = wte(__UpperCAmelCase )
lowerCAmelCase__ :Dict = wte(__UpperCAmelCase )
with torch.no_grad():
model(**__UpperCAmelCase )[0]
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ :int = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase__ :Any = input_dict['input_ids']
lowerCAmelCase__ :Optional[Any] = input_ids.ne(1 ).to(__UpperCAmelCase )
lowerCAmelCase__ :List[Any] = MaMaaaForConditionalGeneration(__UpperCAmelCase ).eval().to(__UpperCAmelCase )
if torch_device == "cuda":
model.half()
model.generate(__UpperCAmelCase , attention_mask=__UpperCAmelCase )
model.generate(num_beams=4 , do_sample=__UpperCAmelCase , early_stopping=__UpperCAmelCase , num_return_sequences=3 )
def __A (_SCREAMING_SNAKE_CASE ) ->Dict:
"""simple docstring"""
return torch.tensor(_SCREAMING_SNAKE_CASE , dtype=torch.long , device=_SCREAMING_SNAKE_CASE )
__A = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case ( self ):
'''simple docstring'''
return MaMaaaTokenizer.from_pretrained('facebook/m2m100_418M' )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = MaMaaaModel.from_pretrained('facebook/m2m100_418M' ).to(__UpperCAmelCase )
lowerCAmelCase__ :str = _long_tensor([[1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8, 2]] )
lowerCAmelCase__ :Optional[int] = _long_tensor([[2, 1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8]] )
lowerCAmelCase__ :Union[str, Any] = prepare_mam_aaa_inputs_dict(model.config , __UpperCAmelCase , __UpperCAmelCase )
with torch.no_grad():
lowerCAmelCase__ :Any = model(**__UpperCAmelCase )[0]
lowerCAmelCase__ :List[str] = torch.Size((1, 1_1, 1_0_2_4) )
self.assertEqual(output.shape , __UpperCAmelCase )
# change to expected output here
lowerCAmelCase__ :int = torch.tensor(
[[-0.77_80, -0.16_76, 0.10_38], [-6.75_56, -1.39_92, 0.05_67], [-7.53_83, -0.59_20, -0.27_79]] , device=__UpperCAmelCase )
self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=__UpperCAmelCase ) )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Union[str, Any] = MaMaaaForConditionalGeneration.from_pretrained('facebook/m2m100_418M' ).to(__UpperCAmelCase )
# change to intended input
lowerCAmelCase__ :str = _long_tensor([[1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8, 2]] )
lowerCAmelCase__ :Any = _long_tensor([[2, 1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8]] )
lowerCAmelCase__ :List[Any] = prepare_mam_aaa_inputs_dict(model.config , __UpperCAmelCase , __UpperCAmelCase )
with torch.no_grad():
lowerCAmelCase__ :List[Any] = model(**__UpperCAmelCase )[0]
lowerCAmelCase__ :Any = torch.Size((1, 1_1, model.config.vocab_size) )
self.assertEqual(output.shape , __UpperCAmelCase )
# change to expected output here
lowerCAmelCase__ :List[Any] = torch.tensor(
[[-1.04_48, -1.04_11, 3.79_92], [-3.21_91, -3.23_86, -1.34_51], [-3.62_10, -3.59_93, 0.49_25]] , device=__UpperCAmelCase )
self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=__UpperCAmelCase ) )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = MaMaaaForConditionalGeneration.from_pretrained('facebook/m2m100_418M' ).to(__UpperCAmelCase )
lowerCAmelCase__ :Union[str, Any] = MaMaaaTokenizer.from_pretrained('facebook/m2m100_418M' , src_lang='fr' , tgt_lang='en' )
lowerCAmelCase__ :Tuple = [
'L\'affaire NSA souligne l\'absence totale de débat sur le renseignement',
'Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.',
'Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent'
' Fabius convoque l\'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de'
' l\'ampleur de la surveillance américaine sur l\'ensemble des communications en France.',
]
# The below article tests that we don't add any hypotheses outside of the top n_beams
lowerCAmelCase__ :Union[str, Any] = tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors='pt' )
lowerCAmelCase__ :List[Any] = model.generate(
input_ids=dct['input_ids'].to(__UpperCAmelCase ) , attention_mask=dct['attention_mask'].to(__UpperCAmelCase ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id('en' ) , )
lowerCAmelCase__ :Optional[Any] = [
'The NSA case highlights the total absence of intelligence debate',
'I think there are two levels of response from the French government.',
'When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S.'
' Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all'
' communications in France.',
]
lowerCAmelCase__ :Any = tokenizer.batch_decode(
hypotheses_batch.tolist() , clean_up_tokenization_spaces=__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
assert generated == expected_en
| 254
| 0
|
'''simple docstring'''
def _UpperCamelCase ( __A ) -> "list[int]":
'''simple docstring'''
if upper_limit < 0:
raise ValueError("Limit for the Catalan sequence must be ≥ 0" )
UpperCamelCase__ = [0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
UpperCamelCase__ = 1
if upper_limit > 0:
UpperCamelCase__ = 1
# Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
for i in range(2 , upper_limit + 1 ):
for j in range(__A ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
if __name__ == "__main__":
print('\n********* Catalan Numbers Using Dynamic Programming ************\n')
print('\n*** Enter -1 at any time to quit ***')
print('\nEnter the upper limit (≥ 0) for the Catalan number sequence: ', end='')
try:
while True:
a__ : Tuple = int(input().strip())
if N < 0:
print('\n********* Goodbye!! ************')
break
else:
print(F"""The Catalan numbers from 0 through {N} are:""")
print(catalan_numbers(N))
print('Try another upper limit for the sequence: ', end='')
except (NameError, ValueError):
print('\n********* Invalid input, goodbye! ************\n')
import doctest
doctest.testmod()
| 80
|
'''simple docstring'''
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase_ ( a__ , unittest.TestCase ):
__UpperCAmelCase = CLIPTokenizer
__UpperCAmelCase = CLIPTokenizerFast
__UpperCAmelCase = True
__UpperCAmelCase = {}
__UpperCAmelCase = False
def __a ( self ):
super().setUp()
# fmt: off
UpperCamelCase__ = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
UpperCamelCase__ = dict(zip(a , range(len(a ) ) ) )
UpperCamelCase__ = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
UpperCamelCase__ = {"unk_token": "<unk>"}
UpperCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
UpperCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(a ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(a ) )
def __a ( self , **a ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname , **a )
def __a ( self , **a ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **a )
def __a ( self , a ):
UpperCamelCase__ = "lower newer"
UpperCamelCase__ = "lower newer"
return input_text, output_text
def __a ( self ):
UpperCamelCase__ = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
UpperCamelCase__ = "lower newer"
UpperCamelCase__ = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
UpperCamelCase__ = tokenizer.tokenize(a )
self.assertListEqual(a , a )
UpperCamelCase__ = tokens + [tokenizer.unk_token]
UpperCamelCase__ = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a ) , a )
@require_ftfy
def __a ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCamelCase__ = self.tokenizer_class.from_pretrained(a , **a )
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(a , **a )
UpperCamelCase__ = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
UpperCamelCase__ = tokenizer_s.tokenize(a )
UpperCamelCase__ = tokenizer_r.tokenize(a )
self.assertListEqual(a , a )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
UpperCamelCase__ = "xa\u0303y" + " " + "x\xe3y"
UpperCamelCase__ = tokenizer_s.tokenize(a )
UpperCamelCase__ = tokenizer_r.tokenize(a )
self.assertListEqual(a , a )
# Test that the tokenization is identical on unicode of space type
UpperCamelCase__ = [
"\u0009", # (horizontal tab, '\t')
"\u000B", # (vertical tab)
"\u000C", # (form feed)
"\u0020", # (space, ' ')
"\u200E", # (left-to-right mark):w
"\u200F", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
UpperCamelCase__ = tokenizer_s.tokenize(a )
UpperCamelCase__ = tokenizer_r.tokenize(a )
self.assertListEqual(a , a )
# Test that the tokenization is identical on unicode of line break type
UpperCamelCase__ = [
"\u000A", # (line feed, '\n')
"\r\n", # (carriage return and line feed, '\r\n')
"\u000D", # (carriage return, '\r')
"\r", # (carriage return, '\r')
"\u000D", # (carriage return, '\r')
"\u2028", # (line separator)
"\u2029", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
UpperCamelCase__ = tokenizer_s.tokenize(a )
UpperCamelCase__ = tokenizer_r.tokenize(a )
self.assertListEqual(a , a )
def __a ( self ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCamelCase__ = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
UpperCamelCase__ = f'''{text_of_1_token} {text_of_1_token}'''
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(
a , use_fast=a , )
UpperCamelCase__ = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(a ) + 1, len(a ) + 1 + len(a )) , )
UpperCamelCase__ = f''' {text}'''
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(
a , use_fast=a , )
UpperCamelCase__ = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(a ) + 1, 1 + len(a ) + 1 + len(a )) , )
def __a ( self ):
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error in raised when the user try to load a previous version of the tokenizer.
with self.assertRaises(a ) as context:
self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer" )
self.assertTrue(
context.exception.args[0].startswith(
"The `backend_tokenizer` provided does not match the expected format." ) )
@require_ftfy
def __a ( self ):
super().test_tokenization_python_rust_equals()
def __a ( self ):
# CLIP always lower cases letters
pass
| 80
| 1
|
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
UpperCamelCase__ : Optional[int] = 16
UpperCamelCase__ : Optional[Any] = 32
def _a ( SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
return int(x / 2**20 )
class a__ :
def __enter__( self ):
"""simple docstring"""
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
__lowerCAmelCase = torch.cuda.memory_allocated()
return self
def __exit__( self , *_A ):
"""simple docstring"""
gc.collect()
torch.cuda.empty_cache()
__lowerCAmelCase = torch.cuda.memory_allocated()
__lowerCAmelCase = torch.cuda.max_memory_allocated()
__lowerCAmelCase = bamb(self.end - self.begin )
__lowerCAmelCase = bamb(self.peak - self.begin )
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def _a ( SCREAMING_SNAKE_CASE_ : Accelerator , SCREAMING_SNAKE_CASE_ : int = 16 , SCREAMING_SNAKE_CASE_ : str = "bert-base-cased" , SCREAMING_SNAKE_CASE_ : int = 3_20 , SCREAMING_SNAKE_CASE_ : int = 1_60 , ):
__lowerCAmelCase = AutoTokenizer.from_pretrained(lowerCamelCase_ )
__lowerCAmelCase = load_dataset(
"glue" , "mrpc" , split={"train": F"""train[:{n_train}]""", "validation": F"""validation[:{n_val}]"""} )
def tokenize_function(SCREAMING_SNAKE_CASE_ : Any ):
# max_length=None => use the model max length (it's actually the default)
__lowerCAmelCase = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=lowerCamelCase_ , max_length=lowerCamelCase_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
__lowerCAmelCase = datasets.map(
lowerCamelCase_ , batched=lowerCamelCase_ , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=lowerCamelCase_ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__lowerCAmelCase = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(SCREAMING_SNAKE_CASE_ : List[Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowerCamelCase_ , padding="max_length" , max_length=1_28 , return_tensors="pt" )
return tokenizer.pad(lowerCamelCase_ , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
__lowerCAmelCase = DataLoader(
tokenized_datasets["train"] , shuffle=lowerCamelCase_ , collate_fn=lowerCamelCase_ , batch_size=lowerCamelCase_ )
__lowerCAmelCase = DataLoader(
tokenized_datasets["validation"] , shuffle=lowerCamelCase_ , collate_fn=lowerCamelCase_ , batch_size=lowerCamelCase_ )
return train_dataloader, eval_dataloader
def _a ( SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Any ):
__lowerCAmelCase = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__lowerCAmelCase = config["""lr"""]
__lowerCAmelCase = int(config["num_epochs"] )
__lowerCAmelCase = int(config["seed"] )
__lowerCAmelCase = int(config["batch_size"] )
__lowerCAmelCase = args.model_name_or_path
set_seed(lowerCamelCase_ )
__lowerCAmelCase = get_dataloaders(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , args.n_train , args.n_val )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__lowerCAmelCase = AutoModelForSequenceClassification.from_pretrained(lowerCamelCase_ , return_dict=lowerCamelCase_ )
# Instantiate optimizer
__lowerCAmelCase = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
__lowerCAmelCase = optimizer_cls(params=model.parameters() , lr=lowerCamelCase_ )
if accelerator.state.deepspeed_plugin is not None:
__lowerCAmelCase = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
__lowerCAmelCase = 1
__lowerCAmelCase = (len(lowerCamelCase_ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
__lowerCAmelCase = get_linear_schedule_with_warmup(
optimizer=lowerCamelCase_ , num_warmup_steps=0 , num_training_steps=lowerCamelCase_ , )
else:
__lowerCAmelCase = DummyScheduler(lowerCamelCase_ , total_num_steps=lowerCamelCase_ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__lowerCAmelCase = accelerator.prepare(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# We need to keep track of how many total steps we have iterated over
__lowerCAmelCase = 0
# We also need to keep track of the stating epoch so files are named properly
__lowerCAmelCase = 0
# Now we train the model
__lowerCAmelCase = {}
for epoch in range(lowerCamelCase_ , lowerCamelCase_ ):
with TorchTracemalloc() as tracemalloc:
model.train()
for step, batch in enumerate(lowerCamelCase_ ):
__lowerCAmelCase = model(**lowerCamelCase_ )
__lowerCAmelCase = outputs.loss
__lowerCAmelCase = loss / gradient_accumulation_steps
accelerator.backward(lowerCamelCase_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print("Memory before entering the train : {}".format(bamb(tracemalloc.begin ) ) )
accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used ) )
accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked ) )
accelerator.print(
"Total Peak Memory consumed during the train (max): {}".format(
tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
__lowerCAmelCase = tracemalloc.peaked + bamb(tracemalloc.begin )
if args.peak_memory_upper_bound is not None:
assert (
train_total_peak_memory[F"""epoch-{epoch}"""] <= args.peak_memory_upper_bound
), "Peak memory usage exceeded the upper bound"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , "peak_memory_utilization.json" ) , "w" ) as f:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
def _a ( ):
__lowerCAmelCase = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." )
parser.add_argument(
"--model_name_or_path" , type=lowerCamelCase_ , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=lowerCamelCase_ , )
parser.add_argument(
"--output_dir" , type=lowerCamelCase_ , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
parser.add_argument(
"--peak_memory_upper_bound" , type=lowerCamelCase_ , default=lowerCamelCase_ , help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value." , )
parser.add_argument(
"--n_train" , type=lowerCamelCase_ , default=3_20 , help="Number of training examples to use." , )
parser.add_argument(
"--n_val" , type=lowerCamelCase_ , default=1_60 , help="Number of validation examples to use." , )
parser.add_argument(
"--num_epochs" , type=lowerCamelCase_ , default=1 , help="Number of train epochs." , )
__lowerCAmelCase = parser.parse_args()
__lowerCAmelCase = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
training_function(lowerCamelCase_ , lowerCamelCase_ )
if __name__ == "__main__":
main()
| 358
|
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class a__ ( snake_case__ ):
_a : Optional[int] = """new-model"""
if is_tf_available():
class a__ ( snake_case__ ):
_a : Dict = NewModelConfig
@require_tf
class a__ ( unittest.TestCase ):
@slow
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = "bert-base-cased"
__lowerCAmelCase = AutoConfig.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
__lowerCAmelCase = TFAutoModel.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
@slow
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = "bert-base-cased"
__lowerCAmelCase = AutoConfig.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
__lowerCAmelCase = TFAutoModelForPreTraining.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
@slow
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase = AutoConfig.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
__lowerCAmelCase = TFAutoModelForCausalLM.from_pretrained(_A )
__lowerCAmelCase , __lowerCAmelCase = TFAutoModelForCausalLM.from_pretrained(_A , output_loading_info=_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
@slow
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase = AutoConfig.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
__lowerCAmelCase = TFAutoModelWithLMHead.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
@slow
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase = AutoConfig.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
__lowerCAmelCase = TFAutoModelForMaskedLM.from_pretrained(_A )
__lowerCAmelCase , __lowerCAmelCase = TFAutoModelForMaskedLM.from_pretrained(_A , output_loading_info=_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
@slow
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase = AutoConfig.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
__lowerCAmelCase = TFAutoModelForSeqaSeqLM.from_pretrained(_A )
__lowerCAmelCase , __lowerCAmelCase = TFAutoModelForSeqaSeqLM.from_pretrained(_A , output_loading_info=_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
@slow
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
__lowerCAmelCase = AutoConfig.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
__lowerCAmelCase = TFAutoModelForSequenceClassification.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
@slow
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
__lowerCAmelCase = AutoConfig.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
__lowerCAmelCase = TFAutoModelForQuestionAnswering.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
@slow
@require_tensorflow_probability
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
__lowerCAmelCase = AutoConfig.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
__lowerCAmelCase = TFAutoModelForTableQuestionAnswering.from_pretrained(_A )
__lowerCAmelCase , __lowerCAmelCase = TFAutoModelForTableQuestionAnswering.from_pretrained(
_A , output_loading_info=_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = TFAutoModelWithLMHead.from_pretrained(_A )
self.assertIsInstance(_A , _A )
self.assertEqual(model.num_parameters() , 1_4_4_1_0 )
self.assertEqual(model.num_parameters(only_trainable=_A ) , 1_4_4_1_0 )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = TFAutoModelWithLMHead.from_pretrained(_A )
self.assertIsInstance(_A , _A )
self.assertEqual(model.num_parameters() , 1_4_4_1_0 )
self.assertEqual(model.num_parameters(only_trainable=_A ) , 1_4_4_1_0 )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = TFAutoModel.from_pretrained("sgugger/funnel-random-tiny" )
self.assertIsInstance(_A , _A )
__lowerCAmelCase = copy.deepcopy(model.config )
__lowerCAmelCase = ["FunnelBaseModel"]
__lowerCAmelCase = TFAutoModel.from_config(_A )
self.assertIsInstance(_A , _A )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(_A )
__lowerCAmelCase = TFAutoModel.from_pretrained(_A )
self.assertIsInstance(_A , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
try:
AutoConfig.register("new-model" , _A )
__lowerCAmelCase = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(_A ):
auto_class.register(_A , _A )
auto_class.register(_A , _A )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_A ):
auto_class.register(_A , _A )
# Now that the config is registered, it can be used as any other config with the auto-API
__lowerCAmelCase = BertModelTester(self ).get_config()
__lowerCAmelCase = NewModelConfig(**tiny_config.to_dict() )
__lowerCAmelCase = auto_class.from_config(_A )
self.assertIsInstance(_A , _A )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(_A )
__lowerCAmelCase = auto_class.from_pretrained(_A )
self.assertIsInstance(_A , _A )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
with self.assertRaisesRegex(
_A , "bert-base is not a local folder and is not a valid model identifier" ):
__lowerCAmelCase = TFAutoModel.from_pretrained("bert-base" )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
with self.assertRaisesRegex(
_A , R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
__lowerCAmelCase = TFAutoModel.from_pretrained(_A , revision="aaaaaa" )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
with self.assertRaisesRegex(
_A , "hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin" , ):
__lowerCAmelCase = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model" )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
with self.assertRaisesRegex(_A , "Use `from_pt=True` to load this model" ):
__lowerCAmelCase = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only" )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert" )
with RequestCounter() as counter:
__lowerCAmelCase = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
__lowerCAmelCase = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded" )
with RequestCounter() as counter:
__lowerCAmelCase = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
| 102
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.