| code (string, 82-54.1k chars) | code_codestyle (int64, 0-699) | style_context (string, 111-35.6k chars) | style_context_codestyle (int64, 0-699) | label (int64, 0-1) |
|---|---|---|---|---|
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import Seq2SeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, T5ForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeq2SeqDataset,
Seq2SeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
lowerCAmelCase_ = logging.getLogger(__name__)
class SummarizationModule(BaseTransformer):
    mode = "summarization"
    loss_names = ["loss"]
    metric_names = ROUGE_KEYS
    default_val_metric = "rouge2"
def __init__(self , __magic_name__ , **__magic_name__ ) -> Dict:
'''simple docstring'''
if hparams.sortish_sampler and hparams.gpus > 1:
snake_case_ : List[Any] = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError('''Dynamic Batch size does not work for multi-gpu training''' )
if hparams.sortish_sampler:
raise ValueError('''--sortish_sampler and --max_tokens_per_batch may not be used simultaneously''' )
super().__init__(__magic_name__ , num_labels=__magic_name__ , mode=self.mode , **__magic_name__ )
use_task_specific_params(self.model , '''summarization''' )
save_git_info(self.hparams.output_dir )
snake_case_ : Union[str, Any] = Path(self.output_dir ) / '''metrics.json'''
snake_case_ : Union[str, Any] = Path(self.output_dir ) / '''hparams.pkl'''
pickle_save(self.hparams , self.hparams_save_path )
snake_case_ : Any = 0
snake_case_ : Any = defaultdict(__magic_name__ )
snake_case_ : List[Any] = self.config.model_type
snake_case_ : str = self.config.tgt_vocab_size if self.model_type == '''fsmt''' else self.config.vocab_size
snake_case_ : dict = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
snake_case_ : List[Any] = {
'''train''': self.hparams.n_train,
'''val''': self.hparams.n_val,
'''test''': self.hparams.n_test,
}
snake_case_ : str = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
snake_case_ : Dict = {
'''train''': self.hparams.max_target_length,
'''val''': self.hparams.val_max_target_length,
'''test''': self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], F'''target_lens: {self.target_lens}'''
assert self.target_lens["train"] <= self.target_lens["test"], F'''target_lens: {self.target_lens}'''
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
snake_case_ : Tuple = get_git_info()['''repo_sha''']
snake_case_ : str = hparams.num_workers
snake_case_ : List[Any] = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , __magic_name__ ):
snake_case_ : str = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
snake_case_ : Any = self.decoder_start_token_id
snake_case_ : Dict = (
Seq2SeqDataset if hasattr(self.tokenizer , '''prepare_seq2seq_batch''' ) else LegacySeq2SeqDataset
)
snake_case_ : str = False
snake_case_ : List[Any] = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
snake_case_ : Any = self.hparams.eval_max_gen_length
else:
snake_case_ : Optional[Any] = self.model.config.max_length
snake_case_ : Union[str, Any] = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def lowerCamelCase (self , __magic_name__ ) -> Dict[str, List[str]]:
'''simple docstring'''
snake_case_ : Tuple = {
k: self.tokenizer.batch_decode(v.tolist() ) if '''mask''' not in k else v.shape for k, v in batch.items()
}
save_json(__magic_name__ , Path(self.output_dir ) / '''text_batch.json''' )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / '''tok_batch.json''' )
snake_case_ : int = True
return readable_batch
def lowerCamelCase (self , __magic_name__ , **__magic_name__ ) -> Tuple:
'''simple docstring'''
return self.model(__magic_name__ , **__magic_name__ )
def lowerCamelCase (self , __magic_name__ ) -> Any:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.tokenizer.batch_decode(
__magic_name__ , skip_special_tokens=__magic_name__ , clean_up_tokenization_spaces=__magic_name__ )
return lmap(str.strip , __magic_name__ )
def lowerCamelCase (self , __magic_name__ ) -> Tuple:
'''simple docstring'''
snake_case_ : int = self.tokenizer.pad_token_id
snake_case_ , snake_case_ : Tuple = batch['''input_ids'''], batch['''attention_mask''']
snake_case_ : Dict = batch['''labels''']
if isinstance(self.model , __magic_name__ ):
snake_case_ : Tuple = self.model._shift_right(__magic_name__ )
else:
snake_case_ : Any = shift_tokens_right(__magic_name__ , __magic_name__ )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
snake_case_ : Union[str, Any] = decoder_input_ids
self.save_readable_batch(__magic_name__ )
snake_case_ : List[Any] = self(__magic_name__ , attention_mask=__magic_name__ , decoder_input_ids=__magic_name__ , use_cache=__magic_name__ )
snake_case_ : List[Any] = outputs['''logits''']
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
snake_case_ : Dict = nn.CrossEntropyLoss(ignore_index=__magic_name__ )
assert lm_logits.shape[-1] == self.vocab_size
snake_case_ : List[str] = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
snake_case_ : Union[str, Any] = nn.functional.log_softmax(__magic_name__ , dim=-1 )
snake_case_ , snake_case_ : List[Any] = label_smoothed_nll_loss(
__magic_name__ , __magic_name__ , self.hparams.label_smoothing , ignore_index=__magic_name__ )
return (loss,)
@property
def lowerCamelCase (self ) -> int:
'''simple docstring'''
return self.tokenizer.pad_token_id
def lowerCamelCase (self , __magic_name__ , __magic_name__ ) -> Dict:
'''simple docstring'''
snake_case_ : Any = self._step(__magic_name__ )
snake_case_ : Union[str, Any] = dict(zip(self.loss_names , __magic_name__ ) )
# tokens per batch
snake_case_ : Union[str, Any] = batch['''input_ids'''].ne(self.pad ).sum() + batch['''labels'''].ne(self.pad ).sum()
snake_case_ : Union[str, Any] = batch['''input_ids'''].shape[0]
snake_case_ : Any = batch['''input_ids'''].eq(self.pad ).sum()
snake_case_ : Optional[int] = batch['''input_ids'''].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def lowerCamelCase (self , __magic_name__ , __magic_name__ ) -> Dict:
'''simple docstring'''
return self._generative_step(__magic_name__ )
def lowerCamelCase (self , __magic_name__ , __magic_name__="val" ) -> Dict:
'''simple docstring'''
self.step_count += 1
snake_case_ : str = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
snake_case_ : Dict = losses['''loss''']
snake_case_ : int = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ['''gen_time''', '''gen_len''']
}
snake_case_ : Tuple = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
snake_case_ : torch.FloatTensor = torch.tensor(__magic_name__ ).type_as(__magic_name__ )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(__magic_name__ )
snake_case_ : int = {F'''{prefix}_avg_{k}''': x for k, x in losses.items()}
snake_case_ : Union[str, Any] = self.step_count
self.metrics[prefix].append(__magic_name__ ) # callback writes this to self.metrics_save_path
snake_case_ : Union[str, Any] = flatten_list([x['''preds'''] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
F'''{prefix}_loss''': loss,
F'''{prefix}_{self.val_metric}''': metric_tensor,
}
def lowerCamelCase (self , __magic_name__ , __magic_name__ ) -> Dict:
'''simple docstring'''
return calculate_rouge(__magic_name__ , __magic_name__ )
def lowerCamelCase (self , __magic_name__ ) -> dict:
'''simple docstring'''
t0 = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
snake_case_ : List[Any] = self.model.generate(
batch['''input_ids'''] , attention_mask=batch['''attention_mask'''] , use_cache=__magic_name__ , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
snake_case_ : str = (time.time() - t0) / batch['''input_ids'''].shape[0]
snake_case_ : List[str] = self.ids_to_clean_text(__magic_name__ )
snake_case_ : List[str] = self.ids_to_clean_text(batch['''labels'''] )
snake_case_ : Any = self._step(__magic_name__ )
snake_case_ : List[str] = dict(zip(self.loss_names , __magic_name__ ) )
snake_case_ : Dict = self.calc_generative_metrics(__magic_name__ , __magic_name__ )
snake_case_ : Any = np.mean(lmap(__magic_name__ , __magic_name__ ) )
base_metrics.update(gen_time=__magic_name__ , gen_len=__magic_name__ , preds=__magic_name__ , target=__magic_name__ , **__magic_name__ )
return base_metrics
def lowerCamelCase (self , __magic_name__ , __magic_name__ ) -> Optional[int]:
'''simple docstring'''
return self._generative_step(__magic_name__ )
def lowerCamelCase (self , __magic_name__ ) -> Optional[Any]:
'''simple docstring'''
return self.validation_epoch_end(__magic_name__ , prefix='''test''' )
def lowerCamelCase (self , __magic_name__ ) -> Seq2SeqDataset:
'''simple docstring'''
snake_case_ : Dict = self.n_obs[type_path]
snake_case_ : Tuple = self.target_lens[type_path]
snake_case_ : Tuple = self.dataset_class(
self.tokenizer , type_path=__magic_name__ , n_obs=__magic_name__ , max_target_length=__magic_name__ , **self.dataset_kwargs , )
return dataset
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ = False ) -> DataLoader:
'''simple docstring'''
snake_case_ : Tuple = self.get_dataset(__magic_name__ )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
snake_case_ : Union[str, Any] = dataset.make_sortish_sampler(__magic_name__ , distributed=self.hparams.gpus > 1 )
return DataLoader(
__magic_name__ , batch_size=__magic_name__ , collate_fn=dataset.collate_fn , shuffle=__magic_name__ , num_workers=self.num_workers , sampler=__magic_name__ , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
snake_case_ : Tuple = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
__magic_name__ , batch_sampler=__magic_name__ , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
__magic_name__ , batch_size=__magic_name__ , collate_fn=dataset.collate_fn , shuffle=__magic_name__ , num_workers=self.num_workers , sampler=__magic_name__ , )
def lowerCamelCase (self ) -> DataLoader:
'''simple docstring'''
snake_case_ : List[str] = self.get_dataloader('''train''' , batch_size=self.hparams.train_batch_size , shuffle=__magic_name__ )
return dataloader
def lowerCamelCase (self ) -> DataLoader:
'''simple docstring'''
return self.get_dataloader('''val''' , batch_size=self.hparams.eval_batch_size )
def lowerCamelCase (self ) -> DataLoader:
'''simple docstring'''
return self.get_dataloader('''test''' , batch_size=self.hparams.eval_batch_size )
@staticmethod
def lowerCamelCase (__magic_name__ , __magic_name__ ) -> Optional[Any]:
'''simple docstring'''
BaseTransformer.add_model_specific_args(__magic_name__ , __magic_name__ )
add_generic_args(__magic_name__ , __magic_name__ )
parser.add_argument(
'''--max_source_length''' , default=1024 , type=__magic_name__ , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--max_target_length''' , default=56 , type=__magic_name__ , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--val_max_target_length''' , default=142 , type=__magic_name__ , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--test_max_target_length''' , default=142 , type=__magic_name__ , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument('''--freeze_encoder''' , action='''store_true''' )
parser.add_argument('''--freeze_embeds''' , action='''store_true''' )
parser.add_argument('''--sortish_sampler''' , action='''store_true''' , default=__magic_name__ )
parser.add_argument('''--overwrite_output_dir''' , action='''store_true''' , default=__magic_name__ )
parser.add_argument('''--max_tokens_per_batch''' , type=__magic_name__ , default=__magic_name__ )
parser.add_argument('''--logger_name''' , type=__magic_name__ , choices=['''default''', '''wandb''', '''wandb_shared'''] , default='''default''' )
parser.add_argument('''--n_train''' , type=__magic_name__ , default=-1 , required=__magic_name__ , help='''# examples. -1 means use all.''' )
parser.add_argument('''--n_val''' , type=__magic_name__ , default=500 , required=__magic_name__ , help='''# examples. -1 means use all.''' )
parser.add_argument('''--n_test''' , type=__magic_name__ , default=-1 , required=__magic_name__ , help='''# examples. -1 means use all.''' )
parser.add_argument(
'''--task''' , type=__magic_name__ , default='''summarization''' , required=__magic_name__ , help='''# examples. -1 means use all.''' )
parser.add_argument('''--label_smoothing''' , type=__magic_name__ , default=0.0 , required=__magic_name__ )
parser.add_argument('''--src_lang''' , type=__magic_name__ , default='''''' , required=__magic_name__ )
parser.add_argument('''--tgt_lang''' , type=__magic_name__ , default='''''' , required=__magic_name__ )
parser.add_argument('''--eval_beams''' , type=__magic_name__ , default=__magic_name__ , required=__magic_name__ )
parser.add_argument(
'''--val_metric''' , type=__magic_name__ , default=__magic_name__ , required=__magic_name__ , choices=['''bleu''', '''rouge2''', '''loss''', None] )
parser.add_argument('''--eval_max_gen_length''' , type=__magic_name__ , default=__magic_name__ , help='''never generate more than n tokens''' )
parser.add_argument('''--save_top_k''' , type=__magic_name__ , default=1 , required=__magic_name__ , help='''How many checkpoints to save''' )
parser.add_argument(
'''--early_stopping_patience''' , type=__magic_name__ , default=-1 , required=__magic_name__ , help=(
'''-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So'''
''' val_check_interval will effect it.'''
) , )
return parser
class TranslationModule(SummarizationModule):
    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"
def __init__(self , __magic_name__ , **__magic_name__ ) -> List[str]:
'''simple docstring'''
super().__init__(__magic_name__ , **__magic_name__ )
snake_case_ : Union[str, Any] = hparams.src_lang
snake_case_ : Optional[int] = hparams.tgt_lang
def lowerCamelCase (self , __magic_name__ , __magic_name__ ) -> dict:
'''simple docstring'''
return calculate_bleu(__magic_name__ , __magic_name__ )
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase=None ) -> SummarizationModule:
"""simple docstring"""
Path(args.output_dir ).mkdir(exist_ok=_UpperCamelCase )
check_output_dir(_UpperCamelCase , expected_items=3 )
if model is None:
if "summarization" in args.task:
snake_case_ : SummarizationModule = SummarizationModule(_UpperCamelCase )
else:
snake_case_ : SummarizationModule = TranslationModule(_UpperCamelCase )
snake_case_ : int = Path(args.data_dir ).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir ).startswith('''/tmp''' )
or str(args.output_dir ).startswith('''/var''' )
):
snake_case_ : List[str] = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
snake_case_ : Dict = os.environ.get('''WANDB_PROJECT''' , _UpperCamelCase )
snake_case_ : str = WandbLogger(name=model.output_dir.name , project=_UpperCamelCase )
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
snake_case_ : str = WandbLogger(name=model.output_dir.name , project=f'''hf_{dataset}''' )
if args.early_stopping_patience >= 0:
snake_case_ : List[str] = get_early_stopping_callback(model.val_metric , args.early_stopping_patience )
else:
snake_case_ : Tuple = False
snake_case_ : List[str] = args.val_metric == '''loss'''
snake_case_ : pl.Trainer = generic_train(
_UpperCamelCase , _UpperCamelCase , logging_callback=Seq2SeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback(
args.output_dir , model.val_metric , args.save_top_k , _UpperCamelCase ) , early_stopping_callback=_UpperCamelCase , logger=_UpperCamelCase , )
pickle_save(model.hparams , model.output_dir / '''hparams.pkl''' )
if not args.do_predict:
return model
snake_case_ : List[Any] = ''''''
snake_case_ : Union[str, Any] = sorted(glob.glob(os.path.join(args.output_dir , '''*.ckpt''' ) , recursive=_UpperCamelCase ) )
if checkpoints:
snake_case_ : int = checkpoints[-1]
snake_case_ : Tuple = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams )
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
lowerCAmelCase_ = pl.Trainer.add_argparse_args(parser)
lowerCAmelCase_ = SummarizationModule.add_model_specific_args(parser, os.getcwd())
lowerCAmelCase_ = parser.parse_args()
main(args)
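The `_step` method above either applies plain cross-entropy or defers to the `label_smoothed_nll_loss` helper imported from `utils`. As a reference, here is a minimal sketch of what such a helper typically computes, written in the fairseq style; it is an illustration under that assumption, not necessarily the exact code behind the import.

```python
import torch

def label_smoothed_nll_loss_sketch(lprobs, target, epsilon, ignore_index=-100):
    # lprobs: (..., vocab) log-probabilities; target: (...) gold token ids
    if target.dim() == lprobs.dim() - 1:
        target = target.unsqueeze(-1)
    nll_loss = -lprobs.gather(dim=-1, index=target)   # log-prob of the gold token
    smooth_loss = -lprobs.sum(dim=-1, keepdim=True)   # summed log-probs over the vocab
    if ignore_index is not None:
        pad_mask = target.eq(ignore_index)
        nll_loss.masked_fill_(pad_mask, 0.0)
        smooth_loss.masked_fill_(pad_mask, 0.0)
    nll_loss, smooth_loss = nll_loss.sum(), smooth_loss.sum()
    eps_i = epsilon / lprobs.size(-1)
    # blend the true-label loss with a uniform distribution over the vocabulary
    loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
    return loss, nll_loss
```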
| 60 |
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k
def _set(k, v):
    return setitem, k, v
def _del(k):
    return delitem, k
def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
lowerCAmelCase_ = (
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
)
lowerCAmelCase_ = [
_set('''key_a''', '''val_a'''),
_set('''key_a''', '''val_b'''),
]
lowerCAmelCase_ = [
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
_del('''key_a'''),
_del('''key_b'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
]
lowerCAmelCase_ = [
_get('''key_a'''),
_del('''key_a'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
_del('''key_a'''),
_get('''key_a'''),
]
lowerCAmelCase_ = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
lowerCAmelCase_ = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set('''key_a''', '''val_b'''),
]
@pytest.mark.parametrize(
'''operations''' , (
pytest.param(_add_items , id='''add items''' ),
pytest.param(_overwrite_items , id='''overwrite items''' ),
pytest.param(_delete_items , id='''delete items''' ),
pytest.param(_access_absent_items , id='''access absent items''' ),
pytest.param(_add_with_resize_up , id='''add with resize up''' ),
pytest.param(_add_with_resize_down , id='''add with resize down''' ),
) , )
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())
def test_no_new_methods_was_added_to_api():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}
    assert dict_public_names > hash_public_names
| 60 | 1 |
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
lowerCAmelCase_ = logging.get_logger(__name__)
class ZeroShotClassificationArgumentHandler(ArgumentHandler):
def lowerCamelCase (self , __magic_name__ ) -> Optional[int]:
'''simple docstring'''
if isinstance(__magic_name__ , __magic_name__ ):
snake_case_ : Optional[Any] = [label.strip() for label in labels.split(''',''' ) if label.strip()]
return labels
def __call__(self , __magic_name__ , __magic_name__ , __magic_name__ ) -> Optional[int]:
'''simple docstring'''
if len(__magic_name__ ) == 0 or len(__magic_name__ ) == 0:
raise ValueError('''You must include at least one label and at least one sequence.''' )
if hypothesis_template.format(labels[0] ) == hypothesis_template:
raise ValueError(
(
'''The provided hypothesis_template "{}" was not able to be formatted with the target labels. '''
'''Make sure the passed template includes formatting syntax such as {{}} where the label should go.'''
).format(__magic_name__ ) )
if isinstance(__magic_name__ , __magic_name__ ):
snake_case_ : Union[str, Any] = [sequences]
snake_case_ : Dict = []
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(__magic_name__ )] for label in labels] )
return sequence_pairs, sequences
@add_end_docstrings(_a )
class __lowerCAmelCase ( _a ):
def __init__(self , __magic_name__=ZeroShotClassificationArgumentHandler() , *__magic_name__ , **__magic_name__ ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : str = args_parser
super().__init__(*__magic_name__ , **__magic_name__ )
if self.entailment_id == -1:
logger.warning(
'''Failed to determine \'entailment\' label id from the label2id mapping in the model config. Setting to '''
'''-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.''' )
@property
def lowerCamelCase (self ) -> Tuple:
'''simple docstring'''
for label, ind in self.model.config.label2id.items():
if label.lower().startswith('''entail''' ):
return ind
return -1
def lowerCamelCase (self , __magic_name__ , __magic_name__=True , __magic_name__=True , __magic_name__=TruncationStrategy.ONLY_FIRST , **__magic_name__ ) -> int:
'''simple docstring'''
snake_case_ : List[Any] = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
'''Tokenizer was not supporting padding necessary for zero-shot, attempting to use '''
''' `pad_token=eos_token`''' )
snake_case_ : List[str] = self.tokenizer.eos_token
try:
snake_case_ : List[str] = self.tokenizer(
__magic_name__ , add_special_tokens=__magic_name__ , return_tensors=__magic_name__ , padding=__magic_name__ , truncation=__magic_name__ , )
except Exception as e:
if "too short" in str(__magic_name__ ):
# tokenizers might yell that we want to truncate
# to a value that is not even reached by the input.
# In that case we don't want to truncate.
# It seems there's not a really better way to catch that
# exception.
snake_case_ : Union[str, Any] = self.tokenizer(
__magic_name__ , add_special_tokens=__magic_name__ , return_tensors=__magic_name__ , padding=__magic_name__ , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def lowerCamelCase (self , **__magic_name__ ) -> int:
'''simple docstring'''
if kwargs.get('''multi_class''' , __magic_name__ ) is not None:
snake_case_ : Optional[int] = kwargs['''multi_class''']
logger.warning(
'''The `multi_class` argument has been deprecated and renamed to `multi_label`. '''
'''`multi_class` will be removed in a future version of Transformers.''' )
snake_case_ : Optional[int] = {}
if "candidate_labels" in kwargs:
snake_case_ : Any = self._args_parser._parse_labels(kwargs['''candidate_labels'''] )
if "hypothesis_template" in kwargs:
snake_case_ : str = kwargs['''hypothesis_template''']
snake_case_ : Optional[int] = {}
if "multi_label" in kwargs:
snake_case_ : Optional[int] = kwargs['''multi_label''']
return preprocess_params, {}, postprocess_params
def __call__(self , __magic_name__ , *__magic_name__ , **__magic_name__ , ) -> Dict:
'''simple docstring'''
if len(__magic_name__ ) == 0:
pass
elif len(__magic_name__ ) == 1 and "candidate_labels" not in kwargs:
snake_case_ : Any = args[0]
else:
raise ValueError(F'''Unable to understand extra arguments {args}''' )
return super().__call__(__magic_name__ , **__magic_name__ )
def lowerCamelCase (self , __magic_name__ , __magic_name__=None , __magic_name__="This example is {}." ) -> int:
'''simple docstring'''
snake_case_ , snake_case_ : List[str] = self._args_parser(__magic_name__ , __magic_name__ , __magic_name__ )
for i, (candidate_label, sequence_pair) in enumerate(zip(__magic_name__ , __magic_name__ ) ):
snake_case_ : List[str] = self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(__magic_name__ ) - 1,
**model_input,
}
def lowerCamelCase (self , __magic_name__ ) -> int:
'''simple docstring'''
snake_case_ : int = inputs['''candidate_label''']
snake_case_ : Dict = inputs['''sequence''']
snake_case_ : Optional[Any] = {k: inputs[k] for k in self.tokenizer.model_input_names}
snake_case_ : int = self.model(**__magic_name__ )
snake_case_ : List[str] = {
'''candidate_label''': candidate_label,
'''sequence''': sequence,
'''is_last''': inputs['''is_last'''],
**outputs,
}
return model_outputs
def lowerCamelCase (self , __magic_name__ , __magic_name__=False ) -> str:
'''simple docstring'''
snake_case_ : str = [outputs['''candidate_label'''] for outputs in model_outputs]
snake_case_ : Dict = [outputs['''sequence'''] for outputs in model_outputs]
snake_case_ : str = np.concatenate([output['''logits'''].numpy() for output in model_outputs] )
snake_case_ : Union[str, Any] = logits.shape[0]
snake_case_ : Optional[int] = len(__magic_name__ )
snake_case_ : Optional[Any] = N // n
snake_case_ : List[str] = logits.reshape((num_sequences, n, -1) )
if multi_label or len(__magic_name__ ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
snake_case_ : Optional[Any] = self.entailment_id
snake_case_ : Optional[int] = -1 if entailment_id == 0 else 0
snake_case_ : Tuple = reshaped_outputs[..., [contradiction_id, entailment_id]]
snake_case_ : Tuple = np.exp(__magic_name__ ) / np.exp(__magic_name__ ).sum(-1 , keepdims=__magic_name__ )
snake_case_ : Optional[int] = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
snake_case_ : Dict = reshaped_outputs[..., self.entailment_id]
snake_case_ : Union[str, Any] = np.exp(__magic_name__ ) / np.exp(__magic_name__ ).sum(-1 , keepdims=__magic_name__ )
snake_case_ : Dict = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
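For context, a pipeline of this kind is normally driven through `transformers.pipeline`. A hedged usage sketch follows; the checkpoint name is an assumption, and any NLI model whose config maps a label starting with `entail` behaves the same way.

```python
from transformers import pipeline

classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
result = classifier(
    "The team shipped the new release on Friday.",
    candidate_labels=["software", "sports", "cooking"],
    hypothesis_template="This example is {}.",
    multi_label=False,
)
print(result["labels"][0], result["scores"][0])  # best label first
```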
| 60 |
from __future__ import annotations
def bucket_sort(my_list: list) -> list:
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]
    for i in my_list:
        buckets[int(i - min_value)].append(i)
    return [v for bucket in buckets for v in sorted(bucket)]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
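Two more calls, consistent with the implementation above, showing that inputs only need to map onto integer bucket offsets:

```python
print(bucket_sort([0.4, 1.2, -0.5, 3.3]))  # [-0.5, 0.4, 1.2, 3.3]
print(bucket_sort([]))                     # []
```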
| 60 | 1 |
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
lowerCAmelCase_ = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
lowerCAmelCase_ = '''main'''
# Default branch name
lowerCAmelCase_ = '''f2c752cfc5c0ab6f4bdec59acea69eefbee381c2'''
# One particular commit (not the top of `main`)
lowerCAmelCase_ = '''aaaaaaa'''
# This commit does not exist, so we should 404.
lowerCAmelCase_ = '''d9e9f15bc825e4b2c9249e9578f884bbcb5e3684'''
# Sha-1 of config.json on the top of `main`, for checking purposes
lowerCAmelCase_ = '''4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3'''
@contextlib.contextmanager
def context_en():
"""simple docstring"""
print('''Welcome!''' )
yield
print('''Bye!''' )
@contextlib.contextmanager
def context_fr():
"""simple docstring"""
print('''Bonjour!''' )
yield
print('''Au revoir!''' )
class __lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase (self ) -> Tuple:
'''simple docstring'''
assert transformers.__spec__ is not None
assert importlib.util.find_spec('''transformers''' ) is not None
class __lowerCAmelCase ( unittest.TestCase ):
@unittest.mock.patch('''sys.stdout''' , new_callable=io.StringIO )
def lowerCamelCase (self , __magic_name__ ) -> Any:
'''simple docstring'''
with ContextManagers([] ):
print('''Transformers are awesome!''' )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() , '''Transformers are awesome!\n''' )
@unittest.mock.patch('''sys.stdout''' , new_callable=io.StringIO )
def lowerCamelCase (self , __magic_name__ ) -> List[str]:
'''simple docstring'''
with ContextManagers([context_en()] ):
print('''Transformers are awesome!''' )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , '''Welcome!\nTransformers are awesome!\nBye!\n''' )
@unittest.mock.patch('''sys.stdout''' , new_callable=io.StringIO )
def lowerCamelCase (self , __magic_name__ ) -> Tuple:
'''simple docstring'''
with ContextManagers([context_fr(), context_en()] ):
print('''Transformers are awesome!''' )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , '''Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n''' )
@require_torch
def lowerCamelCase (self ) -> Union[str, Any]:
'''simple docstring'''
self.assertEqual(find_labels(__magic_name__ ) , ['''labels'''] )
self.assertEqual(find_labels(__magic_name__ ) , ['''labels''', '''next_sentence_label'''] )
self.assertEqual(find_labels(__magic_name__ ) , ['''start_positions''', '''end_positions'''] )
class __lowerCAmelCase ( _a ):
pass
self.assertEqual(find_labels(__magic_name__ ) , ['''labels'''] )
@require_tf
def lowerCamelCase (self ) -> str:
'''simple docstring'''
self.assertEqual(find_labels(__magic_name__ ) , ['''labels'''] )
self.assertEqual(find_labels(__magic_name__ ) , ['''labels''', '''next_sentence_label'''] )
self.assertEqual(find_labels(__magic_name__ ) , ['''start_positions''', '''end_positions'''] )
class __lowerCAmelCase ( _a ):
pass
self.assertEqual(find_labels(__magic_name__ ) , ['''labels'''] )
@require_flax
def lowerCamelCase (self ) -> str:
'''simple docstring'''
self.assertEqual(find_labels(__magic_name__ ) , [] )
self.assertEqual(find_labels(__magic_name__ ) , [] )
self.assertEqual(find_labels(__magic_name__ ) , [] )
class __lowerCAmelCase ( _a ):
pass
self.assertEqual(find_labels(__magic_name__ ) , [] )
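The stdout tests above rely on `transformers.utils.ContextManagers`, which enters a list of context managers as a single combined context. A minimal sketch of that behaviour (illustrative; not claimed to be the library's exact implementation):

```python
from contextlib import ExitStack

class ContextManagersSketch:
    """Enter a list of context managers as one combined context."""

    def __init__(self, context_managers):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__(self):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__(self, *exc_info):
        self.stack.__exit__(*exc_info)
```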
| 60 |
import tensorflow as tf
from ...tf_utils import shape_list
class __lowerCAmelCase ( tf.keras.layers.Layer ):
def __init__(self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=1 , __magic_name__=False , **__magic_name__ ) -> Dict:
'''simple docstring'''
super().__init__(**__magic_name__ )
snake_case_ : List[Any] = vocab_size
snake_case_ : Dict = d_embed
snake_case_ : Union[str, Any] = d_proj
snake_case_ : str = cutoffs + [vocab_size]
snake_case_ : int = [0] + self.cutoffs
snake_case_ : Optional[int] = div_val
snake_case_ : int = self.cutoffs[0]
snake_case_ : Any = len(self.cutoffs ) - 1
snake_case_ : Union[str, Any] = self.shortlist_size + self.n_clusters
snake_case_ : str = keep_order
snake_case_ : int = []
snake_case_ : Union[str, Any] = []
def lowerCamelCase (self , __magic_name__ ) -> Union[str, Any]:
'''simple docstring'''
if self.n_clusters > 0:
snake_case_ : Tuple = self.add_weight(
shape=(self.n_clusters, self.d_embed) , initializer='''zeros''' , trainable=__magic_name__ , name='''cluster_weight''' )
snake_case_ : Optional[Any] = self.add_weight(
shape=(self.n_clusters,) , initializer='''zeros''' , trainable=__magic_name__ , name='''cluster_bias''' )
if self.div_val == 1:
for i in range(len(self.cutoffs ) ):
if self.d_proj != self.d_embed:
snake_case_ : List[str] = self.add_weight(
shape=(self.d_embed, self.d_proj) , initializer='''zeros''' , trainable=__magic_name__ , name=F'''out_projs_._{i}''' , )
self.out_projs.append(__magic_name__ )
else:
self.out_projs.append(__magic_name__ )
snake_case_ : Optional[Any] = self.add_weight(
shape=(self.vocab_size, self.d_embed) , initializer='''zeros''' , trainable=__magic_name__ , name=F'''out_layers_._{i}_._weight''' , )
snake_case_ : List[str] = self.add_weight(
shape=(self.vocab_size,) , initializer='''zeros''' , trainable=__magic_name__ , name=F'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
else:
for i in range(len(self.cutoffs ) ):
snake_case_ , snake_case_ : Optional[int] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
snake_case_ : Optional[Any] = self.d_embed // (self.div_val**i)
snake_case_ : int = self.add_weight(
shape=(d_emb_i, self.d_proj) , initializer='''zeros''' , trainable=__magic_name__ , name=F'''out_projs_._{i}''' )
self.out_projs.append(__magic_name__ )
snake_case_ : int = self.add_weight(
shape=(r_idx - l_idx, d_emb_i) , initializer='''zeros''' , trainable=__magic_name__ , name=F'''out_layers_._{i}_._weight''' , )
snake_case_ : Any = self.add_weight(
shape=(r_idx - l_idx,) , initializer='''zeros''' , trainable=__magic_name__ , name=F'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
super().build(__magic_name__ )
@staticmethod
def lowerCamelCase (__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None ) -> str:
'''simple docstring'''
snake_case_ : Union[str, Any] = x
if proj is not None:
snake_case_ : List[str] = tf.einsum('''ibd,ed->ibe''' , __magic_name__ , __magic_name__ )
return tf.einsum('''ibd,nd->ibn''' , __magic_name__ , __magic_name__ ) + b
@staticmethod
def lowerCamelCase (__magic_name__ , __magic_name__ ) -> Any:
'''simple docstring'''
snake_case_ : Union[str, Any] = shape_list(__magic_name__ )
snake_case_ : Tuple = tf.range(lp_size[0] , dtype=target.dtype )
snake_case_ : Dict = tf.stack([r, target] , 1 )
return tf.gather_nd(__magic_name__ , __magic_name__ )
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__=True , __magic_name__=False ) -> str:
'''simple docstring'''
snake_case_ : Optional[Any] = 0
if self.n_clusters == 0:
snake_case_ : Any = self._logit(__magic_name__ , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
if target is not None:
snake_case_ : Union[str, Any] = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=__magic_name__ , logits=__magic_name__ )
snake_case_ : Optional[Any] = tf.nn.log_softmax(__magic_name__ , axis=-1 )
else:
snake_case_ : Optional[int] = shape_list(__magic_name__ )
snake_case_ : int = []
snake_case_ : List[Any] = tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
snake_case_ , snake_case_ : Optional[int] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
snake_case_ : str = (target >= l_idx) & (target < r_idx)
snake_case_ : Dict = tf.where(__magic_name__ )
snake_case_ : List[str] = tf.boolean_mask(__magic_name__ , __magic_name__ ) - l_idx
if self.div_val == 1:
snake_case_ : Any = self.out_layers[0][0][l_idx:r_idx]
snake_case_ : Dict = self.out_layers[0][1][l_idx:r_idx]
else:
snake_case_ : Union[str, Any] = self.out_layers[i][0]
snake_case_ : int = self.out_layers[i][1]
if i == 0:
snake_case_ : List[Any] = tf.concat([cur_W, self.cluster_weight] , 0 )
snake_case_ : Tuple = tf.concat([cur_b, self.cluster_bias] , 0 )
snake_case_ : Optional[int] = self._logit(__magic_name__ , __magic_name__ , __magic_name__ , self.out_projs[0] )
snake_case_ : Any = tf.nn.log_softmax(__magic_name__ )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
snake_case_ : Optional[Any] = tf.boolean_mask(__magic_name__ , __magic_name__ )
snake_case_ : Tuple = self._gather_logprob(__magic_name__ , __magic_name__ )
else:
snake_case_ : Optional[int] = self._logit(__magic_name__ , __magic_name__ , __magic_name__ , self.out_projs[i] )
snake_case_ : Union[str, Any] = tf.nn.log_softmax(__magic_name__ )
snake_case_ : Optional[Any] = self.cutoffs[0] + i - 1 # No probability for the head cluster
snake_case_ : Optional[int] = head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(__magic_name__ )
if target is not None:
snake_case_ : Any = tf.boolean_mask(__magic_name__ , __magic_name__ )
snake_case_ : Optional[Any] = tf.boolean_mask(__magic_name__ , __magic_name__ )
snake_case_ : str = self._gather_logprob(__magic_name__ , __magic_name__ )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(__magic_name__ , -cur_logprob , shape_list(__magic_name__ ) )
snake_case_ : str = tf.concat(__magic_name__ , axis=-1 )
if target is not None:
if return_mean:
snake_case_ : int = tf.reduce_mean(__magic_name__ )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(__magic_name__ )
# Log the loss as a metric (we could log arbitrary metrics,
# including different metrics for training and inference.
self.add_metric(__magic_name__ , name=self.name , aggregation='''mean''' if return_mean else '''''' )
return out
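The `_gather_logprob` static method above stacks each row index with its target id and uses `tf.gather_nd` to pull out the log-probability assigned to the gold token. A tiny self-contained illustration of that indexing pattern (values chosen purely for the example):

```python
import tensorflow as tf

logprob = tf.math.log(tf.constant([[0.7, 0.2, 0.1],
                                   [0.1, 0.8, 0.1]]))
target = tf.constant([0, 1])
rows = tf.range(tf.shape(logprob)[0], dtype=target.dtype)
idx = tf.stack([rows, target], axis=1)   # [[0, 0], [1, 1]]
picked = tf.gather_nd(logprob, idx)      # log(0.7), log(0.8)
print(picked.numpy())
```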
| 60 | 1 |
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
def __init__(self , __magic_name__ , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__="resnet50" , __magic_name__=3 , __magic_name__=32 , __magic_name__=3 , __magic_name__=True , __magic_name__=True , ) -> str:
'''simple docstring'''
snake_case_ : int = parent
snake_case_ : Union[str, Any] = out_indices if out_indices is not None else [4]
snake_case_ : Union[str, Any] = stage_names
snake_case_ : Tuple = out_features
snake_case_ : Any = backbone
snake_case_ : Dict = batch_size
snake_case_ : Any = image_size
snake_case_ : Dict = num_channels
snake_case_ : int = use_pretrained_backbone
snake_case_ : Any = is_training
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ : Optional[int] = self.get_config()
return config, pixel_values
def lowerCamelCase (self ) -> int:
'''simple docstring'''
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def lowerCamelCase (self , __magic_name__ , __magic_name__ ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Optional[Any] = TimmBackbone(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
snake_case_ : List[Any] = model(__magic_name__ )
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
snake_case_ : Dict = self.prepare_config_and_inputs()
snake_case_ , snake_case_ : Any = config_and_inputs
snake_case_ : Optional[int] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class __lowerCAmelCase ( _a, _a, _a, unittest.TestCase ):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
lowerCamelCase_ : int = False
lowerCamelCase_ : int = False
lowerCamelCase_ : str = False
lowerCamelCase_ : Any = False
def lowerCamelCase (self ) -> Any:
'''simple docstring'''
snake_case_ : str = TimmBackboneModelTester(self )
snake_case_ : int = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ )
def lowerCamelCase (self ) -> int:
'''simple docstring'''
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase (self ) -> List[Any]:
'''simple docstring'''
snake_case_ : Tuple = '''resnet18'''
snake_case_ : List[str] = '''microsoft/resnet-18'''
snake_case_ : Union[str, Any] = AutoBackbone.from_pretrained(__magic_name__ , use_timm_backbone=__magic_name__ )
snake_case_ : Any = AutoBackbone.from_pretrained(__magic_name__ )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
snake_case_ : Optional[int] = AutoBackbone.from_pretrained(__magic_name__ , use_timm_backbone=__magic_name__ , out_indices=[1, 2, 3] )
snake_case_ : Dict = AutoBackbone.from_pretrained(__magic_name__ , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip('''TimmBackbone doesn\'t support feed forward chunking''' )
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip('''TimmBackbone doesn\'t have num_hidden_layers attribute''' )
def lowerCamelCase (self ) -> int:
'''simple docstring'''
pass
@unittest.skip('''TimmBackbone initialization is managed on the timm side''' )
def lowerCamelCase (self ) -> Tuple:
'''simple docstring'''
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def lowerCamelCase (self ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def lowerCamelCase (self ) -> Tuple:
'''simple docstring'''
pass
@unittest.skip('''TimmBackbone model cannot be created without specifying a backbone checkpoint''' )
def lowerCamelCase (self ) -> Tuple:
'''simple docstring'''
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def lowerCamelCase (self ) -> int:
'''simple docstring'''
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def lowerCamelCase (self ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def lowerCamelCase (self ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def lowerCamelCase (self ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def lowerCamelCase (self ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip('''TimmBackbone doesn\'t have hidden size info in its configuration.''' )
def lowerCamelCase (self ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip('''TimmBackbone doesn\'t support output_attentions.''' )
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip('''Safetensors is not supported by timm.''' )
def lowerCamelCase (self ) -> Tuple:
'''simple docstring'''
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowerCamelCase (self ) -> str:
'''simple docstring'''
pass
def lowerCamelCase (self ) -> List[str]:
'''simple docstring'''
snake_case_ , snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : Tuple = model_class(__magic_name__ )
snake_case_ : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ : Optional[Any] = [*signature.parameters.keys()]
snake_case_ : Optional[int] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __magic_name__ )
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ , snake_case_ : str = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : int = True
snake_case_ : Any = self.has_attentions
# no need to test all models as different heads yield the same functionality
snake_case_ : List[str] = self.all_model_classes[0]
snake_case_ : Union[str, Any] = model_class(__magic_name__ )
model.to(__magic_name__ )
snake_case_ : Tuple = self._prepare_for_class(__magic_name__ , __magic_name__ )
snake_case_ : Optional[int] = model(**__magic_name__ )
snake_case_ : Union[str, Any] = outputs[0][-1]
# Encoder-/Decoder-only models
snake_case_ : Optional[int] = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
snake_case_ : Dict = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=__magic_name__ )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def lowerCamelCase (self ) -> List[Any]:
'''simple docstring'''
snake_case_ , snake_case_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : List[Any] = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
snake_case_ : Tuple = model(**__magic_name__ )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
snake_case_ : List[Any] = copy.deepcopy(__magic_name__ )
snake_case_ : List[Any] = None
snake_case_ : Tuple = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
snake_case_ : int = model(**__magic_name__ )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
snake_case_ : Optional[Any] = copy.deepcopy(__magic_name__ )
snake_case_ : List[Any] = False
snake_case_ : Tuple = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
snake_case_ : Dict = model(**__magic_name__ )
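For reference, the behaviour these tests exercise can be reproduced directly, assuming `timm` is installed; the `resnet18` checkpoint matches the one used in the test above, and the rest follows the public `AutoBackbone` API.

```python
import torch
from transformers import AutoBackbone

backbone = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True, out_indices=(1, 2, 3))
pixel_values = torch.rand(1, 3, 224, 224)
with torch.no_grad():
    outputs = backbone(pixel_values)
for feature_map in outputs.feature_maps:
    print(feature_map.shape)  # one feature map per requested stage
```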
| 60 |
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('''<YOUR MESSAGE BODY>''', '''<SLACK CHANNEL URL>''')
| 60 | 1 |
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
lowerCAmelCase_ = True
except ImportError:
lowerCAmelCase_ = False
lowerCAmelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
def add_new_model_command_factory(args):
"""simple docstring"""
return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class AddNewModelCommand(BaseTransformersCLICommand):
@staticmethod
def lowerCamelCase (__magic_name__ ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Optional[Any] = parser.add_parser('''add-new-model''' )
add_new_model_parser.add_argument('''--testing''' , action='''store_true''' , help='''If in testing mode.''' )
add_new_model_parser.add_argument('''--testing_file''' , type=__magic_name__ , help='''Configuration file on which to run.''' )
add_new_model_parser.add_argument(
'''--path''' , type=__magic_name__ , help='''Path to cookiecutter. Should only be used for testing purposes.''' )
add_new_model_parser.set_defaults(func=__magic_name__ )
def __init__(self , __magic_name__ , __magic_name__ , __magic_name__=None , *__magic_name__ ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Optional[Any] = testing
snake_case_ : Tuple = testing_file
snake_case_ : int = path
def lowerCamelCase (self ) -> Tuple:
'''simple docstring'''
warnings.warn(
'''The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '''
'''It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '''
'''checks, you should use `transformers-cli add-new-model-like` instead.''' )
if not _has_cookiecutter:
raise ImportError(
'''Model creation dependencies are required to use the `add_new_model` command. Install them by running '''
'''the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n''' )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
snake_case_ : Union[str, Any] = [directory for directory in os.listdir() if '''cookiecutter-template-''' == directory[:22]]
if len(__magic_name__ ) > 0:
raise ValueError(
'''Several directories starting with `cookiecutter-template-` in current working directory. '''
'''Please clean your directory by removing all folders starting with `cookiecutter-template-` or '''
'''change your working directory.''' )
snake_case_ : Dict = (
Path(__magic_name__ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
snake_case_ : Optional[Any] = path_to_transformer_root / '''templates''' / '''adding_a_new_model'''
# Execute cookiecutter
if not self._testing:
cookiecutter(str(__magic_name__ ) )
else:
with open(self._testing_file , '''r''' ) as configuration_file:
snake_case_ : Any = json.load(__magic_name__ )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ) , no_input=__magic_name__ , extra_context=__magic_name__ , )
snake_case_ : Any = [directory for directory in os.listdir() if '''cookiecutter-template-''' in directory[:22]][0]
# Retrieve configuration
with open(directory + '''/configuration.json''' , '''r''' ) as configuration_file:
snake_case_ : Dict = json.load(__magic_name__ )
snake_case_ : Tuple = configuration['''lowercase_modelname''']
snake_case_ : Optional[Any] = configuration['''generate_tensorflow_pytorch_and_flax''']
os.remove(F'''{directory}/configuration.json''' )
snake_case_ : Optional[int] = '''PyTorch''' in generate_tensorflow_pytorch_and_flax
snake_case_ : List[Any] = '''TensorFlow''' in generate_tensorflow_pytorch_and_flax
snake_case_ : Any = '''Flax''' in generate_tensorflow_pytorch_and_flax
snake_case_ : List[Any] = F'''{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'''
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
os.makedirs(F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}''' , exist_ok=__magic_name__ )
# Tests require submodules as they have parent imports
with open(F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py''' , '''w''' ):
pass
shutil.move(
F'''{directory}/__init__.py''' , F'''{model_dir}/__init__.py''' , )
shutil.move(
F'''{directory}/configuration_{lowercase_model_name}.py''' , F'''{model_dir}/configuration_{lowercase_model_name}.py''' , )
def remove_copy_lines(__magic_name__ ):
with open(__magic_name__ , '''r''' ) as f:
snake_case_ : str = f.readlines()
with open(__magic_name__ , '''w''' ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(__magic_name__ )
if output_pytorch:
if not self._testing:
remove_copy_lines(F'''{directory}/modeling_{lowercase_model_name}.py''' )
shutil.move(
F'''{directory}/modeling_{lowercase_model_name}.py''' , F'''{model_dir}/modeling_{lowercase_model_name}.py''' , )
shutil.move(
F'''{directory}/test_modeling_{lowercase_model_name}.py''' , F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py''' , )
else:
os.remove(F'''{directory}/modeling_{lowercase_model_name}.py''' )
os.remove(F'''{directory}/test_modeling_{lowercase_model_name}.py''' )
if output_tensorflow:
if not self._testing:
remove_copy_lines(F'''{directory}/modeling_tf_{lowercase_model_name}.py''' )
shutil.move(
F'''{directory}/modeling_tf_{lowercase_model_name}.py''' , F'''{model_dir}/modeling_tf_{lowercase_model_name}.py''' , )
shutil.move(
F'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' , F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py''' , )
else:
os.remove(F'''{directory}/modeling_tf_{lowercase_model_name}.py''' )
os.remove(F'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' )
if output_flax:
if not self._testing:
remove_copy_lines(F'''{directory}/modeling_flax_{lowercase_model_name}.py''' )
shutil.move(
F'''{directory}/modeling_flax_{lowercase_model_name}.py''' , F'''{model_dir}/modeling_flax_{lowercase_model_name}.py''' , )
shutil.move(
F'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' , F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py''' , )
else:
os.remove(F'''{directory}/modeling_flax_{lowercase_model_name}.py''' )
os.remove(F'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' )
shutil.move(
F'''{directory}/{lowercase_model_name}.md''' , F'''{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md''' , )
shutil.move(
F'''{directory}/tokenization_{lowercase_model_name}.py''' , F'''{model_dir}/tokenization_{lowercase_model_name}.py''' , )
shutil.move(
F'''{directory}/tokenization_fast_{lowercase_model_name}.py''' , F'''{model_dir}/tokenization_{lowercase_model_name}_fast.py''' , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(__magic_name__ , __magic_name__ , __magic_name__ ):
# Create temp file
snake_case_ , snake_case_ : Union[str, Any] = mkstemp()
snake_case_ : Any = False
with fdopen(__magic_name__ , '''w''' ) as new_file:
with open(__magic_name__ ) as old_file:
for line in old_file:
new_file.write(__magic_name__ )
if line_to_copy_below in line:
snake_case_ : List[str] = True
for line_to_copy in lines_to_copy:
new_file.write(__magic_name__ )
if not line_found:
raise ValueError(F'''Line {line_to_copy_below} was not found in file.''' )
# Copy the file permissions from the old file to the new file
copymode(__magic_name__ , __magic_name__ )
# Remove original file
remove(__magic_name__ )
# Move new file
move(__magic_name__ , __magic_name__ )
def skip_units(__magic_name__ ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(__magic_name__ ):
with open(__magic_name__ ) as datafile:
snake_case_ : Any = []
snake_case_ : Optional[Any] = False
snake_case_ : Optional[int] = False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
snake_case_ : List[str] = line.split('''"''' )[1]
snake_case_ : List[str] = skip_units(__magic_name__ )
elif "# Below: " in line and "##" not in line:
snake_case_ : int = line.split('''"''' )[1]
snake_case_ : Optional[int] = skip_units(__magic_name__ )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(__magic_name__ , __magic_name__ , __magic_name__ )
snake_case_ : str = []
elif "# Replace with" in line and "##" not in line:
snake_case_ : Union[str, Any] = []
elif "##" not in line:
lines_to_copy.append(__magic_name__ )
remove(__magic_name__ )
replace_in_files(F'''{directory}/to_replace_{lowercase_model_name}.py''' )
os.rmdir(__magic_name__ )
| 60 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase_ = {
'''configuration_speech_to_text''': ['''SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Speech2TextConfig'''],
'''processing_speech_to_text''': ['''Speech2TextProcessor'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['''Speech2TextTokenizer''']
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['''Speech2TextFeatureExtractor''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSpeech2TextForConditionalGeneration''',
'''TFSpeech2TextModel''',
'''TFSpeech2TextPreTrainedModel''',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Speech2TextForConditionalGeneration''',
'''Speech2TextModel''',
'''Speech2TextPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
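# Usage note (illustrative sketch, not taken from the original file): with this lazy
# structure, `import transformers.models.speech_to_text` stays cheap; a heavy submodule
# such as the torch model file is only imported the first time an attribute like
# `Speech2TextForConditionalGeneration` is accessed, and the optional-dependency branches
# above simply omit entries whose backend (sentencepiece, speech, tf, torch) is missing.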
| 60 | 1 |
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class __lowerCAmelCase ( unittest.TestCase ):
lowerCamelCase_ : Tuple = inspect.getfile(accelerate.test_utils )
lowerCamelCase_ : Optional[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_cli.py'''] )
lowerCamelCase_ : Union[str, Any] = ['''accelerate''', '''launch''']
lowerCamelCase_ : Tuple = Path.home() / '''.cache/huggingface/accelerate'''
lowerCamelCase_ : Tuple = '''default_config.yaml'''
lowerCamelCase_ : str = config_folder / config_file
lowerCamelCase_ : List[Any] = config_folder / '''_default_config.yaml'''
lowerCamelCase_ : Dict = Path('''tests/test_configs''' )
@classmethod
def lowerCamelCase (cls ) -> Dict:
'''simple docstring'''
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
def lowerCamelCase (cls ) -> Any:
'''simple docstring'''
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
def lowerCamelCase (self ) -> Tuple:
'''simple docstring'''
snake_case_ : Dict = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
for config in sorted(self.test_config_path.glob('''**/*.yaml''' ) ):
with self.subTest(config_file=__magic_name__ ):
execute_subprocess_async(
self.base_cmd + ['''--config_file''', str(__magic_name__ ), self.test_file_path] , env=os.environ.copy() )
def lowerCamelCase (self ) -> List[Any]:
'''simple docstring'''
execute_subprocess_async(['''accelerate''', '''test'''] , env=os.environ.copy() )
class __lowerCAmelCase ( unittest.TestCase ):
lowerCamelCase_ : List[str] = '''test-tpu'''
lowerCamelCase_ : Dict = '''us-central1-a'''
lowerCamelCase_ : Any = '''ls'''
lowerCamelCase_ : Dict = ['''accelerate''', '''tpu-config''']
lowerCamelCase_ : Tuple = '''cd /usr/share'''
lowerCamelCase_ : List[Any] = '''tests/test_samples/test_command_file.sh'''
lowerCamelCase_ : List[Any] = '''Running gcloud compute tpus tpu-vm ssh'''
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
snake_case_ : int = run_command(
self.cmd
+ ['''--command''', self.command, '''--tpu_zone''', self.tpu_zone, '''--tpu_name''', self.tpu_name, '''--debug'''] , return_stdout=__magic_name__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , __magic_name__ , )
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
snake_case_ : Optional[int] = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/0_12_0.yaml''',
'''--command''',
self.command,
'''--tpu_zone''',
self.tpu_zone,
'''--tpu_name''',
self.tpu_name,
'''--debug''',
] , return_stdout=__magic_name__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , __magic_name__ , )
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : List[str] = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--debug'''] , return_stdout=__magic_name__ )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , __magic_name__ , )
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : List[Any] = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command''', self.command, '''--debug'''] , return_stdout=__magic_name__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , __magic_name__ , )
def lowerCamelCase (self ) -> int:
'''simple docstring'''
snake_case_ : Any = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/latest.yaml''',
'''--command''',
self.command,
'''--command''',
'''echo "Hello World"''',
'''--debug''',
] , return_stdout=__magic_name__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all''' , __magic_name__ , )
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
snake_case_ : str = run_command(
self.cmd
+ ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command_file''', self.command_file, '''--debug'''] , return_stdout=__magic_name__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , __magic_name__ , )
def lowerCamelCase (self ) -> int:
'''simple docstring'''
snake_case_ : Tuple = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/0_12_0.yaml''',
'''--command_file''',
self.command_file,
'''--tpu_zone''',
self.tpu_zone,
'''--tpu_name''',
self.tpu_name,
'''--debug''',
] , return_stdout=__magic_name__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , __magic_name__ , )
def lowerCamelCase (self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Any = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--install_accelerate''', '''--debug'''] , return_stdout=__magic_name__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all''' , __magic_name__ , )
def lowerCamelCase (self ) -> str:
'''simple docstring'''
snake_case_ : Optional[Any] = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/latest.yaml''',
'''--install_accelerate''',
'''--accelerate_version''',
'''12.0.0''',
'''--debug''',
] , return_stdout=__magic_name__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all''' , __magic_name__ , )
| 60 |
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''google/owlvit-base-patch32''': '''https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json''',
'''google/owlvit-base-patch16''': '''https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json''',
'''google/owlvit-large-patch14''': '''https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json''',
}
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : Tuple = '''owlvit_text_model'''
def __init__(self , __magic_name__=4_9408 , __magic_name__=512 , __magic_name__=2048 , __magic_name__=12 , __magic_name__=8 , __magic_name__=16 , __magic_name__="quick_gelu" , __magic_name__=1e-5 , __magic_name__=0.0 , __magic_name__=0.02 , __magic_name__=1.0 , __magic_name__=0 , __magic_name__=4_9406 , __magic_name__=4_9407 , **__magic_name__ , ) -> str:
'''simple docstring'''
super().__init__(pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
snake_case_ : int = vocab_size
snake_case_ : str = hidden_size
snake_case_ : List[Any] = intermediate_size
snake_case_ : str = num_hidden_layers
snake_case_ : List[Any] = num_attention_heads
snake_case_ : Optional[Any] = max_position_embeddings
snake_case_ : str = hidden_act
snake_case_ : Union[str, Any] = layer_norm_eps
snake_case_ : Dict = attention_dropout
snake_case_ : Union[str, Any] = initializer_range
snake_case_ : int = initializer_factor
@classmethod
def lowerCamelCase (cls , __magic_name__ , **__magic_name__ ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(__magic_name__ )
snake_case_ , snake_case_ : str = cls.get_config_dict(__magic_name__ , **__magic_name__ )
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get('''model_type''' ) == "owlvit":
snake_case_ : str = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__magic_name__ , **__magic_name__ )
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : int = '''owlvit_vision_model'''
def __init__(self , __magic_name__=768 , __magic_name__=3072 , __magic_name__=12 , __magic_name__=12 , __magic_name__=3 , __magic_name__=768 , __magic_name__=32 , __magic_name__="quick_gelu" , __magic_name__=1e-5 , __magic_name__=0.0 , __magic_name__=0.02 , __magic_name__=1.0 , **__magic_name__ , ) -> int:
'''simple docstring'''
super().__init__(**__magic_name__ )
snake_case_ : Optional[Any] = hidden_size
snake_case_ : Union[str, Any] = intermediate_size
snake_case_ : Union[str, Any] = num_hidden_layers
snake_case_ : Tuple = num_attention_heads
snake_case_ : List[Any] = num_channels
snake_case_ : Union[str, Any] = image_size
snake_case_ : Dict = patch_size
snake_case_ : List[Any] = hidden_act
snake_case_ : Tuple = layer_norm_eps
snake_case_ : Dict = attention_dropout
snake_case_ : List[str] = initializer_range
snake_case_ : List[Any] = initializer_factor
@classmethod
def lowerCamelCase (cls , __magic_name__ , **__magic_name__ ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(__magic_name__ )
snake_case_ , snake_case_ : int = cls.get_config_dict(__magic_name__ , **__magic_name__ )
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get('''model_type''' ) == "owlvit":
snake_case_ : str = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__magic_name__ , **__magic_name__ )
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : int = '''owlvit'''
lowerCamelCase_ : Optional[int] = True
def __init__(self , __magic_name__=None , __magic_name__=None , __magic_name__=512 , __magic_name__=2.6_592 , __magic_name__=True , **__magic_name__ , ) -> int:
'''simple docstring'''
super().__init__(**__magic_name__ )
if text_config is None:
snake_case_ : Tuple = {}
logger.info('''text_config is None. Initializing the OwlViTTextConfig with default values.''' )
if vision_config is None:
snake_case_ : str = {}
            logger.info('''vision_config is None. Initializing the OwlViTVisionConfig with default values.''' )
snake_case_ : str = OwlViTTextConfig(**__magic_name__ )
snake_case_ : Union[str, Any] = OwlViTVisionConfig(**__magic_name__ )
snake_case_ : Any = projection_dim
snake_case_ : Union[str, Any] = logit_scale_init_value
snake_case_ : str = return_dict
snake_case_ : Any = 1.0
@classmethod
def lowerCamelCase (cls , __magic_name__ , **__magic_name__ ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(__magic_name__ )
snake_case_ , snake_case_ : Optional[Any] = cls.get_config_dict(__magic_name__ , **__magic_name__ )
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__magic_name__ , **__magic_name__ )
@classmethod
def lowerCamelCase (cls , __magic_name__ , __magic_name__ , **__magic_name__ ) -> str:
'''simple docstring'''
snake_case_ : Optional[int] = {}
snake_case_ : Union[str, Any] = text_config
snake_case_ : Optional[Any] = vision_config
return cls.from_dict(__magic_name__ , **__magic_name__ )
def lowerCamelCase (self ) -> str:
'''simple docstring'''
snake_case_ : Dict = copy.deepcopy(self.__dict__ )
snake_case_ : List[Any] = self.text_config.to_dict()
snake_case_ : List[Any] = self.vision_config.to_dict()
snake_case_ : Tuple = self.__class__.model_type
return output
class __lowerCAmelCase ( _a ):
@property
def lowerCamelCase (self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
] )
@property
def lowerCamelCase (self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''logits_per_image''', {0: '''batch'''}),
('''logits_per_text''', {0: '''batch'''}),
('''text_embeds''', {0: '''batch'''}),
('''image_embeds''', {0: '''batch'''}),
] )
@property
def lowerCamelCase (self ) -> float:
'''simple docstring'''
return 1e-4
def lowerCamelCase (self , __magic_name__ , __magic_name__ = -1 , __magic_name__ = -1 , __magic_name__ = None , ) -> Mapping[str, Any]:
'''simple docstring'''
snake_case_ : Dict = super().generate_dummy_inputs(
processor.tokenizer , batch_size=__magic_name__ , seq_length=__magic_name__ , framework=__magic_name__ )
snake_case_ : List[str] = super().generate_dummy_inputs(
processor.image_processor , batch_size=__magic_name__ , framework=__magic_name__ )
return {**text_input_dict, **image_input_dict}
@property
def lowerCamelCase (self ) -> int:
'''simple docstring'''
return 14
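# Usage sketch (upstream these classes are OwlViTTextConfig / OwlViTVisionConfig /
# OwlViTConfig; the attribute values quoted are simply the defaults defined above):
#   cfg = OwlViTConfig()              # builds default text and vision sub-configs
#   cfg.text_config.hidden_size       # -> 512
#   cfg.vision_config.hidden_size     # -> 768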
| 60 | 1 |
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
class __lowerCAmelCase :
lowerCamelCase_ : str
lowerCamelCase_ : str = None
@staticmethod
def lowerCamelCase () -> Any:
'''simple docstring'''
raise NotImplementedError
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ) -> Dict:
'''simple docstring'''
raise NotImplementedError
def lowerCamelCase (self , __magic_name__ ) -> int:
'''simple docstring'''
raise NotImplementedError
def lowerCamelCase (self ) -> Union[str, Any]:
'''simple docstring'''
if not self.is_available():
raise RuntimeError(
F'''You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.''' )
@classmethod
def lowerCamelCase (cls ) -> List[Any]:
'''simple docstring'''
return F'''`pip install {cls.pip_package or cls.name}`'''
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : Optional[int] = '''optuna'''
@staticmethod
def lowerCamelCase () -> Union[str, Any]:
'''simple docstring'''
return is_optuna_available()
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ) -> Union[str, Any]:
'''simple docstring'''
return run_hp_search_optuna(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ )
def lowerCamelCase (self , __magic_name__ ) -> Union[str, Any]:
'''simple docstring'''
return default_hp_space_optuna(__magic_name__ )
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : Any = '''ray'''
lowerCamelCase_ : List[str] = '''\'ray[tune]\''''
@staticmethod
def lowerCamelCase () -> List[Any]:
'''simple docstring'''
return is_ray_available()
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ) -> Optional[Any]:
'''simple docstring'''
return run_hp_search_ray(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ )
def lowerCamelCase (self , __magic_name__ ) -> Union[str, Any]:
'''simple docstring'''
return default_hp_space_ray(__magic_name__ )
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : Tuple = '''sigopt'''
@staticmethod
def lowerCamelCase () -> Optional[int]:
'''simple docstring'''
return is_sigopt_available()
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ) -> List[str]:
'''simple docstring'''
return run_hp_search_sigopt(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ )
def lowerCamelCase (self , __magic_name__ ) -> int:
'''simple docstring'''
return default_hp_space_sigopt(__magic_name__ )
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : Tuple = '''wandb'''
@staticmethod
def lowerCamelCase () -> Dict:
'''simple docstring'''
return is_wandb_available()
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ) -> Optional[Any]:
'''simple docstring'''
return run_hp_search_wandb(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ )
def lowerCamelCase (self , __magic_name__ ) -> Optional[Any]:
'''simple docstring'''
return default_hp_space_wandb(__magic_name__ )
lowerCAmelCase_ = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def lowerCamelCase_ ( ) -> str:
"""simple docstring"""
snake_case_ : Optional[int] = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
if len(_UpperCamelCase ) > 0:
snake_case_ : Dict = available_backends[0].name
if len(_UpperCamelCase ) > 1:
logger.info(
f'''{len(_UpperCamelCase )} hyperparameter search backends available. Using {name} as the default.''' )
return name
raise RuntimeError(
'''No hyperparameter search backend available.\n'''
+ '''\n'''.join(
f''' - To install {backend.name} run {backend.pip_install()}'''
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
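# Behaviour sketch (illustrative, using the upstream name of the helper defined above):
#   default_hp_search_backend()  # returns e.g. "optuna" if optuna is installed,
#                                # logs a notice when several backends are available,
#                                # and raises with per-backend install hints otherwise.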
| 60 |
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class __lowerCAmelCase ( unittest.TestCase ):
lowerCamelCase_ : Tuple = inspect.getfile(accelerate.test_utils )
lowerCamelCase_ : Optional[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_cli.py'''] )
lowerCamelCase_ : Union[str, Any] = ['''accelerate''', '''launch''']
lowerCamelCase_ : Tuple = Path.home() / '''.cache/huggingface/accelerate'''
lowerCamelCase_ : Tuple = '''default_config.yaml'''
lowerCamelCase_ : str = config_folder / config_file
lowerCamelCase_ : List[Any] = config_folder / '''_default_config.yaml'''
lowerCamelCase_ : Dict = Path('''tests/test_configs''' )
@classmethod
def lowerCamelCase (cls ) -> Dict:
'''simple docstring'''
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
def lowerCamelCase (cls ) -> Any:
'''simple docstring'''
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
def lowerCamelCase (self ) -> Tuple:
'''simple docstring'''
snake_case_ : Dict = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
for config in sorted(self.test_config_path.glob('''**/*.yaml''' ) ):
with self.subTest(config_file=__magic_name__ ):
execute_subprocess_async(
self.base_cmd + ['''--config_file''', str(__magic_name__ ), self.test_file_path] , env=os.environ.copy() )
def lowerCamelCase (self ) -> List[Any]:
'''simple docstring'''
execute_subprocess_async(['''accelerate''', '''test'''] , env=os.environ.copy() )
class __lowerCAmelCase ( unittest.TestCase ):
lowerCamelCase_ : List[str] = '''test-tpu'''
lowerCamelCase_ : Dict = '''us-central1-a'''
lowerCamelCase_ : Any = '''ls'''
lowerCamelCase_ : Dict = ['''accelerate''', '''tpu-config''']
lowerCamelCase_ : Tuple = '''cd /usr/share'''
lowerCamelCase_ : List[Any] = '''tests/test_samples/test_command_file.sh'''
lowerCamelCase_ : List[Any] = '''Running gcloud compute tpus tpu-vm ssh'''
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
snake_case_ : int = run_command(
self.cmd
+ ['''--command''', self.command, '''--tpu_zone''', self.tpu_zone, '''--tpu_name''', self.tpu_name, '''--debug'''] , return_stdout=__magic_name__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , __magic_name__ , )
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
snake_case_ : Optional[int] = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/0_12_0.yaml''',
'''--command''',
self.command,
'''--tpu_zone''',
self.tpu_zone,
'''--tpu_name''',
self.tpu_name,
'''--debug''',
] , return_stdout=__magic_name__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , __magic_name__ , )
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : List[str] = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--debug'''] , return_stdout=__magic_name__ )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , __magic_name__ , )
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : List[Any] = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command''', self.command, '''--debug'''] , return_stdout=__magic_name__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , __magic_name__ , )
def lowerCamelCase (self ) -> int:
'''simple docstring'''
snake_case_ : Any = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/latest.yaml''',
'''--command''',
self.command,
'''--command''',
'''echo "Hello World"''',
'''--debug''',
] , return_stdout=__magic_name__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all''' , __magic_name__ , )
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
snake_case_ : str = run_command(
self.cmd
+ ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command_file''', self.command_file, '''--debug'''] , return_stdout=__magic_name__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , __magic_name__ , )
def lowerCamelCase (self ) -> int:
'''simple docstring'''
snake_case_ : Tuple = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/0_12_0.yaml''',
'''--command_file''',
self.command_file,
'''--tpu_zone''',
self.tpu_zone,
'''--tpu_name''',
self.tpu_name,
'''--debug''',
] , return_stdout=__magic_name__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , __magic_name__ , )
def lowerCamelCase (self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Any = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--install_accelerate''', '''--debug'''] , return_stdout=__magic_name__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all''' , __magic_name__ , )
def lowerCamelCase (self ) -> str:
'''simple docstring'''
snake_case_ : Optional[Any] = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/latest.yaml''',
'''--install_accelerate''',
'''--accelerate_version''',
'''12.0.0''',
'''--debug''',
] , return_stdout=__magic_name__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all''' , __magic_name__ , )
| 60 | 1 |
import argparse
import torch
from transformers import GPTaLMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser(
description=(
            '''Extract some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned'''
''' Distillation'''
)
)
parser.add_argument('''--model_type''', default='''roberta''', choices=['''roberta''', '''gpt2'''])
parser.add_argument('''--model_name''', default='''roberta-large''', type=str)
parser.add_argument('''--dump_checkpoint''', default='''serialization_dir/tf_roberta_048131723.pth''', type=str)
parser.add_argument('''--vocab_transform''', action='''store_true''')
lowerCAmelCase_ = parser.parse_args()
if args.model_type == "roberta":
lowerCAmelCase_ = RobertaForMaskedLM.from_pretrained(args.model_name)
lowerCAmelCase_ = '''roberta'''
elif args.model_type == "gpt2":
lowerCAmelCase_ = GPTaLMHeadModel.from_pretrained(args.model_name)
lowerCAmelCase_ = '''transformer'''
lowerCAmelCase_ = model.state_dict()
lowerCAmelCase_ = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
lowerCAmelCase_ = state_dict[F'''{prefix}.{param_name}''']
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
lowerCAmelCase_ = F'''{prefix}.embeddings.{w}.weight'''
lowerCAmelCase_ = state_dict[param_name]
for w in ["weight", "bias"]:
lowerCAmelCase_ = F'''{prefix}.embeddings.LayerNorm.{w}'''
lowerCAmelCase_ = state_dict[param_name]
# Transformer Blocks #
lowerCAmelCase_ = 0
for teacher_idx in [0, 2, 4, 7, 9, 1_1]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
lowerCAmelCase_ = state_dict[
F'''{prefix}.h.{teacher_idx}.{layer}.{w}'''
]
lowerCAmelCase_ = state_dict[F'''{prefix}.h.{teacher_idx}.attn.bias''']
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
lowerCAmelCase_ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}'''
]
std_idx += 1
    # Language Modeling Head #
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
lowerCAmelCase_ = state_dict[F'''{layer}''']
if args.vocab_transform:
for w in ["weight", "bias"]:
lowerCAmelCase_ = state_dict[F'''lm_head.dense.{w}''']
lowerCAmelCase_ = state_dict[F'''lm_head.layer_norm.{w}''']
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
lowerCAmelCase_ = state_dict[F'''{prefix}.ln_f.{w}''']
lowerCAmelCase_ = state_dict['''lm_head.weight''']
print(F'''N layers selected for distillation: {std_idx}''')
print(F'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(F'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
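# Example invocation (a sketch; the script filename is an assumption, while the flags and
# the default checkpoint path come from the argparse definitions above):
#   python extract.py --model_type roberta --model_name roberta-large \
#       --dump_checkpoint serialization_dir/tf_roberta_048131723.pth --vocab_transform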
| 60 |
import warnings
from ..trainer import Trainer
from ..utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
class __lowerCAmelCase ( _a ):
def __init__(self , __magic_name__=None , **__magic_name__ ) -> Dict:
'''simple docstring'''
warnings.warn(
'''`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` '''
'''instead.''' , __magic_name__ , )
super().__init__(args=__magic_name__ , **__magic_name__ )
| 60 | 1 |
import math
import sys
def lowerCamelCase_ ( _UpperCamelCase ) -> str:
"""simple docstring"""
snake_case_ : Any = ''''''
try:
with open(_UpperCamelCase , '''rb''' ) as binary_file:
snake_case_ : Dict = binary_file.read()
for dat in data:
snake_case_ : Union[str, Any] = f'''{dat:08b}'''
result += curr_byte
return result
except OSError:
print('''File not accessible''' )
sys.exit()
def lowerCamelCase_ ( _UpperCamelCase ) -> str:
"""simple docstring"""
snake_case_ : Any = {'''0''': '''0''', '''1''': '''1'''}
snake_case_ , snake_case_ : List[Any] = '''''', ''''''
snake_case_ : List[str] = len(_UpperCamelCase )
for i in range(len(_UpperCamelCase ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
snake_case_ : List[str] = lexicon[curr_string]
result += last_match_id
snake_case_ : int = last_match_id + '''0'''
if math.loga(_UpperCamelCase ).is_integer():
snake_case_ : int = {}
for curr_key in list(_UpperCamelCase ):
snake_case_ : Tuple = lexicon.pop(_UpperCamelCase )
snake_case_ : Optional[int] = new_lex
snake_case_ : str = last_match_id + '''1'''
index += 1
snake_case_ : Tuple = ''''''
return result
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> None:
"""simple docstring"""
snake_case_ : Optional[Any] = 8
try:
with open(_UpperCamelCase , '''wb''' ) as opened_file:
snake_case_ : Tuple = [
to_write[i : i + byte_length]
for i in range(0 , len(_UpperCamelCase ) , _UpperCamelCase )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('''10000000''' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(_UpperCamelCase , 2 ).to_bytes(1 , byteorder='''big''' ) )
except OSError:
print('''File not accessible''' )
sys.exit()
def lowerCamelCase_ ( _UpperCamelCase ) -> str:
"""simple docstring"""
snake_case_ : Union[str, Any] = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
snake_case_ : Union[str, Any] = data_bits[counter:]
snake_case_ : Optional[int] = data_bits[counter + 1 :]
return data_bits
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> None:
"""simple docstring"""
snake_case_ : Optional[Any] = read_file_binary(_UpperCamelCase )
snake_case_ : int = remove_prefix(_UpperCamelCase )
snake_case_ : Dict = decompress_data(_UpperCamelCase )
write_file_binary(_UpperCamelCase , _UpperCamelCase )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
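# Example invocation (file names are placeholders, not from the original script): the
# first argument is read as a bit string, the header bits added by the matching compressor
# are stripped, the LZW codes are decoded, and the recovered bytes are written to the
# second argument.
#   python lzw_decompress.py compressed.bin restored.txt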
| 60 |
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def lowerCamelCase_ ( _UpperCamelCase ) -> Any:
"""simple docstring"""
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def lowerCamelCase_ ( ) -> Union[str, Any]:
"""simple docstring"""
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def lowerCamelCase_ ( ) -> Tuple:
"""simple docstring"""
snake_case_ : str = '''mock-s3-bucket'''
snake_case_ : str = f'''s3://{mock_bucket}'''
snake_case_ : Any = extract_path_from_uri(_UpperCamelCase )
assert dataset_path.startswith('''s3://''' ) is False
snake_case_ : Optional[Any] = '''./local/path'''
snake_case_ : List[str] = extract_path_from_uri(_UpperCamelCase )
assert dataset_path == new_dataset_path
def lowerCamelCase_ ( _UpperCamelCase ) -> str:
"""simple docstring"""
snake_case_ : Union[str, Any] = is_remote_filesystem(_UpperCamelCase )
assert is_remote is True
snake_case_ : Union[str, Any] = fsspec.filesystem('''file''' )
snake_case_ : int = is_remote_filesystem(_UpperCamelCase )
assert is_remote is False
@pytest.mark.parametrize('''compression_fs_class''' , _UpperCamelCase )
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Tuple:
"""simple docstring"""
snake_case_ : Optional[Any] = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_file, '''bz2''': bza_file, '''lz4''': lza_file}
snake_case_ : Optional[Any] = input_paths[compression_fs_class.protocol]
if input_path is None:
snake_case_ : List[Any] = f'''for \'{compression_fs_class.protocol}\' compression protocol, '''
if compression_fs_class.protocol == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(_UpperCamelCase )
snake_case_ : Dict = fsspec.filesystem(compression_fs_class.protocol , fo=_UpperCamelCase )
assert isinstance(_UpperCamelCase , _UpperCamelCase )
snake_case_ : int = os.path.basename(_UpperCamelCase )
snake_case_ : Any = expected_filename[: expected_filename.rindex('''.''' )]
assert fs.glob('''*''' ) == [expected_filename]
with fs.open(_UpperCamelCase , '''r''' , encoding='''utf-8''' ) as f, open(_UpperCamelCase , encoding='''utf-8''' ) as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize('''protocol''' , ['''zip''', '''gzip'''] )
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Optional[int]:
"""simple docstring"""
snake_case_ : Union[str, Any] = {'''zip''': zip_jsonl_path, '''gzip''': jsonl_gz_path}
snake_case_ : Any = compressed_file_paths[protocol]
snake_case_ : Any = '''dataset.jsonl'''
snake_case_ : Dict = f'''{protocol}://{member_file_path}::{compressed_file_path}'''
snake_case_ , *snake_case_ : Optional[Any] = fsspec.get_fs_token_paths(_UpperCamelCase )
assert fs.isfile(_UpperCamelCase )
assert not fs.isfile('''non_existing_''' + member_file_path )
@pytest.mark.integration
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Dict:
"""simple docstring"""
snake_case_ : Optional[int] = hf_api.dataset_info(_UpperCamelCase , token=_UpperCamelCase )
snake_case_ : List[str] = HfFileSystem(repo_info=_UpperCamelCase , token=_UpperCamelCase )
assert sorted(hffs.glob('''*''' ) ) == [".gitattributes", "data"]
assert hffs.isdir('''data''' )
assert hffs.isfile('''.gitattributes''' ) and hffs.isfile('''data/text_data.txt''' )
with open(_UpperCamelCase ) as f:
assert hffs.open('''data/text_data.txt''' , '''r''' ).read() == f.read()
def lowerCamelCase_ ( ) -> Any:
"""simple docstring"""
snake_case_ : Tuple = '''bz2'''
# Import module
import datasets.filesystems
# Overwrite protocol and reload
register_implementation(_UpperCamelCase , _UpperCamelCase , clobber=_UpperCamelCase )
with pytest.warns(_UpperCamelCase ) as warning_info:
importlib.reload(datasets.filesystems )
assert len(_UpperCamelCase ) == 1
assert (
str(warning_info[0].message )
== f'''A filesystem protocol was already set for {protocol} and will be overwritten.'''
)
| 60 | 1 |
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
lowerCAmelCase_ = logging.get_logger(__name__)
@add_end_docstrings(_a )
class __lowerCAmelCase ( _a ):
def __init__(self , **__magic_name__ ) -> Tuple:
'''simple docstring'''
super().__init__(**__magic_name__ )
requires_backends(self , '''vision''' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__(self , __magic_name__ , **__magic_name__ ) -> Dict:
'''simple docstring'''
return super().__call__(__magic_name__ , **__magic_name__ )
def lowerCamelCase (self , **__magic_name__ ) -> List[str]:
'''simple docstring'''
snake_case_ : Dict = {}
if "candidate_labels" in kwargs:
snake_case_ : Optional[int] = kwargs['''candidate_labels''']
if "hypothesis_template" in kwargs:
snake_case_ : Tuple = kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
def lowerCamelCase (self , __magic_name__ , __magic_name__=None , __magic_name__="This is a photo of {}." ) -> List[Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = load_image(__magic_name__ )
snake_case_ : Optional[int] = self.image_processor(images=[image] , return_tensors=self.framework )
snake_case_ : str = candidate_labels
snake_case_ : Any = [hypothesis_template.format(__magic_name__ ) for x in candidate_labels]
snake_case_ : List[Any] = self.tokenizer(__magic_name__ , return_tensors=self.framework , padding=__magic_name__ )
snake_case_ : str = [text_inputs]
return inputs
def lowerCamelCase (self , __magic_name__ ) -> List[Any]:
'''simple docstring'''
snake_case_ : Dict = model_inputs.pop('''candidate_labels''' )
snake_case_ : Optional[Any] = model_inputs.pop('''text_inputs''' )
if isinstance(text_inputs[0] , __magic_name__ ):
snake_case_ : Dict = text_inputs[0]
else:
# Batching case.
snake_case_ : List[str] = text_inputs[0][0]
snake_case_ : str = self.model(**__magic_name__ , **__magic_name__ )
snake_case_ : Any = {
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_image,
}
return model_outputs
def lowerCamelCase (self , __magic_name__ ) -> str:
'''simple docstring'''
snake_case_ : Optional[int] = model_outputs.pop('''candidate_labels''' )
snake_case_ : Optional[int] = model_outputs['''logits'''][0]
if self.framework == "pt":
snake_case_ : Dict = logits.softmax(dim=-1 ).squeeze(-1 )
snake_case_ : Optional[Any] = probs.tolist()
if not isinstance(__magic_name__ , __magic_name__ ):
snake_case_ : Optional[Any] = [scores]
elif self.framework == "tf":
snake_case_ : Tuple = stable_softmax(__magic_name__ , axis=-1 )
snake_case_ : List[str] = probs.numpy().tolist()
else:
raise ValueError(F'''Unsupported framework: {self.framework}''' )
snake_case_ : Union[str, Any] = [
{'''score''': score, '''label''': candidate_label}
for score, candidate_label in sorted(zip(__magic_name__ , __magic_name__ ) , key=lambda __magic_name__ : -x[0] )
]
return result
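# Minimal usage sketch (the checkpoint and image path are assumptions, not taken from
# this file; the task name matches this pipeline upstream):
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#   classifier("photo.jpg", candidate_labels=["cat", "dog"], hypothesis_template="This is a photo of {}.")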
| 60 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : Optional[Any] = '''encoder-decoder'''
lowerCamelCase_ : Optional[Any] = True
def __init__(self , **__magic_name__ ) -> Optional[int]:
'''simple docstring'''
super().__init__(**__magic_name__ )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
snake_case_ : Any = kwargs.pop('''encoder''' )
snake_case_ : Tuple = encoder_config.pop('''model_type''' )
snake_case_ : Union[str, Any] = kwargs.pop('''decoder''' )
snake_case_ : Union[str, Any] = decoder_config.pop('''model_type''' )
from ..auto.configuration_auto import AutoConfig
snake_case_ : Optional[int] = AutoConfig.for_model(__magic_name__ , **__magic_name__ )
snake_case_ : List[str] = AutoConfig.for_model(__magic_name__ , **__magic_name__ )
snake_case_ : Any = True
@classmethod
def lowerCamelCase (cls , __magic_name__ , __magic_name__ , **__magic_name__ ) -> PretrainedConfig:
'''simple docstring'''
logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
snake_case_ : Tuple = True
snake_case_ : Optional[Any] = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **__magic_name__ )
def lowerCamelCase (self ) -> int:
'''simple docstring'''
snake_case_ : str = copy.deepcopy(self.__dict__ )
snake_case_ : Any = self.encoder.to_dict()
snake_case_ : Dict = self.decoder.to_dict()
snake_case_ : Union[str, Any] = self.__class__.model_type
return output
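# Usage sketch (upstream this class is EncoderDecoderConfig; the BERT sub-configs are an
# assumed example):
#   from transformers import BertConfig, EncoderDecoderConfig
#   cfg = EncoderDecoderConfig.from_encoder_decoder_configs(BertConfig(), BertConfig())
#   cfg.decoder.is_decoder, cfg.decoder.add_cross_attention   # -> True, True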
| 60 | 1 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
lowerCAmelCase_ = {
'''vocab_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
},
}
lowerCAmelCase_ = {
'''allenai/longformer-base-4096''': 4_0_9_6,
'''allenai/longformer-large-4096''': 4_0_9_6,
'''allenai/longformer-large-4096-finetuned-triviaqa''': 4_0_9_6,
'''allenai/longformer-base-4096-extra.pos.embd.only''': 4_0_9_6,
'''allenai/longformer-large-4096-extra.pos.embd.only''': 4_0_9_6,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def lowerCamelCase_ ( ) -> List[str]:
"""simple docstring"""
snake_case_ : str = (
list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
)
snake_case_ : Union[str, Any] = bs[:]
snake_case_ : Dict = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_UpperCamelCase )
cs.append(2**8 + n )
n += 1
snake_case_ : Any = [chr(_UpperCamelCase ) for n in cs]
return dict(zip(_UpperCamelCase , _UpperCamelCase ) )
def lowerCamelCase_ ( _UpperCamelCase ) -> Dict:
"""simple docstring"""
snake_case_ : List[str] = set()
snake_case_ : List[Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
snake_case_ : Optional[int] = char
return pairs
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : Tuple = VOCAB_FILES_NAMES
lowerCamelCase_ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase_ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase_ : List[Any] = ['''input_ids''', '''attention_mask''']
def __init__(self , __magic_name__ , __magic_name__ , __magic_name__="replace" , __magic_name__="<s>" , __magic_name__="</s>" , __magic_name__="</s>" , __magic_name__="<s>" , __magic_name__="<unk>" , __magic_name__="<pad>" , __magic_name__="<mask>" , __magic_name__=False , **__magic_name__ , ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : int = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else bos_token
snake_case_ : List[Any] = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else eos_token
snake_case_ : Dict = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else sep_token
snake_case_ : Optional[int] = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else cls_token
snake_case_ : Union[str, Any] = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else unk_token
snake_case_ : Any = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else pad_token
        # Mask token behaves like a normal word, i.e. includes the space before it
snake_case_ : str = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else mask_token
super().__init__(
errors=__magic_name__ , bos_token=__magic_name__ , eos_token=__magic_name__ , unk_token=__magic_name__ , sep_token=__magic_name__ , cls_token=__magic_name__ , pad_token=__magic_name__ , mask_token=__magic_name__ , add_prefix_space=__magic_name__ , **__magic_name__ , )
with open(__magic_name__ , encoding='''utf-8''' ) as vocab_handle:
snake_case_ : Optional[Any] = json.load(__magic_name__ )
snake_case_ : Optional[int] = {v: k for k, v in self.encoder.items()}
snake_case_ : Optional[Any] = errors # how to handle errors in decoding
snake_case_ : Union[str, Any] = bytes_to_unicode()
snake_case_ : str = {v: k for k, v in self.byte_encoder.items()}
with open(__magic_name__ , encoding='''utf-8''' ) as merges_handle:
snake_case_ : Dict = merges_handle.read().split('''\n''' )[1:-1]
snake_case_ : Optional[int] = [tuple(merge.split() ) for merge in bpe_merges]
snake_case_ : Union[str, Any] = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) )
snake_case_ : int = {}
snake_case_ : Tuple = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
snake_case_ : Optional[Any] = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
def lowerCamelCase (self ) -> Any:
'''simple docstring'''
return len(self.encoder )
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCamelCase (self , __magic_name__ ) -> Union[str, Any]:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
snake_case_ : str = tuple(__magic_name__ )
snake_case_ : Dict = get_pairs(__magic_name__ )
if not pairs:
return token
while True:
snake_case_ : Optional[int] = min(__magic_name__ , key=lambda __magic_name__ : self.bpe_ranks.get(__magic_name__ , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
snake_case_ , snake_case_ : List[Any] = bigram
snake_case_ : int = []
snake_case_ : int = 0
while i < len(__magic_name__ ):
try:
snake_case_ : Union[str, Any] = word.index(__magic_name__ , __magic_name__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
snake_case_ : Any = j
if word[i] == first and i < len(__magic_name__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
snake_case_ : Dict = tuple(__magic_name__ )
snake_case_ : List[str] = new_word
if len(__magic_name__ ) == 1:
break
else:
snake_case_ : Optional[int] = get_pairs(__magic_name__ )
snake_case_ : List[str] = ''' '''.join(__magic_name__ )
snake_case_ : List[Any] = word
return word
def lowerCamelCase (self , __magic_name__ ) -> str:
'''simple docstring'''
snake_case_ : Tuple = []
for token in re.findall(self.pat , __magic_name__ ):
snake_case_ : Optional[Any] = ''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__magic_name__ ).split(''' ''' ) )
return bpe_tokens
def lowerCamelCase (self , __magic_name__ ) -> Any:
'''simple docstring'''
return self.encoder.get(__magic_name__ , self.encoder.get(self.unk_token ) )
def lowerCamelCase (self , __magic_name__ ) -> Dict:
'''simple docstring'''
return self.decoder.get(__magic_name__ )
def lowerCamelCase (self , __magic_name__ ) -> Dict:
'''simple docstring'''
snake_case_ : Tuple = ''''''.join(__magic_name__ )
snake_case_ : Dict = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def lowerCamelCase (self , __magic_name__ , __magic_name__ = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(__magic_name__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
snake_case_ : Dict = os.path.join(
__magic_name__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
snake_case_ : Optional[int] = os.path.join(
__magic_name__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(__magic_name__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__magic_name__ , ensure_ascii=__magic_name__ ) + '''\n''' )
snake_case_ : List[str] = 0
with open(__magic_name__ , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __magic_name__ : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
''' Please check that the tokenizer is not corrupted!''' )
snake_case_ : Optional[int] = token_index
writer.write(''' '''.join(__magic_name__ ) + '''\n''' )
index += 1
return vocab_file, merge_file
def lowerCamelCase (self , __magic_name__ , __magic_name__ = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
snake_case_ : Any = [self.cls_token_id]
snake_case_ : List[str] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCamelCase (self , __magic_name__ , __magic_name__ = None , __magic_name__ = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__magic_name__ , token_ids_a=__magic_name__ , already_has_special_tokens=__magic_name__ )
if token_ids_a is None:
return [1] + ([0] * len(__magic_name__ )) + [1]
return [1] + ([0] * len(__magic_name__ )) + [1, 1] + ([0] * len(__magic_name__ )) + [1]
def lowerCamelCase (self , __magic_name__ , __magic_name__ = None ) -> List[int]:
'''simple docstring'''
snake_case_ : Any = [self.sep_token_id]
snake_case_ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase (self , __magic_name__ , __magic_name__=False , **__magic_name__ ) -> Dict:
'''simple docstring'''
snake_case_ : Optional[int] = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__magic_name__ ) > 0 and not text[0].isspace()):
snake_case_ : Optional[Any] = ''' ''' + text
return (text, kwargs)
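# Usage sketch (the checkpoint name comes from the pretrained map above; upstream this is
# the Longformer byte-level BPE tokenizer):
#   from transformers import LongformerTokenizer
#   tok = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
#   tok("Hello world")["input_ids"]   # ids wrapped in <s> ... </s> by the methods above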
| 60 |
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
lowerCAmelCase_ = logging.get_logger(__name__)
class __lowerCAmelCase :
def __init__(self , __magic_name__ , __magic_name__ ) -> List[Any]:
'''simple docstring'''
snake_case_ : Optional[int] = question_encoder
snake_case_ : Optional[int] = generator
snake_case_ : Optional[Any] = self.question_encoder
def lowerCamelCase (self , __magic_name__ ) -> Dict:
'''simple docstring'''
if os.path.isfile(__magic_name__ ):
raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''' )
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
snake_case_ : str = os.path.join(__magic_name__ , '''question_encoder_tokenizer''' )
snake_case_ : List[Any] = os.path.join(__magic_name__ , '''generator_tokenizer''' )
self.question_encoder.save_pretrained(__magic_name__ )
self.generator.save_pretrained(__magic_name__ )
@classmethod
def lowerCamelCase (cls , __magic_name__ , **__magic_name__ ) -> Any:
'''simple docstring'''
from ..auto.tokenization_auto import AutoTokenizer
snake_case_ : List[str] = kwargs.pop('''config''' , __magic_name__ )
if config is None:
snake_case_ : int = RagConfig.from_pretrained(__magic_name__ )
snake_case_ : Dict = AutoTokenizer.from_pretrained(
__magic_name__ , config=config.question_encoder , subfolder='''question_encoder_tokenizer''' )
snake_case_ : Dict = AutoTokenizer.from_pretrained(
__magic_name__ , config=config.generator , subfolder='''generator_tokenizer''' )
return cls(question_encoder=__magic_name__ , generator=__magic_name__ )
def __call__(self , *__magic_name__ , **__magic_name__ ) -> Tuple:
'''simple docstring'''
return self.current_tokenizer(*__magic_name__ , **__magic_name__ )
def lowerCamelCase (self , *__magic_name__ , **__magic_name__ ) -> Dict:
'''simple docstring'''
return self.generator.batch_decode(*__magic_name__ , **__magic_name__ )
def lowerCamelCase (self , *__magic_name__ , **__magic_name__ ) -> int:
'''simple docstring'''
return self.generator.decode(*__magic_name__ , **__magic_name__ )
def lowerCamelCase (self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Any = self.question_encoder
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
snake_case_ : Dict = self.generator
def lowerCamelCase (self , __magic_name__ , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = "longest" , __magic_name__ = None , __magic_name__ = True , **__magic_name__ , ) -> BatchEncoding:
'''simple docstring'''
warnings.warn(
'''`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '''
'''regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '''
'''context manager to prepare your targets. See the documentation of your specific tokenizer for more '''
'''details''' , __magic_name__ , )
if max_length is None:
snake_case_ : Dict = self.current_tokenizer.model_max_length
snake_case_ : List[str] = self(
__magic_name__ , add_special_tokens=__magic_name__ , return_tensors=__magic_name__ , max_length=__magic_name__ , padding=__magic_name__ , truncation=__magic_name__ , **__magic_name__ , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
snake_case_ : Optional[int] = self.current_tokenizer.model_max_length
snake_case_ : Union[str, Any] = self(
text_target=__magic_name__ , add_special_tokens=__magic_name__ , return_tensors=__magic_name__ , padding=__magic_name__ , max_length=__magic_name__ , truncation=__magic_name__ , **__magic_name__ , )
snake_case_ : str = labels['''input_ids''']
return model_inputs
| 60 | 1 |
from __future__ import annotations
import requests
def lowerCamelCase_ ( _UpperCamelCase ) -> dict:
"""simple docstring"""
snake_case_ : Tuple = f'''https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty'''
return requests.get(_UpperCamelCase ).json()
def lowerCamelCase_ ( _UpperCamelCase = 10 ) -> list[dict]:
"""simple docstring"""
snake_case_ : Any = '''https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty'''
snake_case_ : Tuple = requests.get(_UpperCamelCase ).json()[:max_stories]
return [get_hackernews_story(_UpperCamelCase ) for story_id in story_ids]
def lowerCamelCase_ ( _UpperCamelCase = 10 ) -> str:
"""simple docstring"""
snake_case_ : int = hackernews_top_stories(_UpperCamelCase )
return "\n".join('''* [{title}]({url})'''.format(**_UpperCamelCase ) for story in stories )
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
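# Illustrative shape of the markdown produced above (hypothetical titles and URLs, not
# real Hacker News data): each story becomes one bullet of the form
# * [Show HN: Example project](https://example.com/project)
# * [Another front-page title](https://example.com/other)
# and the bullets are joined with newlines by hackernews_top_stories_as_markdown.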
| 60 |
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __lowerCAmelCase :
def __init__(self , __magic_name__ , __magic_name__=13 , __magic_name__=30 , __magic_name__=2 , __magic_name__=3 , __magic_name__=True , __magic_name__=True , __magic_name__=32 , __magic_name__=5 , __magic_name__=4 , __magic_name__=37 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=10 , __magic_name__=0.02 , __magic_name__=None , ) -> List[Any]:
'''simple docstring'''
snake_case_ : List[str] = parent
snake_case_ : Optional[Any] = batch_size
snake_case_ : List[Any] = image_size
snake_case_ : Optional[int] = patch_size
snake_case_ : Optional[Any] = num_channels
snake_case_ : Optional[Any] = is_training
snake_case_ : List[Any] = use_labels
snake_case_ : Optional[int] = hidden_size
snake_case_ : Optional[Any] = num_hidden_layers
snake_case_ : Union[str, Any] = num_attention_heads
snake_case_ : Optional[Any] = intermediate_size
snake_case_ : Any = hidden_act
snake_case_ : List[str] = hidden_dropout_prob
snake_case_ : Dict = attention_probs_dropout_prob
snake_case_ : List[str] = type_sequence_label_size
snake_case_ : Union[str, Any] = initializer_range
snake_case_ : List[Any] = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
snake_case_ : Any = (image_size // patch_size) ** 2
snake_case_ : int = num_patches + 1
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ : List[Any] = None
if self.use_labels:
snake_case_ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ : int = self.get_config()
return config, pixel_values, labels
def lowerCamelCase (self ) -> Tuple:
'''simple docstring'''
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ ) -> List[str]:
'''simple docstring'''
snake_case_ : int = ViTMSNModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
snake_case_ : List[str] = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ ) -> List[str]:
'''simple docstring'''
snake_case_ : int = self.type_sequence_label_size
snake_case_ : Tuple = ViTMSNForImageClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
snake_case_ : Any = model(__magic_name__ , labels=__magic_name__ )
print(F'''Pixel and labels shape: {pixel_values.shape}, {labels.shape}''' )
print(F'''Labels: {labels}''' )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
snake_case_ : Optional[int] = 1
snake_case_ : List[str] = ViTMSNForImageClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
snake_case_ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case_ : Any = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
snake_case_ : Any = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ : Optional[int] = config_and_inputs
snake_case_ : Union[str, Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( _a, _a, unittest.TestCase ):
lowerCamelCase_ : List[Any] = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
lowerCamelCase_ : Optional[int] = (
{'''feature-extraction''': ViTMSNModel, '''image-classification''': ViTMSNForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase_ : int = False
lowerCamelCase_ : Optional[int] = False
lowerCamelCase_ : int = False
lowerCamelCase_ : Optional[int] = False
def lowerCamelCase (self ) -> str:
'''simple docstring'''
snake_case_ : List[Any] = ViTMSNModelTester(self )
snake_case_ : Optional[Any] = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ , hidden_size=37 )
def lowerCamelCase (self ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMSN does not use inputs_embeds''' )
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
pass
def lowerCamelCase (self ) -> int:
'''simple docstring'''
snake_case_ , snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : Any = model_class(__magic_name__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case_ : Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__magic_name__ , nn.Linear ) )
def lowerCamelCase (self ) -> int:
'''simple docstring'''
snake_case_ , snake_case_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : Tuple = model_class(__magic_name__ )
snake_case_ : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ : Optional[int] = [*signature.parameters.keys()]
snake_case_ : List[str] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __magic_name__ )
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
snake_case_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def lowerCamelCase (self ) -> List[str]:
'''simple docstring'''
snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__magic_name__ )
@slow
def lowerCamelCase (self ) -> Any:
'''simple docstring'''
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ : str = ViTMSNModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def lowerCamelCase_ ( ) -> Optional[Any]:
"""simple docstring"""
snake_case_ : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
@cached_property
def lowerCamelCase (self ) -> Union[str, Any]:
'''simple docstring'''
return ViTImageProcessor.from_pretrained('''facebook/vit-msn-small''' ) if is_vision_available() else None
@slow
def lowerCamelCase (self ) -> Any:
'''simple docstring'''
torch.manual_seed(2 )
snake_case_ : List[str] = ViTMSNForImageClassification.from_pretrained('''facebook/vit-msn-small''' ).to(__magic_name__ )
snake_case_ : str = self.default_image_processor
snake_case_ : str = prepare_img()
snake_case_ : int = image_processor(images=__magic_name__ , return_tensors='''pt''' ).to(__magic_name__ )
# forward pass
with torch.no_grad():
snake_case_ : Optional[int] = model(**__magic_name__ )
# verify the logits
snake_case_ : Optional[int] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __magic_name__ )
snake_case_ : List[Any] = torch.tensor([-0.0_803, -0.4_454, -0.2_375] ).to(__magic_name__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __magic_name__ , atol=1e-4 ) )
| 60 | 1 |
import argparse
import os
import re
lowerCAmelCase_ = '''src/transformers/models/auto'''
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
lowerCAmelCase_ = re.compile(r'''[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict''')
# re pattern that matches identifiers in mappings
lowerCAmelCase_ = re.compile(r'''\s*\(\s*"(\S[^"]+)"''')
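# Hedged examples of what the two patterns above are meant to match (illustrative, not
# exhaustive): the mapping-introduction pattern matches lines such as
#     MODEL_MAPPING_NAMES = OrderedDict(
# or
#     MODEL_FOR_PRETRAINING_MAPPING = OrderedDict(
# while the identifier pattern captures "albert" from an entry like
#     ("albert", "AlbertConfig"),
# which is the key the sorting routine below uses.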
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase = False ) -> Tuple:
"""simple docstring"""
with open(_UpperCamelCase , '''r''' , encoding='''utf-8''' ) as f:
snake_case_ : Optional[Any] = f.read()
snake_case_ : Optional[Any] = content.split('''\n''' )
snake_case_ : Optional[int] = []
snake_case_ : List[Any] = 0
while line_idx < len(_UpperCamelCase ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
snake_case_ : Any = len(re.search(R'''^(\s*)\S''' , lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(''' ''' * indent + '''(''' ):
new_lines.append(lines[line_idx] )
line_idx += 1
snake_case_ : List[Any] = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
snake_case_ : int = line_idx
while not lines[line_idx].startswith(''' ''' * indent + ''')''' ):
line_idx += 1
blocks.append('''\n'''.join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
snake_case_ : int = sorted(_UpperCamelCase , key=lambda _UpperCamelCase : _re_identifier.search(_UpperCamelCase ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(_UpperCamelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write('''\n'''.join(_UpperCamelCase ) )
elif "\n".join(_UpperCamelCase ) != content:
return True
def lowerCamelCase_ ( _UpperCamelCase = False ) -> Tuple:
"""simple docstring"""
snake_case_ : Union[str, Any] = [os.path.join(_UpperCamelCase , _UpperCamelCase ) for f in os.listdir(_UpperCamelCase ) if f.endswith('''.py''' )]
snake_case_ : Any = [sort_auto_mapping(_UpperCamelCase , overwrite=_UpperCamelCase ) for fname in fnames]
if not overwrite and any(_UpperCamelCase ):
snake_case_ : List[str] = [f for f, d in zip(_UpperCamelCase , _UpperCamelCase ) if d]
raise ValueError(
f'''The following files have auto mappings that need sorting: {", ".join(_UpperCamelCase )}. Run `make style` to fix'''
''' this.''' )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
lowerCAmelCase_ = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
| 60 |
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''google/efficientnet-b7''': '''https://huggingface.co/google/efficientnet-b7/resolve/main/config.json''',
}
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : List[Any] = '''efficientnet'''
def __init__(self , __magic_name__ = 3 , __magic_name__ = 600 , __magic_name__ = 2.0 , __magic_name__ = 3.1 , __magic_name__ = 8 , __magic_name__ = [3, 3, 5, 3, 5, 5, 3] , __magic_name__ = [32, 16, 24, 40, 80, 112, 192] , __magic_name__ = [16, 24, 40, 80, 112, 192, 320] , __magic_name__ = [] , __magic_name__ = [1, 2, 2, 2, 1, 2, 1] , __magic_name__ = [1, 2, 2, 3, 3, 4, 1] , __magic_name__ = [1, 6, 6, 6, 6, 6, 6] , __magic_name__ = 0.25 , __magic_name__ = "swish" , __magic_name__ = 2560 , __magic_name__ = "mean" , __magic_name__ = 0.02 , __magic_name__ = 0.001 , __magic_name__ = 0.99 , __magic_name__ = 0.5 , __magic_name__ = 0.2 , **__magic_name__ , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**__magic_name__ )
snake_case_ : List[str] = num_channels
snake_case_ : Tuple = image_size
snake_case_ : Union[str, Any] = width_coefficient
snake_case_ : Tuple = depth_coefficient
snake_case_ : Optional[Any] = depth_divisor
snake_case_ : Optional[int] = kernel_sizes
snake_case_ : str = in_channels
snake_case_ : Optional[Any] = out_channels
snake_case_ : int = depthwise_padding
snake_case_ : Optional[Any] = strides
snake_case_ : Any = num_block_repeats
snake_case_ : Optional[Any] = expand_ratios
snake_case_ : Union[str, Any] = squeeze_expansion_ratio
snake_case_ : Union[str, Any] = hidden_act
snake_case_ : Union[str, Any] = hidden_dim
snake_case_ : Any = pooling_type
snake_case_ : List[str] = initializer_range
snake_case_ : str = batch_norm_eps
snake_case_ : Optional[int] = batch_norm_momentum
snake_case_ : Optional[Any] = dropout_rate
snake_case_ : List[str] = drop_connect_rate
snake_case_ : Union[str, Any] = sum(__magic_name__ ) * 4
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : Union[str, Any] = version.parse('''1.11''' )
@property
def lowerCamelCase (self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def lowerCamelCase (self ) -> float:
'''simple docstring'''
return 1e-5
| 60 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase_ = {
'''configuration_roc_bert''': ['''ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoCBertConfig'''],
'''tokenization_roc_bert''': ['''RoCBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RoCBertForCausalLM''',
'''RoCBertForMaskedLM''',
'''RoCBertForMultipleChoice''',
'''RoCBertForPreTraining''',
'''RoCBertForQuestionAnswering''',
'''RoCBertForSequenceClassification''',
'''RoCBertForTokenClassification''',
'''RoCBertLayer''',
'''RoCBertModel''',
'''RoCBertPreTrainedModel''',
'''load_tf_weights_in_roc_bert''',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
raise OptionalDependencyNotAvailable()
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 60 |
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
lowerCAmelCase_ = logging.getLogger(__name__)
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser(
description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'''
)
parser.add_argument(
'''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.'''
)
parser.add_argument(
'''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.'''
)
parser.add_argument('''--vocab_size''', default=3_0_5_2_2, type=int)
lowerCAmelCase_ = parser.parse_args()
logger.info(F'''Loading data from {args.data_file}''')
with open(args.data_file, '''rb''') as fp:
lowerCAmelCase_ = pickle.load(fp)
logger.info('''Counting occurrences for MLM.''')
lowerCAmelCase_ = Counter()
for tk_ids in data:
counter.update(tk_ids)
lowerCAmelCase_ = [0] * args.vocab_size
for k, v in counter.items():
lowerCAmelCase_ = v
logger.info(F'''Dump to {args.token_counts_dump}''')
with open(args.token_counts_dump, '''wb''') as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 60 | 1 |
def lowerCamelCase_ ( _UpperCamelCase ) -> Any:
"""simple docstring"""
snake_case_ : Dict = []
snake_case_ : int = set({'''(''', '''[''', '''{'''} )
snake_case_ : Dict = set({''')''', ''']''', '''}'''} )
snake_case_ : Dict = {'''{''': '''}''', '''[''': ''']''', '''(''': ''')'''}
for i in range(len(_UpperCamelCase ) ):
if s[i] in open_brackets:
stack.append(s[i] )
elif s[i] in closed_brackets and (
len(_UpperCamelCase ) == 0 or (len(_UpperCamelCase ) > 0 and open_to_closed[stack.pop()] != s[i])
):
return False
return len(_UpperCamelCase ) == 0
def lowerCamelCase_ ( ) -> int:
"""simple docstring"""
snake_case_ : Tuple = input('''Enter sequence of brackets: ''' )
if is_balanced(_UpperCamelCase ):
print(_UpperCamelCase , '''is balanced''' )
else:
print(_UpperCamelCase , '''is not balanced''' )
if __name__ == "__main__":
main()
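# Worked trace of the bracket matcher above (comments only, since this flattened snippet
# rebinds its top-level names): for "([]{})" the stack evolves ( -> ([ -> ( -> ({ -> ( ->
# empty, so the sequence is reported balanced; for "([)]" the ")" arrives while "[" is on
# top of the stack, so it is reported unbalanced; an empty input leaves the stack empty
# and counts as balanced.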
| 60 |
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class __lowerCAmelCase ( _a ):
def __init__(self , __magic_name__ = "▁" , __magic_name__ = True , __magic_name__ = "<unk>" , __magic_name__ = "</s>" , __magic_name__ = "<pad>" , ) -> Dict:
'''simple docstring'''
snake_case_ : List[Any] = {
'''pad''': {'''id''': 0, '''token''': pad_token},
'''eos''': {'''id''': 1, '''token''': eos_token},
'''unk''': {'''id''': 2, '''token''': unk_token},
}
snake_case_ : List[str] = [None] * len(self.special_tokens )
for token_dict in self.special_tokens.values():
snake_case_ : int = token_dict['''token''']
snake_case_ : Optional[int] = Tokenizer(Unigram() )
snake_case_ : int = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(''' {2,}''' ) , ''' ''' ),
normalizers.Lowercase(),
] )
snake_case_ : Optional[int] = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=__magic_name__ , add_prefix_space=__magic_name__ ),
pre_tokenizers.Digits(individual_digits=__magic_name__ ),
pre_tokenizers.Punctuation(),
] )
snake_case_ : Tuple = decoders.Metaspace(replacement=__magic_name__ , add_prefix_space=__magic_name__ )
snake_case_ : Optional[Any] = TemplateProcessing(
single=F'''$A {self.special_tokens["eos"]["token"]}''' , special_tokens=[(self.special_tokens['''eos''']['''token'''], self.special_tokens['''eos''']['''id'''])] , )
snake_case_ : Optional[Any] = {
'''model''': '''SentencePieceUnigram''',
'''replacement''': replacement,
'''add_prefix_space''': add_prefix_space,
}
super().__init__(__magic_name__ , __magic_name__ )
def lowerCamelCase (self , __magic_name__ , __magic_name__ = 8000 , __magic_name__ = True , ) -> List[str]:
'''simple docstring'''
snake_case_ : Tuple = trainers.UnigramTrainer(
vocab_size=__magic_name__ , special_tokens=self.special_tokens_list , show_progress=__magic_name__ , )
if isinstance(__magic_name__ , __magic_name__ ):
snake_case_ : Dict = [files]
self._tokenizer.train(__magic_name__ , trainer=__magic_name__ )
self.add_unk_id()
def lowerCamelCase (self , __magic_name__ , __magic_name__ = 8000 , __magic_name__ = True , ) -> int:
'''simple docstring'''
snake_case_ : Any = trainers.UnigramTrainer(
vocab_size=__magic_name__ , special_tokens=self.special_tokens_list , show_progress=__magic_name__ , )
self._tokenizer.train_from_iterator(__magic_name__ , trainer=__magic_name__ )
self.add_unk_id()
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Tuple = json.loads(self._tokenizer.to_str() )
snake_case_ : Union[str, Any] = self.special_tokens['''unk''']['''id''']
snake_case_ : Tuple = Tokenizer.from_str(json.dumps(__magic_name__ ) )
| 60 | 1 |
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> int:
"""simple docstring"""
snake_case_ : Optional[int] = [1]
for i in range(2 , _UpperCamelCase ):
factorials.append(factorials[-1] * i )
assert 0 <= k < factorials[-1] * n, "k out of bounds"
snake_case_ : int = []
snake_case_ : Dict = list(range(_UpperCamelCase ) )
# Find permutation
while factorials:
snake_case_ : int = factorials.pop()
snake_case_ , snake_case_ : Tuple = divmod(_UpperCamelCase , _UpperCamelCase )
permutation.append(elements[number] )
elements.remove(elements[number] )
permutation.append(elements[0] )
return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
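# Worked example for the k-th permutation helper above (illustrative only; parameter
# names are not preserved in this flattened snippet): for 4 elements and k = 5 the
# factorial bases are [1, 2, 6]; divmod peels off indices 0, 2 and 1, the last remaining
# element is appended, and the result is [0, 3, 2, 1], i.e. the permutation of range(4)
# at 0-indexed position 5 in lexicographic order.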
| 60 |
# A bipartite graph is a graph whose vertices can be divided into two independent sets,
# U and V, such that every edge (u, v) connects a vertex of U to a vertex of V or vice
# versa. In other words, for every edge (u, v), either u belongs to U and v to V, or u
# belongs to V and v to U. Equivalently, no edge connects two vertices of the same set.
# A short worked check of this property follows the example usage at the end of this
# snippet.
def lowerCamelCase_ ( _UpperCamelCase ) -> Optional[int]:
"""simple docstring"""
snake_case_ : List[Any] = [False] * len(_UpperCamelCase )
snake_case_ : int = [-1] * len(_UpperCamelCase )
def dfs(_UpperCamelCase , _UpperCamelCase ):
snake_case_ : Dict = True
snake_case_ : Dict = c
for u in graph[v]:
if not visited[u]:
dfs(_UpperCamelCase , 1 - c )
for i in range(len(_UpperCamelCase ) ):
if not visited[i]:
dfs(_UpperCamelCase , 0 )
for i in range(len(_UpperCamelCase ) ):
for j in graph[i]:
if color[i] == color[j]:
return False
return True
# Adjacency list of graph
lowerCAmelCase_ = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
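# Worked check of the 2-coloring above (a sketch): the adjacency list used here is the
# 4-cycle 0-1-2-3-0 plus the isolated vertex 4, which is bipartite with color classes
# {0, 2} and {1, 3}, so the printed result is True. Adding an edge between 0 and 2 would
# create an odd cycle and make the same check return False.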
| 60 | 1 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {'''tokenizer_file''': '''tokenizer.json'''}
lowerCAmelCase_ = {
'''tokenizer_file''': {
'''bigscience/tokenizer''': '''https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json''',
'''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json''',
'''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json''',
'''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json''',
'''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json''',
'''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json''',
'''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json''',
},
}
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : Optional[Any] = VOCAB_FILES_NAMES
lowerCamelCase_ : List[str] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase_ : str = ['''input_ids''', '''attention_mask''']
lowerCamelCase_ : Union[str, Any] = None
def __init__(self , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__="<unk>" , __magic_name__="<s>" , __magic_name__="</s>" , __magic_name__="<pad>" , __magic_name__=False , __magic_name__=False , **__magic_name__ , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(
__magic_name__ , __magic_name__ , tokenizer_file=__magic_name__ , unk_token=__magic_name__ , bos_token=__magic_name__ , eos_token=__magic_name__ , pad_token=__magic_name__ , add_prefix_space=__magic_name__ , clean_up_tokenization_spaces=__magic_name__ , **__magic_name__ , )
snake_case_ : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , __magic_name__ ) != add_prefix_space:
snake_case_ : str = getattr(__magic_name__ , pre_tok_state.pop('''type''' ) )
snake_case_ : List[str] = add_prefix_space
snake_case_ : Dict = pre_tok_class(**__magic_name__ )
snake_case_ : Optional[int] = add_prefix_space
def lowerCamelCase (self , *__magic_name__ , **__magic_name__ ) -> BatchEncoding:
'''simple docstring'''
snake_case_ : Optional[Any] = kwargs.get('''is_split_into_words''' , __magic_name__ )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'''
''' pretokenized inputs.''' )
return super()._batch_encode_plus(*__magic_name__ , **__magic_name__ )
def lowerCamelCase (self , *__magic_name__ , **__magic_name__ ) -> BatchEncoding:
'''simple docstring'''
snake_case_ : Any = kwargs.get('''is_split_into_words''' , __magic_name__ )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'''
''' pretokenized inputs.''' )
return super()._encode_plus(*__magic_name__ , **__magic_name__ )
def lowerCamelCase (self , __magic_name__ , __magic_name__ = None ) -> Tuple[str]:
'''simple docstring'''
snake_case_ : Dict = self._tokenizer.model.save(__magic_name__ , name=__magic_name__ )
return tuple(__magic_name__ )
def lowerCamelCase (self , __magic_name__ ) -> List[int]:
'''simple docstring'''
snake_case_ : Dict = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__magic_name__ , add_special_tokens=__magic_name__ ) + [self.eos_token_id] )
if len(__magic_name__ ) > self.model_max_length:
snake_case_ : Union[str, Any] = input_ids[-self.model_max_length :]
return input_ids
| 60 |
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class __lowerCAmelCase ( unittest.TestCase ):
def __init__(self , __magic_name__ , __magic_name__=7 , __magic_name__=3 , __magic_name__=18 , __magic_name__=30 , __magic_name__=400 , __magic_name__=True , __magic_name__=None , __magic_name__=True , __magic_name__=None , __magic_name__=True , __magic_name__=[0.5, 0.5, 0.5] , __magic_name__=[0.5, 0.5, 0.5] , __magic_name__=False , ) -> int:
'''simple docstring'''
snake_case_ : int = size if size is not None else {'''height''': 20, '''width''': 20}
snake_case_ : int = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
snake_case_ : str = parent
snake_case_ : Optional[int] = batch_size
snake_case_ : Dict = num_channels
snake_case_ : List[Any] = image_size
snake_case_ : Union[str, Any] = min_resolution
snake_case_ : Tuple = max_resolution
snake_case_ : str = do_resize
snake_case_ : Tuple = size
snake_case_ : int = do_center_crop
snake_case_ : Tuple = crop_size
snake_case_ : int = do_normalize
snake_case_ : Optional[Any] = image_mean
snake_case_ : List[str] = image_std
snake_case_ : str = do_reduce_labels
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def lowerCamelCase_ ( ) -> List[Any]:
"""simple docstring"""
snake_case_ : Any = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
snake_case_ : Union[str, Any] = Image.open(dataset[0]['''file'''] )
snake_case_ : str = Image.open(dataset[1]['''file'''] )
return image, map
def lowerCamelCase_ ( ) -> List[Any]:
"""simple docstring"""
snake_case_ : str = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
snake_case_ : Optional[Any] = Image.open(ds[0]['''file'''] )
snake_case_ : Optional[Any] = Image.open(ds[1]['''file'''] )
snake_case_ : List[str] = Image.open(ds[2]['''file'''] )
snake_case_ : str = Image.open(ds[3]['''file'''] )
return [imagea, imagea], [mapa, mapa]
@require_torch
@require_vision
class __lowerCAmelCase ( _a, unittest.TestCase ):
lowerCamelCase_ : List[Any] = BeitImageProcessor if is_vision_available() else None
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : int = BeitImageProcessingTester(self )
@property
def lowerCamelCase (self ) -> str:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase (self ) -> int:
'''simple docstring'''
snake_case_ : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__magic_name__ , '''do_resize''' ) )
self.assertTrue(hasattr(__magic_name__ , '''size''' ) )
self.assertTrue(hasattr(__magic_name__ , '''do_center_crop''' ) )
self.assertTrue(hasattr(__magic_name__ , '''center_crop''' ) )
self.assertTrue(hasattr(__magic_name__ , '''do_normalize''' ) )
self.assertTrue(hasattr(__magic_name__ , '''image_mean''' ) )
self.assertTrue(hasattr(__magic_name__ , '''image_std''' ) )
def lowerCamelCase (self ) -> Any:
'''simple docstring'''
snake_case_ : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 20, '''width''': 20} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
self.assertEqual(image_processor.do_reduce_labels , __magic_name__ )
snake_case_ : Union[str, Any] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=__magic_name__ )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
self.assertEqual(image_processor.do_reduce_labels , __magic_name__ )
def lowerCamelCase (self ) -> Any:
'''simple docstring'''
pass
def lowerCamelCase (self ) -> str:
'''simple docstring'''
snake_case_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , Image.Image )
# Test not batched input
snake_case_ : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
snake_case_ : Any = image_processing(__magic_name__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
snake_case_ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case_ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , numpify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , np.ndarray )
# Test not batched input
snake_case_ : Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
snake_case_ : Optional[int] = image_processing(__magic_name__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , torchify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , torch.Tensor )
# Test not batched input
snake_case_ : Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
snake_case_ : List[str] = image_processing(__magic_name__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , torchify=__magic_name__ )
snake_case_ : Union[str, Any] = []
for image in image_inputs:
self.assertIsInstance(__magic_name__ , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
snake_case_ : List[str] = image_processing(image_inputs[0] , maps[0] , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
# Test batched
snake_case_ : Any = image_processing(__magic_name__ , __magic_name__ , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
# Test not batched input (PIL images)
snake_case_ , snake_case_ : Optional[int] = prepare_semantic_single_inputs()
snake_case_ : int = image_processing(__magic_name__ , __magic_name__ , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
# Test batched input (PIL images)
snake_case_ , snake_case_ : Dict = prepare_semantic_batch_inputs()
snake_case_ : Optional[int] = image_processing(__magic_name__ , __magic_name__ , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
2,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
snake_case_ , snake_case_ : Tuple = prepare_semantic_single_inputs()
snake_case_ : Optional[int] = image_processing(__magic_name__ , __magic_name__ , return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 150 )
snake_case_ : List[Any] = True
snake_case_ : int = image_processing(__magic_name__ , __magic_name__ , return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
| 60 | 1 |
from __future__ import annotations
from decimal import Decimal
from numpy import array
def lowerCamelCase_ ( _UpperCamelCase ) -> list[list[float]]:
"""simple docstring"""
snake_case_ : Tuple = Decimal
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
if len(_UpperCamelCase ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
# Calculate the determinant of the matrix
snake_case_ : Any = float(
d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
if determinant == 0:
raise ValueError('''This matrix has no inverse.''' )
# Creates a copy of the matrix with swapped positions of the elements
snake_case_ : List[str] = [[0.0, 0.0], [0.0, 0.0]]
snake_case_ , snake_case_ : Tuple = matrix[1][1], matrix[0][0]
snake_case_ , snake_case_ : Optional[Any] = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [
[(float(d(_UpperCamelCase ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
]
elif (
len(_UpperCamelCase ) == 3
and len(matrix[0] ) == 3
and len(matrix[1] ) == 3
and len(matrix[2] ) == 3
):
# Calculate the determinant of the matrix using Sarrus rule
snake_case_ : Tuple = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError('''This matrix has no inverse.''' )
# Creating cofactor matrix
snake_case_ : List[Any] = [
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
]
snake_case_ : Optional[Any] = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
d(matrix[1][2] ) * d(matrix[2][1] )
)
snake_case_ : Optional[Any] = -(
(d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
)
snake_case_ : Any = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
d(matrix[1][1] ) * d(matrix[2][0] )
)
snake_case_ : str = -(
(d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
)
snake_case_ : List[str] = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
d(matrix[0][2] ) * d(matrix[2][0] )
)
snake_case_ : Any = -(
(d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
)
snake_case_ : str = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
d(matrix[0][2] ) * d(matrix[1][1] )
)
snake_case_ : Optional[Any] = -(
(d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
)
snake_case_ : int = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
d(matrix[0][1] ) * d(matrix[1][0] )
)
# Transpose the cofactor matrix (Adjoint matrix)
snake_case_ : Tuple = array(_UpperCamelCase )
for i in range(3 ):
for j in range(3 ):
snake_case_ : Tuple = cofactor_matrix[j][i]
# Inverse of the matrix using the formula (1/determinant) * adjoint matrix
snake_case_ : Any = array(_UpperCamelCase )
for i in range(3 ):
for j in range(3 ):
inverse_matrix[i][j] /= d(_UpperCamelCase )
# Calculate the inverse of the matrix
return [[float(d(_UpperCamelCase ) ) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError('''Please provide a matrix of size 2x2 or 3x3.''' )
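# Quick numeric check for the 2x2 branch above (a sketch, not part of the original
# module): inverting [[2.0, 0.0], [0.0, 4.0]] gives determinant 8.0; after swapping the
# diagonal entries, negating the (here zero) off-diagonal entries, and dividing by the
# determinant, the result is [[0.5, 0.0], [0.0, 0.25]].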
| 60 |
from sklearn.metrics import mean_squared_error
import datasets
lowerCAmelCase_ = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
lowerCAmelCase_ = '''\
Mean Squared Error (MSE) is the average of the squared differences between the predicted
and actual values.
'''
lowerCAmelCase_ = '''
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
"raw_values" : Returns a full set of errors in case of multioutput input.
"uniform_average" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric("mse")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{\'mse\': 0.6123724356957945}
If you\'re using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric("mse", "multilist")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mse\': array([0.41666667, 1. ])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
def lowerCamelCase (self ) -> Optional[int]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
'''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'''
] , )
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value('''float''' ) ),
"references": datasets.Sequence(datasets.Value('''float''' ) ),
}
else:
return {
"predictions": datasets.Value('''float''' ),
"references": datasets.Value('''float''' ),
}
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__=None , __magic_name__="uniform_average" , __magic_name__=True ) -> Any:
'''simple docstring'''
snake_case_ : List[Any] = mean_squared_error(
__magic_name__ , __magic_name__ , sample_weight=__magic_name__ , multioutput=__magic_name__ , squared=__magic_name__ )
return {"mse": mse}
| 60 | 1 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class __lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase (self ) -> List[Any]:
'''simple docstring'''
snake_case_ : Tuple = 0
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
snake_case_ : Dict = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
self.assertIsInstance(__magic_name__ , __magic_name__ )
def lowerCamelCase (self ) -> Any:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case_ : Dict = Path(__magic_name__ ) / '''preprocessor_config.json'''
snake_case_ : Dict = Path(__magic_name__ ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__magic_name__ , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(__magic_name__ , '''w''' ) )
snake_case_ : Optional[int] = AutoImageProcessor.from_pretrained(__magic_name__ )
self.assertIsInstance(__magic_name__ , __magic_name__ )
def lowerCamelCase (self ) -> List[str]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case_ : Dict = Path(__magic_name__ ) / '''preprocessor_config.json'''
snake_case_ : Optional[int] = Path(__magic_name__ ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(__magic_name__ , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(__magic_name__ , '''w''' ) )
snake_case_ : List[Any] = AutoImageProcessor.from_pretrained(__magic_name__ )
self.assertIsInstance(__magic_name__ , __magic_name__ )
def lowerCamelCase (self ) -> List[Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case_ : Dict = CLIPConfig()
# Create a dummy config file with image_processor_type
snake_case_ : List[str] = Path(__magic_name__ ) / '''preprocessor_config.json'''
snake_case_ : List[str] = Path(__magic_name__ ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__magic_name__ , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(__magic_name__ , '''w''' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
snake_case_ : List[str] = AutoImageProcessor.from_pretrained(__magic_name__ ).to_dict()
config_dict.pop('''image_processor_type''' )
snake_case_ : List[Any] = CLIPImageProcessor(**__magic_name__ )
# save in new folder
model_config.save_pretrained(__magic_name__ )
config.save_pretrained(__magic_name__ )
snake_case_ : str = AutoImageProcessor.from_pretrained(__magic_name__ )
# make sure private variable is not incorrectly saved
snake_case_ : Union[str, Any] = json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(__magic_name__ , __magic_name__ )
def lowerCamelCase (self ) -> Any:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case_ : str = Path(__magic_name__ ) / '''preprocessor_config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__magic_name__ , '''w''' ) , )
snake_case_ : str = AutoImageProcessor.from_pretrained(__magic_name__ )
self.assertIsInstance(__magic_name__ , __magic_name__ )
def lowerCamelCase (self ) -> Any:
'''simple docstring'''
with self.assertRaisesRegex(
__magic_name__ , '''clip-base is not a local folder and is not a valid model identifier''' ):
snake_case_ : Optional[Any] = AutoImageProcessor.from_pretrained('''clip-base''' )
def lowerCamelCase (self ) -> List[str]:
'''simple docstring'''
with self.assertRaisesRegex(
__magic_name__ , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
snake_case_ : Union[str, Any] = AutoImageProcessor.from_pretrained(__magic_name__ , revision='''aaaaaa''' )
def lowerCamelCase (self ) -> Union[str, Any]:
'''simple docstring'''
with self.assertRaisesRegex(
__magic_name__ , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
snake_case_ : int = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
def lowerCamelCase (self ) -> Union[str, Any]:
'''simple docstring'''
with self.assertRaises(__magic_name__ ):
snake_case_ : Tuple = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__magic_name__ ):
snake_case_ : List[Any] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__magic_name__ )
snake_case_ : List[Any] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__magic_name__ )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__magic_name__ )
snake_case_ : Tuple = AutoImageProcessor.from_pretrained(__magic_name__ , trust_remote_code=__magic_name__ )
self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )
def lowerCamelCase (self ) -> List[str]:
'''simple docstring'''
try:
AutoConfig.register('''custom''' , __magic_name__ )
AutoImageProcessor.register(__magic_name__ , __magic_name__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__magic_name__ ):
AutoImageProcessor.register(__magic_name__ , __magic_name__ )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case_ : Tuple = Path(__magic_name__ ) / '''preprocessor_config.json'''
snake_case_ : List[str] = Path(__magic_name__ ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(__magic_name__ , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(__magic_name__ , '''w''' ) )
snake_case_ : Tuple = CustomImageProcessor.from_pretrained(__magic_name__ )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__magic_name__ )
snake_case_ : Dict = AutoImageProcessor.from_pretrained(__magic_name__ )
self.assertIsInstance(__magic_name__ , __magic_name__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def lowerCamelCase (self ) -> Union[str, Any]:
'''simple docstring'''
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : Union[str, Any] = True
try:
AutoConfig.register('''custom''' , __magic_name__ )
AutoImageProcessor.register(__magic_name__ , __magic_name__ )
# If remote code is not set, the default is to use local
snake_case_ : Dict = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
snake_case_ : Optional[Any] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__magic_name__ )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
snake_case_ : int = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__magic_name__ )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(not hasattr(__magic_name__ , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 60 |
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class __lowerCAmelCase :
lowerCamelCase_ : Any = None
def lowerCamelCase (self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
snake_case_ : List[Any] = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , __magic_name__ )
def lowerCamelCase (self ) -> int:
'''simple docstring'''
snake_case_ : Dict = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case_ : Optional[int] = os.path.join(__magic_name__ , '''feat_extract.json''' )
feat_extract_first.to_json_file(__magic_name__ )
snake_case_ : str = self.feature_extraction_class.from_json_file(__magic_name__ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Optional[int] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case_ : str = feat_extract_first.save_pretrained(__magic_name__ )[0]
check_json_file_has_correct_format(__magic_name__ )
snake_case_ : Dict = self.feature_extraction_class.from_pretrained(__magic_name__ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def lowerCamelCase (self ) -> str:
'''simple docstring'''
snake_case_ : Tuple = self.feature_extraction_class()
self.assertIsNotNone(__magic_name__ )
| 60 | 1 |
def min_path_sum(grid) -> int:
    """Return the minimum path sum from the top-left to the bottom-right cell of ``grid``,
    moving only right or down.  The grid is updated in place with accumulated costs."""
    if not grid or not grid[0]:
        raise TypeError('''The grid does not contain the appropriate information''')
    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]
    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]
    return grid[-1][-1]
def fill_row(current_row: list, row_above: list) -> list:
    """Accumulate the cheapest cost of reaching each cell of ``current_row`` given the
    accumulated costs already stored in ``row_above``."""
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
    return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
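    # Illustrative check (added; uses the de-obfuscated names introduced above, and a
    # hand-checked example grid that is not part of the original module): the cheapest
    # top-left -> bottom-right path of the 3x3 grid below is 1 -> 3 -> 1 -> 1 -> 1 = 7.
    example_grid = [[1, 3, 1], [1, 5, 1], [4, 2, 1]]
    assert min_path_sum(example_grid) == 7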
| 60 |
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
class __lowerCAmelCase :
lowerCamelCase_ : str
lowerCamelCase_ : str = None
@staticmethod
def lowerCamelCase () -> Any:
'''simple docstring'''
raise NotImplementedError
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ) -> Dict:
'''simple docstring'''
raise NotImplementedError
def lowerCamelCase (self , __magic_name__ ) -> int:
'''simple docstring'''
raise NotImplementedError
def lowerCamelCase (self ) -> Union[str, Any]:
'''simple docstring'''
if not self.is_available():
raise RuntimeError(
F'''You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.''' )
@classmethod
def lowerCamelCase (cls ) -> List[Any]:
'''simple docstring'''
return F'''`pip install {cls.pip_package or cls.name}`'''
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : Optional[int] = '''optuna'''
@staticmethod
def lowerCamelCase () -> Union[str, Any]:
'''simple docstring'''
return is_optuna_available()
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ) -> Union[str, Any]:
'''simple docstring'''
return run_hp_search_optuna(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ )
def lowerCamelCase (self , __magic_name__ ) -> Union[str, Any]:
'''simple docstring'''
return default_hp_space_optuna(__magic_name__ )
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : Any = '''ray'''
lowerCamelCase_ : List[str] = '''\'ray[tune]\''''
@staticmethod
def lowerCamelCase () -> List[Any]:
'''simple docstring'''
return is_ray_available()
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ) -> Optional[Any]:
'''simple docstring'''
return run_hp_search_ray(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ )
def lowerCamelCase (self , __magic_name__ ) -> Union[str, Any]:
'''simple docstring'''
return default_hp_space_ray(__magic_name__ )
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : Tuple = '''sigopt'''
@staticmethod
def lowerCamelCase () -> Optional[int]:
'''simple docstring'''
return is_sigopt_available()
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ) -> List[str]:
'''simple docstring'''
return run_hp_search_sigopt(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ )
def lowerCamelCase (self , __magic_name__ ) -> int:
'''simple docstring'''
return default_hp_space_sigopt(__magic_name__ )
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : Tuple = '''wandb'''
@staticmethod
def lowerCamelCase () -> Dict:
'''simple docstring'''
return is_wandb_available()
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ) -> Optional[Any]:
'''simple docstring'''
return run_hp_search_wandb(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ )
def lowerCamelCase (self , __magic_name__ ) -> Optional[Any]:
'''simple docstring'''
return default_hp_space_wandb(__magic_name__ )
lowerCAmelCase_ = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def lowerCamelCase_ ( ) -> str:
"""simple docstring"""
snake_case_ : Optional[int] = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
if len(_UpperCamelCase ) > 0:
snake_case_ : Dict = available_backends[0].name
if len(_UpperCamelCase ) > 1:
logger.info(
f'''{len(_UpperCamelCase )} hyperparameter search backends available. Using {name} as the default.''' )
return name
raise RuntimeError(
'''No hyperparameter search backend available.\n'''
+ '''\n'''.join(
f''' - To install {backend.name} run {backend.pip_install()}'''
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
| 60 | 1 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class __lowerCAmelCase ( unittest.TestCase ):
def __init__(self , __magic_name__ , __magic_name__=7 , __magic_name__=3 , __magic_name__=30 , __magic_name__=400 , __magic_name__=True , __magic_name__=None , __magic_name__=True , __magic_name__=[0.5, 0.5, 0.5] , __magic_name__=[0.5, 0.5, 0.5] , __magic_name__=True , __magic_name__=1 / 255 , __magic_name__=True , ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Any = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333}
snake_case_ : List[Any] = parent
snake_case_ : Dict = batch_size
snake_case_ : Optional[int] = num_channels
snake_case_ : Tuple = min_resolution
snake_case_ : Dict = max_resolution
snake_case_ : Any = do_resize
snake_case_ : Union[str, Any] = size
snake_case_ : Any = do_normalize
snake_case_ : Optional[int] = image_mean
snake_case_ : List[str] = image_std
snake_case_ : Tuple = do_rescale
snake_case_ : Optional[Any] = rescale_factor
snake_case_ : Any = do_pad
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def lowerCamelCase (self , __magic_name__ , __magic_name__=False ) -> Union[str, Any]:
'''simple docstring'''
if not batched:
snake_case_ : Tuple = image_inputs[0]
if isinstance(__magic_name__ , Image.Image ):
snake_case_ , snake_case_ : Optional[Any] = image.size
else:
snake_case_ , snake_case_ : Any = image.shape[1], image.shape[2]
if w < h:
snake_case_ : int = int(self.size['''shortest_edge'''] * h / w )
snake_case_ : Any = self.size['''shortest_edge''']
elif w > h:
snake_case_ : List[str] = self.size['''shortest_edge''']
snake_case_ : int = int(self.size['''shortest_edge'''] * w / h )
else:
snake_case_ : Tuple = self.size['''shortest_edge''']
snake_case_ : int = self.size['''shortest_edge''']
else:
snake_case_ : int = []
for image in image_inputs:
snake_case_ , snake_case_ : Optional[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
snake_case_ : int = max(__magic_name__ , key=lambda __magic_name__ : item[0] )[0]
snake_case_ : Union[str, Any] = max(__magic_name__ , key=lambda __magic_name__ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class __lowerCAmelCase ( _a, unittest.TestCase ):
lowerCamelCase_ : Optional[int] = YolosImageProcessor if is_vision_available() else None
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
snake_case_ : Optional[Any] = YolosImageProcessingTester(self )
@property
def lowerCamelCase (self ) -> List[Any]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase (self ) -> List[Any]:
'''simple docstring'''
snake_case_ : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__magic_name__ , '''image_mean''' ) )
self.assertTrue(hasattr(__magic_name__ , '''image_std''' ) )
self.assertTrue(hasattr(__magic_name__ , '''do_normalize''' ) )
self.assertTrue(hasattr(__magic_name__ , '''do_resize''' ) )
self.assertTrue(hasattr(__magic_name__ , '''size''' ) )
def lowerCamelCase (self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1333} )
self.assertEqual(image_processor.do_pad , __magic_name__ )
snake_case_ : str = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__magic_name__ )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , __magic_name__ )
def lowerCamelCase (self ) -> int:
'''simple docstring'''
pass
def lowerCamelCase (self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case_ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , Image.Image )
# Test not batched input
snake_case_ : int = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
snake_case_ , snake_case_ : Union[str, Any] = self.image_processor_tester.get_expected_values(__magic_name__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case_ , snake_case_ : Optional[Any] = self.image_processor_tester.get_expected_values(__magic_name__ , batched=__magic_name__ )
snake_case_ : int = image_processing(__magic_name__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCamelCase (self ) -> Tuple:
'''simple docstring'''
snake_case_ : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case_ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , numpify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , np.ndarray )
# Test not batched input
snake_case_ : str = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
snake_case_ , snake_case_ : int = self.image_processor_tester.get_expected_values(__magic_name__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case_ : Any = image_processing(__magic_name__ , return_tensors='''pt''' ).pixel_values
snake_case_ , snake_case_ : Union[str, Any] = self.image_processor_tester.get_expected_values(__magic_name__ , batched=__magic_name__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCamelCase (self ) -> List[str]:
'''simple docstring'''
snake_case_ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , torchify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , torch.Tensor )
# Test not batched input
snake_case_ : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
snake_case_ , snake_case_ : Union[str, Any] = self.image_processor_tester.get_expected_values(__magic_name__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case_ : int = image_processing(__magic_name__ , return_tensors='''pt''' ).pixel_values
snake_case_ , snake_case_ : Optional[Any] = self.image_processor_tester.get_expected_values(__magic_name__ , batched=__magic_name__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCamelCase (self ) -> str:
'''simple docstring'''
snake_case_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
snake_case_ : Optional[Any] = self.image_processing_class(do_resize=__magic_name__ , do_normalize=__magic_name__ , do_rescale=__magic_name__ )
# create random PyTorch tensors
snake_case_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , torchify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
snake_case_ : List[str] = image_processing_a.pad(__magic_name__ , return_tensors='''pt''' )
snake_case_ : Optional[Any] = image_processing_a(__magic_name__ , return_tensors='''pt''' )
self.assertTrue(
torch.allclose(encoded_images_with_method['''pixel_values'''] , encoded_images['''pixel_values'''] , atol=1e-4 ) )
@slow
def lowerCamelCase (self ) -> List[str]:
'''simple docstring'''
snake_case_ : Any = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
snake_case_ : Tuple = json.loads(f.read() )
snake_case_ : int = {'''image_id''': 3_9769, '''annotations''': target}
# encode them
snake_case_ : List[Any] = YolosImageProcessor.from_pretrained('''hustvl/yolos-small''' )
snake_case_ : Union[str, Any] = image_processing(images=__magic_name__ , annotations=__magic_name__ , return_tensors='''pt''' )
# verify pixel values
snake_case_ : Any = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , __magic_name__ )
snake_case_ : Optional[Any] = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __magic_name__ , atol=1e-4 ) )
# verify area
snake_case_ : Union[str, Any] = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __magic_name__ ) )
# verify boxes
snake_case_ : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __magic_name__ )
snake_case_ : int = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __magic_name__ , atol=1e-3 ) )
# verify image_id
snake_case_ : Any = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __magic_name__ ) )
# verify is_crowd
snake_case_ : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __magic_name__ ) )
# verify class_labels
snake_case_ : Tuple = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __magic_name__ ) )
# verify orig_size
snake_case_ : int = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __magic_name__ ) )
# verify size
snake_case_ : Any = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __magic_name__ ) )
@slow
def lowerCamelCase (self ) -> List[str]:
'''simple docstring'''
snake_case_ : str = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
snake_case_ : int = json.loads(f.read() )
snake_case_ : Tuple = {'''file_name''': '''000000039769.png''', '''image_id''': 3_9769, '''segments_info''': target}
snake_case_ : Tuple = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
snake_case_ : Union[str, Any] = YolosImageProcessor(format='''coco_panoptic''' )
snake_case_ : Tuple = image_processing(images=__magic_name__ , annotations=__magic_name__ , masks_path=__magic_name__ , return_tensors='''pt''' )
# verify pixel values
snake_case_ : Union[str, Any] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['''pixel_values'''].shape , __magic_name__ )
snake_case_ : str = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __magic_name__ , atol=1e-4 ) )
# verify area
snake_case_ : Any = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __magic_name__ ) )
# verify boxes
snake_case_ : Union[str, Any] = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __magic_name__ )
snake_case_ : Any = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __magic_name__ , atol=1e-3 ) )
# verify image_id
snake_case_ : Optional[int] = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __magic_name__ ) )
# verify is_crowd
snake_case_ : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __magic_name__ ) )
# verify class_labels
snake_case_ : Any = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __magic_name__ ) )
# verify masks
snake_case_ : List[Any] = 82_2873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , __magic_name__ )
# verify orig_size
snake_case_ : Union[str, Any] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __magic_name__ ) )
# verify size
snake_case_ : Any = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __magic_name__ ) )
| 60 |
def neville_interpolate(x_points: list, y_points: list, xa) -> list:
    """Evaluate, at ``xa``, the polynomial interpolating ``(x_points, y_points)`` using
    Neville's iterated interpolation; returns the value and the table of intermediates."""
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]
    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (xa - x_points[j - i + 1]) * q[j][i - 1]
                - (xa - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
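    # Hedged usage sketch (added for illustration; the call values are assumptions, not
    # part of the original): the samples below lie on the line y = x + 5, so the
    # interpolated value at x = 5 should be 10.
    value, _table = neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)
    assert abs(value - 10.0) < 1e-9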
| 60 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase_ = {
'''configuration_speech_to_text''': ['''SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Speech2TextConfig'''],
'''processing_speech_to_text''': ['''Speech2TextProcessor'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['''Speech2TextTokenizer''']
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['''Speech2TextFeatureExtractor''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSpeech2TextForConditionalGeneration''',
'''TFSpeech2TextModel''',
'''TFSpeech2TextPreTrainedModel''',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Speech2TextForConditionalGeneration''',
'''Speech2TextModel''',
'''Speech2TextPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 60 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase_ = {
'''configuration_xmod''': [
'''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XmodConfig''',
'''XmodOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XmodForCausalLM''',
'''XmodForMaskedLM''',
'''XmodForMultipleChoice''',
'''XmodForQuestionAnswering''',
'''XmodForSequenceClassification''',
'''XmodForTokenClassification''',
'''XmodModel''',
'''XmodPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 60 | 1 |
from sklearn.metrics import matthews_corrcoef
import datasets
lowerCAmelCase_ = '''
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
'''
lowerCAmelCase_ = '''
Args:
predictions (list of int): Predicted labels, as returned by a model.
references (list of int): Ground truth labels.
sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
matthews_correlation (dict containing float): Matthews correlation.
Examples:
Example 1, a basic example with only predictions and references as inputs:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3])
>>> print(round(results[\'matthews_correlation\'], 2))
0.54
Example 2, the same example as above, but also including sample weights:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 3, 1, 1, 1, 2])
>>> print(round(results[\'matthews_correlation\'], 2))
0.1
Example 3, the same example as above, but with sample weights that cause a negative correlation:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 1, 0, 0, 0, 1])
>>> print(round(results[\'matthews_correlation\'], 2))
-0.25
'''
lowerCAmelCase_ = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=[
'''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html'''
] , )
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__=None ) -> int:
'''simple docstring'''
return {
"matthews_correlation": float(matthews_corrcoef(__magic_name__ , __magic_name__ , sample_weight=__magic_name__ ) ),
}
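# Reference note (added for clarity): in the binary case the coefficient computed above
# reduces to MCC = (TP*TN - FP*FN) / sqrt((TP+FP)*(TP+FN)*(TN+FP)*(TN+FN)); the
# multiclass case generalises this via the full confusion matrix, which is what
# sklearn.metrics.matthews_corrcoef evaluates.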
| 60 |
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def lowerCamelCase_ ( _UpperCamelCase ) -> Union[str, Any]:
"""simple docstring"""
return getitem, k
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> Any:
"""simple docstring"""
return setitem, k, v
def lowerCamelCase_ ( _UpperCamelCase ) -> Tuple:
"""simple docstring"""
return delitem, k
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , *_UpperCamelCase ) -> str:
"""simple docstring"""
try:
return fun(_UpperCamelCase , *_UpperCamelCase ), None
except Exception as e:
return None, e
lowerCAmelCase_ = (
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
)
lowerCAmelCase_ = [
_set('''key_a''', '''val_a'''),
_set('''key_a''', '''val_b'''),
]
lowerCAmelCase_ = [
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
_del('''key_a'''),
_del('''key_b'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
]
lowerCAmelCase_ = [
_get('''key_a'''),
_del('''key_a'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
_del('''key_a'''),
_get('''key_a'''),
]
lowerCAmelCase_ = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
lowerCAmelCase_ = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set('''key_a''', '''val_b'''),
]
@pytest.mark.parametrize(
'''operations''' , (
pytest.param(_add_items , id='''add items''' ),
pytest.param(_overwrite_items , id='''overwrite items''' ),
pytest.param(_delete_items , id='''delete items''' ),
pytest.param(_access_absent_items , id='''access absent items''' ),
pytest.param(_add_with_resize_up , id='''add with resize up''' ),
pytest.param(_add_with_resize_down , id='''add with resize down''' ),
) , )
def lowerCamelCase_ ( _UpperCamelCase ) -> Any:
"""simple docstring"""
snake_case_ : Any = HashMap(initial_block_size=4 )
snake_case_ : Union[str, Any] = {}
for _, (fun, *args) in enumerate(_UpperCamelCase ):
snake_case_ , snake_case_ : str = _run_operation(_UpperCamelCase , _UpperCamelCase , *_UpperCamelCase )
snake_case_ , snake_case_ : List[Any] = _run_operation(_UpperCamelCase , _UpperCamelCase , *_UpperCamelCase )
assert my_res == py_res
assert str(_UpperCamelCase ) == str(_UpperCamelCase )
assert set(_UpperCamelCase ) == set(_UpperCamelCase )
assert len(_UpperCamelCase ) == len(_UpperCamelCase )
assert set(my.items() ) == set(py.items() )
def lowerCamelCase_ ( ) -> Any:
"""simple docstring"""
def is_public(_UpperCamelCase ) -> bool:
return not name.startswith('''_''' )
snake_case_ : str = {name for name in dir({} ) if is_public(_UpperCamelCase )}
snake_case_ : str = {name for name in dir(HashMap() ) if is_public(_UpperCamelCase )}
assert dict_public_names > hash_public_names
| 60 | 1 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def lowerCamelCase_ ( ) -> Optional[Any]:
"""simple docstring"""
snake_case_ : Optional[int] = ArgumentParser(
description=(
'''PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes'''
) )
# Optional arguments for the launch helper
parser.add_argument('''--num_cores''' , type=_UpperCamelCase , default=1 , help='''Number of TPU cores to use (1 or 8).''' )
# positional
parser.add_argument(
'''training_script''' , type=_UpperCamelCase , help=(
'''The full path to the single TPU training '''
'''program/script to be launched in parallel, '''
'''followed by all the arguments for the '''
'''training script'''
) , )
# rest from the training program
parser.add_argument('''training_script_args''' , nargs=_UpperCamelCase )
return parser.parse_args()
def lowerCamelCase_ ( ) -> Tuple:
"""simple docstring"""
snake_case_ : List[str] = parse_args()
# Import training_script as a module.
snake_case_ : Dict = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
snake_case_ : Dict = script_fpath.stem
snake_case_ : int = importlib.import_module(_UpperCamelCase )
# Patch sys.argv
snake_case_ : Any = [args.training_script] + args.training_script_args + ['''--tpu_num_cores''', str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 60 |
from __future__ import annotations
def bucket_sort(my_list: list) -> list:
    """Bucket sort: spread the values over ``int(max - min) + 1`` buckets keyed by their
    offset from the minimum, sort each bucket, then concatenate the buckets in order."""
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]
    for i in my_list:
        buckets[int(i - min_value)].append(i)
    return [v for bucket in buckets for v in sorted(bucket)]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -1_0, 1_5, 2, -2]) == [-1_0, -2, 0, 1, 2, 1_5]
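    # Additional illustrative check (added; not in the original): duplicates and floats
    # also work, since each value only selects a bucket index and every bucket is
    # sorted before concatenation.
    assert bucket_sort([0.5, 2.5, 0.5, 1.0]) == [0.5, 0.5, 1.0, 2.5]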
| 60 | 1 |
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    """Return one longest non-decreasing subsequence of ``array``."""
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1
    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
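    # Illustrative check (added; the input is the classic example, hand-traced against
    # the reconstruction above): the function returns one longest non-decreasing
    # subsequence of the input.
    assert longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80]) == [10, 22, 33, 41, 60, 80]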
| 60 |
import tensorflow as tf
from ...tf_utils import shape_list
class __lowerCAmelCase ( tf.keras.layers.Layer ):
def __init__(self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=1 , __magic_name__=False , **__magic_name__ ) -> Dict:
'''simple docstring'''
super().__init__(**__magic_name__ )
snake_case_ : List[Any] = vocab_size
snake_case_ : Dict = d_embed
snake_case_ : Union[str, Any] = d_proj
snake_case_ : str = cutoffs + [vocab_size]
snake_case_ : int = [0] + self.cutoffs
snake_case_ : Optional[int] = div_val
snake_case_ : int = self.cutoffs[0]
snake_case_ : Any = len(self.cutoffs ) - 1
snake_case_ : Union[str, Any] = self.shortlist_size + self.n_clusters
snake_case_ : str = keep_order
snake_case_ : int = []
snake_case_ : Union[str, Any] = []
def lowerCamelCase (self , __magic_name__ ) -> Union[str, Any]:
'''simple docstring'''
if self.n_clusters > 0:
snake_case_ : Tuple = self.add_weight(
shape=(self.n_clusters, self.d_embed) , initializer='''zeros''' , trainable=__magic_name__ , name='''cluster_weight''' )
snake_case_ : Optional[Any] = self.add_weight(
shape=(self.n_clusters,) , initializer='''zeros''' , trainable=__magic_name__ , name='''cluster_bias''' )
if self.div_val == 1:
for i in range(len(self.cutoffs ) ):
if self.d_proj != self.d_embed:
snake_case_ : List[str] = self.add_weight(
shape=(self.d_embed, self.d_proj) , initializer='''zeros''' , trainable=__magic_name__ , name=F'''out_projs_._{i}''' , )
self.out_projs.append(__magic_name__ )
else:
self.out_projs.append(__magic_name__ )
snake_case_ : Optional[Any] = self.add_weight(
shape=(self.vocab_size, self.d_embed) , initializer='''zeros''' , trainable=__magic_name__ , name=F'''out_layers_._{i}_._weight''' , )
snake_case_ : List[str] = self.add_weight(
shape=(self.vocab_size,) , initializer='''zeros''' , trainable=__magic_name__ , name=F'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
else:
for i in range(len(self.cutoffs ) ):
snake_case_ , snake_case_ : Optional[int] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
snake_case_ : Optional[Any] = self.d_embed // (self.div_val**i)
snake_case_ : int = self.add_weight(
shape=(d_emb_i, self.d_proj) , initializer='''zeros''' , trainable=__magic_name__ , name=F'''out_projs_._{i}''' )
self.out_projs.append(__magic_name__ )
snake_case_ : int = self.add_weight(
shape=(r_idx - l_idx, d_emb_i) , initializer='''zeros''' , trainable=__magic_name__ , name=F'''out_layers_._{i}_._weight''' , )
snake_case_ : Any = self.add_weight(
shape=(r_idx - l_idx,) , initializer='''zeros''' , trainable=__magic_name__ , name=F'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
super().build(__magic_name__ )
@staticmethod
def lowerCamelCase (__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None ) -> str:
'''simple docstring'''
snake_case_ : Union[str, Any] = x
if proj is not None:
snake_case_ : List[str] = tf.einsum('''ibd,ed->ibe''' , __magic_name__ , __magic_name__ )
return tf.einsum('''ibd,nd->ibn''' , __magic_name__ , __magic_name__ ) + b
@staticmethod
def lowerCamelCase (__magic_name__ , __magic_name__ ) -> Any:
'''simple docstring'''
snake_case_ : Union[str, Any] = shape_list(__magic_name__ )
snake_case_ : Tuple = tf.range(lp_size[0] , dtype=target.dtype )
snake_case_ : Dict = tf.stack([r, target] , 1 )
return tf.gather_nd(__magic_name__ , __magic_name__ )
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__=True , __magic_name__=False ) -> str:
'''simple docstring'''
snake_case_ : Optional[Any] = 0
if self.n_clusters == 0:
snake_case_ : Any = self._logit(__magic_name__ , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
if target is not None:
snake_case_ : Union[str, Any] = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=__magic_name__ , logits=__magic_name__ )
snake_case_ : Optional[Any] = tf.nn.log_softmax(__magic_name__ , axis=-1 )
else:
snake_case_ : Optional[int] = shape_list(__magic_name__ )
snake_case_ : int = []
snake_case_ : List[Any] = tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
snake_case_ , snake_case_ : Optional[int] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
snake_case_ : str = (target >= l_idx) & (target < r_idx)
snake_case_ : Dict = tf.where(__magic_name__ )
snake_case_ : List[str] = tf.boolean_mask(__magic_name__ , __magic_name__ ) - l_idx
if self.div_val == 1:
snake_case_ : Any = self.out_layers[0][0][l_idx:r_idx]
snake_case_ : Dict = self.out_layers[0][1][l_idx:r_idx]
else:
snake_case_ : Union[str, Any] = self.out_layers[i][0]
snake_case_ : int = self.out_layers[i][1]
if i == 0:
snake_case_ : List[Any] = tf.concat([cur_W, self.cluster_weight] , 0 )
snake_case_ : Tuple = tf.concat([cur_b, self.cluster_bias] , 0 )
snake_case_ : Optional[int] = self._logit(__magic_name__ , __magic_name__ , __magic_name__ , self.out_projs[0] )
snake_case_ : Any = tf.nn.log_softmax(__magic_name__ )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
snake_case_ : Optional[Any] = tf.boolean_mask(__magic_name__ , __magic_name__ )
snake_case_ : Tuple = self._gather_logprob(__magic_name__ , __magic_name__ )
else:
snake_case_ : Optional[int] = self._logit(__magic_name__ , __magic_name__ , __magic_name__ , self.out_projs[i] )
snake_case_ : Union[str, Any] = tf.nn.log_softmax(__magic_name__ )
snake_case_ : Optional[Any] = self.cutoffs[0] + i - 1 # No probability for the head cluster
snake_case_ : Optional[int] = head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(__magic_name__ )
if target is not None:
snake_case_ : Any = tf.boolean_mask(__magic_name__ , __magic_name__ )
snake_case_ : Optional[Any] = tf.boolean_mask(__magic_name__ , __magic_name__ )
snake_case_ : str = self._gather_logprob(__magic_name__ , __magic_name__ )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(__magic_name__ , -cur_logprob , shape_list(__magic_name__ ) )
snake_case_ : str = tf.concat(__magic_name__ , axis=-1 )
if target is not None:
if return_mean:
snake_case_ : int = tf.reduce_mean(__magic_name__ )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(__magic_name__ )
# Log the loss as a metric (we could log arbitrary metrics,
# including different metrics for training and inference.
self.add_metric(__magic_name__ , name=self.name , aggregation='''mean''' if return_mean else '''''' )
return out
| 60 | 1 |
from __future__ import annotations
def print_distance(distance: list[float], src: int) -> None:
    """Print the shortest known distance from ``src`` to every vertex."""
    print(f'''Vertex\tShortest Distance from vertex {src}''' )
    for i, d in enumerate(distance):
        print(f'''{i}\t\t{d}''' )
def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int) -> bool:
    """Return True if one more relaxation pass would still improve a distance,
    i.e. a negative-weight cycle is reachable from the source."""
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
        if distance[u] != float('''inf''') and distance[u] + w < distance[v]:
            return True
    return False
def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    """Bellman-Ford single-source shortest paths; raises if a negative cycle is found."""
    distance = [float('''inf''')] * vertex_count
    distance[src] = 0.0
    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
            if distance[u] != float('''inf''') and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w
    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception('''Negative cycle found''')
    return distance
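# Hedged worked example (added for illustration; not part of the interactive driver below):
# with 3 vertices, source 0 and edges 0->1 (weight 2), 1->2 (weight 3), 0->2 (weight 10),
# repeated relaxation settles on distances [0.0, 2.0, 5.0], since 0->1->2 beats 0->2.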
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase_ = int(input('''Enter number of vertices: ''').strip())
lowerCAmelCase_ = int(input('''Enter number of edges: ''').strip())
lowerCAmelCase_ = [{} for _ in range(E)]
for i in range(E):
print('''Edge ''', i + 1)
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = (
int(x)
for x in input('''Enter source, destination, weight: ''').strip().split(''' ''')
)
lowerCAmelCase_ = {'''src''': src, '''dst''': dest, '''weight''': weight}
lowerCAmelCase_ = int(input('''\nEnter shortest path source:''').strip())
lowerCAmelCase_ = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
| 60 |
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    """POST ``message_body`` as a JSON payload to a Slack incoming-webhook URL."""
    headers = {'''Content-Type''': '''application/json'''}
    response = requests.post(slack_url, json={'''text''': message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            '''Request to slack returned an error '''
            f'''{response.status_code}, the response is:\n{response.text}'''
        )
        raise ValueError(msg)
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('''<YOUR MESSAGE BODY>''', '''<SLACK CHANNEL URL>''')
| 60 | 1 |
from math import pi
def arc_length(angle: int, radius: int) -> float:
    """Return the length of an arc spanning ``angle`` degrees on a circle of radius ``radius``."""
    return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(9_0, 1_0))
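    # Added sanity checks (illustrative): a 90-degree arc is a quarter of the
    # circumference, and a 360-degree arc is the full circumference 2*pi*r.
    assert abs(arc_length(9_0, 1_0) - (2 * pi * 1_0) / 4) < 1e-9
    assert abs(arc_length(3_6_0, 1_0) - 2 * pi * 1_0) < 1e-9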
| 60 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase_ = {
'''configuration_speech_to_text''': ['''SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Speech2TextConfig'''],
'''processing_speech_to_text''': ['''Speech2TextProcessor'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['''Speech2TextTokenizer''']
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['''Speech2TextFeatureExtractor''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSpeech2TextForConditionalGeneration''',
'''TFSpeech2TextModel''',
'''TFSpeech2TextPreTrainedModel''',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Speech2TextForConditionalGeneration''',
'''Speech2TextModel''',
'''Speech2TextPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 60 | 1 |
lowerCAmelCase_ = [
[0, 1_6, 1_3, 0, 0, 0],
[0, 0, 1_0, 1_2, 0, 0],
[0, 4, 0, 0, 1_4, 0],
[0, 0, 9, 0, 0, 2_0],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Optional[int]:
"""simple docstring"""
snake_case_ : int = [False] * len(_UpperCamelCase )
snake_case_ : str = [s]
snake_case_ : Tuple = True
while queue:
snake_case_ : Union[str, Any] = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(_UpperCamelCase )
snake_case_ : List[Any] = True
snake_case_ : str = u
return visited[t]
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Any:
"""simple docstring"""
snake_case_ : int = [-1] * (len(_UpperCamelCase ))
snake_case_ : List[Any] = 0
snake_case_ : List[Any] = []
snake_case_ : List[Any] = [i[:] for i in graph] # Record original cut, copy.
while bfs(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
snake_case_ : List[Any] = float('''Inf''' )
snake_case_ : Tuple = sink
while s != source:
# Find the minimum value in select path
snake_case_ : Tuple = min(_UpperCamelCase , graph[parent[s]][s] )
snake_case_ : Tuple = parent[s]
max_flow += path_flow
snake_case_ : str = sink
while v != source:
snake_case_ : str = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
snake_case_ : Dict = parent[v]
for i in range(len(_UpperCamelCase ) ):
for j in range(len(graph[0] ) ):
if graph[i][j] == 0 and temp[i][j] > 0:
res.append((i, j) )
return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
| 60 |
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''google/owlvit-base-patch32''': '''https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json''',
'''google/owlvit-base-patch16''': '''https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json''',
'''google/owlvit-large-patch14''': '''https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json''',
}
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : Tuple = '''owlvit_text_model'''
def __init__(self , __magic_name__=4_9408 , __magic_name__=512 , __magic_name__=2048 , __magic_name__=12 , __magic_name__=8 , __magic_name__=16 , __magic_name__="quick_gelu" , __magic_name__=1e-5 , __magic_name__=0.0 , __magic_name__=0.02 , __magic_name__=1.0 , __magic_name__=0 , __magic_name__=4_9406 , __magic_name__=4_9407 , **__magic_name__ , ) -> str:
'''simple docstring'''
super().__init__(pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
snake_case_ : int = vocab_size
snake_case_ : str = hidden_size
snake_case_ : List[Any] = intermediate_size
snake_case_ : str = num_hidden_layers
snake_case_ : List[Any] = num_attention_heads
snake_case_ : Optional[Any] = max_position_embeddings
snake_case_ : str = hidden_act
snake_case_ : Union[str, Any] = layer_norm_eps
snake_case_ : Dict = attention_dropout
snake_case_ : Union[str, Any] = initializer_range
snake_case_ : int = initializer_factor
@classmethod
def lowerCamelCase (cls , __magic_name__ , **__magic_name__ ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(__magic_name__ )
snake_case_ , snake_case_ : str = cls.get_config_dict(__magic_name__ , **__magic_name__ )
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get('''model_type''' ) == "owlvit":
snake_case_ : str = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__magic_name__ , **__magic_name__ )
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : int = '''owlvit_vision_model'''
def __init__(self , __magic_name__=768 , __magic_name__=3072 , __magic_name__=12 , __magic_name__=12 , __magic_name__=3 , __magic_name__=768 , __magic_name__=32 , __magic_name__="quick_gelu" , __magic_name__=1e-5 , __magic_name__=0.0 , __magic_name__=0.02 , __magic_name__=1.0 , **__magic_name__ , ) -> int:
'''simple docstring'''
super().__init__(**__magic_name__ )
snake_case_ : Optional[Any] = hidden_size
snake_case_ : Union[str, Any] = intermediate_size
snake_case_ : Union[str, Any] = num_hidden_layers
snake_case_ : Tuple = num_attention_heads
snake_case_ : List[Any] = num_channels
snake_case_ : Union[str, Any] = image_size
snake_case_ : Dict = patch_size
snake_case_ : List[Any] = hidden_act
snake_case_ : Tuple = layer_norm_eps
snake_case_ : Dict = attention_dropout
snake_case_ : List[str] = initializer_range
snake_case_ : List[Any] = initializer_factor
@classmethod
def lowerCamelCase (cls , __magic_name__ , **__magic_name__ ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(__magic_name__ )
snake_case_ , snake_case_ : int = cls.get_config_dict(__magic_name__ , **__magic_name__ )
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get('''model_type''' ) == "owlvit":
snake_case_ : str = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__magic_name__ , **__magic_name__ )
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : int = '''owlvit'''
lowerCamelCase_ : Optional[int] = True
def __init__(self , __magic_name__=None , __magic_name__=None , __magic_name__=512 , __magic_name__=2.6_592 , __magic_name__=True , **__magic_name__ , ) -> int:
'''simple docstring'''
super().__init__(**__magic_name__ )
if text_config is None:
snake_case_ : Tuple = {}
logger.info('''text_config is None. Initializing the OwlViTTextConfig with default values.''' )
if vision_config is None:
snake_case_ : str = {}
logger.info('''vision_config is None. initializing the OwlViTVisionConfig with default values.''' )
snake_case_ : str = OwlViTTextConfig(**__magic_name__ )
snake_case_ : Union[str, Any] = OwlViTVisionConfig(**__magic_name__ )
snake_case_ : Any = projection_dim
snake_case_ : Union[str, Any] = logit_scale_init_value
snake_case_ : str = return_dict
snake_case_ : Any = 1.0
@classmethod
def lowerCamelCase (cls , __magic_name__ , **__magic_name__ ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(__magic_name__ )
snake_case_ , snake_case_ : Optional[Any] = cls.get_config_dict(__magic_name__ , **__magic_name__ )
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__magic_name__ , **__magic_name__ )
@classmethod
def lowerCamelCase (cls , __magic_name__ , __magic_name__ , **__magic_name__ ) -> str:
'''simple docstring'''
snake_case_ : Optional[int] = {}
snake_case_ : Union[str, Any] = text_config
snake_case_ : Optional[Any] = vision_config
return cls.from_dict(__magic_name__ , **__magic_name__ )
def lowerCamelCase (self ) -> str:
'''simple docstring'''
snake_case_ : Dict = copy.deepcopy(self.__dict__ )
snake_case_ : List[Any] = self.text_config.to_dict()
snake_case_ : List[Any] = self.vision_config.to_dict()
snake_case_ : Tuple = self.__class__.model_type
return output
class __lowerCAmelCase ( _a ):
@property
def lowerCamelCase (self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
] )
@property
def lowerCamelCase (self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''logits_per_image''', {0: '''batch'''}),
('''logits_per_text''', {0: '''batch'''}),
('''text_embeds''', {0: '''batch'''}),
('''image_embeds''', {0: '''batch'''}),
] )
@property
def lowerCamelCase (self ) -> float:
'''simple docstring'''
return 1e-4
def lowerCamelCase (self , __magic_name__ , __magic_name__ = -1 , __magic_name__ = -1 , __magic_name__ = None , ) -> Mapping[str, Any]:
'''simple docstring'''
snake_case_ : Dict = super().generate_dummy_inputs(
processor.tokenizer , batch_size=__magic_name__ , seq_length=__magic_name__ , framework=__magic_name__ )
snake_case_ : List[str] = super().generate_dummy_inputs(
processor.image_processor , batch_size=__magic_name__ , framework=__magic_name__ )
return {**text_input_dict, **image_input_dict}
@property
def lowerCamelCase (self ) -> int:
'''simple docstring'''
return 14
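# A small, self-contained sketch of the composite-config pattern the class above
# relies on: a parent config holds a text sub-config and a vision sub-config and
# serializes them recursively in to_dict(). The toy class names below are
# illustrative assumptions, not the real transformers API.
import copy

class ToySubConfig:
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

    def to_dict(self):
        return copy.deepcopy(self.__dict__)

class ToyVisionTextConfig:
    model_type = "toy-vision-text"

    def __init__(self, text_config=None, vision_config=None, projection_dim=512):
        self.text_config = ToySubConfig(**(text_config or {}))
        self.vision_config = ToySubConfig(**(vision_config or {}))
        self.projection_dim = projection_dim

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.model_type
        return output

toy = ToyVisionTextConfig(text_config={"vocab_size": 49408}, vision_config={"image_size": 768})
print(toy.to_dict()["vision_config"])  # {'image_size': 768}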
| 60 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
lowerCAmelCase_ = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['''MLukeTokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
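# A rough sketch of the lazy-import idea behind _LazyModule: attribute access
# triggers the real import only on first use. This is a simplified stand-in,
# not the actual transformers implementation.
import importlib
import types

class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map exported name -> submodule that defines it
        self._name_to_module = {n: mod for mod, names in import_structure.items() for n in names}

    def __getattr__(self, attr):
        module_name = self._name_to_module.get(attr)
        if module_name is None:
            raise AttributeError(attr)
        module = importlib.import_module(f"{self.__name__}.{module_name}")
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later accesses are plain lookups
        return value

# demo against the standard library: json.decoder.JSONDecoder is imported lazily
lazy_json = LazyModuleSketch("json", {"decoder": ["JSONDecoder"]})
print(lazy_json.JSONDecoder)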
| 60 |
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class __lowerCAmelCase ( unittest.TestCase ):
lowerCamelCase_ : Tuple = inspect.getfile(accelerate.test_utils )
lowerCamelCase_ : Optional[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_cli.py'''] )
lowerCamelCase_ : Union[str, Any] = ['''accelerate''', '''launch''']
lowerCamelCase_ : Tuple = Path.home() / '''.cache/huggingface/accelerate'''
lowerCamelCase_ : Tuple = '''default_config.yaml'''
lowerCamelCase_ : str = config_folder / config_file
lowerCamelCase_ : List[Any] = config_folder / '''_default_config.yaml'''
lowerCamelCase_ : Dict = Path('''tests/test_configs''' )
@classmethod
def lowerCamelCase (cls ) -> Dict:
'''simple docstring'''
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
def lowerCamelCase (cls ) -> Any:
'''simple docstring'''
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
def lowerCamelCase (self ) -> Tuple:
'''simple docstring'''
snake_case_ : Dict = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
for config in sorted(self.test_config_path.glob('''**/*.yaml''' ) ):
with self.subTest(config_file=__magic_name__ ):
execute_subprocess_async(
self.base_cmd + ['''--config_file''', str(__magic_name__ ), self.test_file_path] , env=os.environ.copy() )
def lowerCamelCase (self ) -> List[Any]:
'''simple docstring'''
execute_subprocess_async(['''accelerate''', '''test'''] , env=os.environ.copy() )
class __lowerCAmelCase ( unittest.TestCase ):
lowerCamelCase_ : List[str] = '''test-tpu'''
lowerCamelCase_ : Dict = '''us-central1-a'''
lowerCamelCase_ : Any = '''ls'''
lowerCamelCase_ : Dict = ['''accelerate''', '''tpu-config''']
lowerCamelCase_ : Tuple = '''cd /usr/share'''
lowerCamelCase_ : List[Any] = '''tests/test_samples/test_command_file.sh'''
lowerCamelCase_ : List[Any] = '''Running gcloud compute tpus tpu-vm ssh'''
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
snake_case_ : int = run_command(
self.cmd
+ ['''--command''', self.command, '''--tpu_zone''', self.tpu_zone, '''--tpu_name''', self.tpu_name, '''--debug'''] , return_stdout=__magic_name__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , __magic_name__ , )
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
snake_case_ : Optional[int] = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/0_12_0.yaml''',
'''--command''',
self.command,
'''--tpu_zone''',
self.tpu_zone,
'''--tpu_name''',
self.tpu_name,
'''--debug''',
] , return_stdout=__magic_name__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , __magic_name__ , )
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : List[str] = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--debug'''] , return_stdout=__magic_name__ )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , __magic_name__ , )
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : List[Any] = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command''', self.command, '''--debug'''] , return_stdout=__magic_name__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , __magic_name__ , )
def lowerCamelCase (self ) -> int:
'''simple docstring'''
snake_case_ : Any = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/latest.yaml''',
'''--command''',
self.command,
'''--command''',
'''echo "Hello World"''',
'''--debug''',
] , return_stdout=__magic_name__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all''' , __magic_name__ , )
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
snake_case_ : str = run_command(
self.cmd
+ ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command_file''', self.command_file, '''--debug'''] , return_stdout=__magic_name__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , __magic_name__ , )
def lowerCamelCase (self ) -> int:
'''simple docstring'''
snake_case_ : Tuple = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/0_12_0.yaml''',
'''--command_file''',
self.command_file,
'''--tpu_zone''',
self.tpu_zone,
'''--tpu_name''',
self.tpu_name,
'''--debug''',
] , return_stdout=__magic_name__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , __magic_name__ , )
def lowerCamelCase (self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Any = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--install_accelerate''', '''--debug'''] , return_stdout=__magic_name__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all''' , __magic_name__ , )
def lowerCamelCase (self ) -> str:
'''simple docstring'''
snake_case_ : Optional[Any] = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/latest.yaml''',
'''--install_accelerate''',
'''--accelerate_version''',
'''12.0.0''',
'''--debug''',
] , return_stdout=__magic_name__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all''' , __magic_name__ , )
| 60 | 1 |
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __lowerCAmelCase :
def __init__(self , __magic_name__ , __magic_name__=13 , __magic_name__=30 , __magic_name__=2 , __magic_name__=3 , __magic_name__=True , __magic_name__=True , __magic_name__=32 , __magic_name__=5 , __magic_name__=4 , __magic_name__=37 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=10 , __magic_name__=0.02 , __magic_name__=None , ) -> List[Any]:
'''simple docstring'''
snake_case_ : List[str] = parent
snake_case_ : Optional[Any] = batch_size
snake_case_ : List[Any] = image_size
snake_case_ : Optional[int] = patch_size
snake_case_ : Optional[Any] = num_channels
snake_case_ : Optional[Any] = is_training
snake_case_ : List[Any] = use_labels
snake_case_ : Optional[int] = hidden_size
snake_case_ : Optional[Any] = num_hidden_layers
snake_case_ : Union[str, Any] = num_attention_heads
snake_case_ : Optional[Any] = intermediate_size
snake_case_ : Any = hidden_act
snake_case_ : List[str] = hidden_dropout_prob
snake_case_ : Dict = attention_probs_dropout_prob
snake_case_ : List[str] = type_sequence_label_size
snake_case_ : Union[str, Any] = initializer_range
snake_case_ : List[Any] = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
snake_case_ : Any = (image_size // patch_size) ** 2
snake_case_ : int = num_patches + 1
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ : List[Any] = None
if self.use_labels:
snake_case_ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ : int = self.get_config()
return config, pixel_values, labels
def lowerCamelCase (self ) -> Tuple:
'''simple docstring'''
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ ) -> List[str]:
'''simple docstring'''
snake_case_ : int = ViTMSNModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
snake_case_ : List[str] = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ ) -> List[str]:
'''simple docstring'''
snake_case_ : int = self.type_sequence_label_size
snake_case_ : Tuple = ViTMSNForImageClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
snake_case_ : Any = model(__magic_name__ , labels=__magic_name__ )
print('''Pixel and labels shape: {pixel_values.shape}, {labels.shape}''' )
print('''Labels: {labels}''' )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
snake_case_ : Optional[int] = 1
snake_case_ : List[str] = ViTMSNForImageClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
snake_case_ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case_ : Any = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
snake_case_ : Any = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ : Optional[int] = config_and_inputs
snake_case_ : Union[str, Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( _a, _a, unittest.TestCase ):
lowerCamelCase_ : List[Any] = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
lowerCamelCase_ : Optional[int] = (
{'''feature-extraction''': ViTMSNModel, '''image-classification''': ViTMSNForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase_ : int = False
lowerCamelCase_ : Optional[int] = False
lowerCamelCase_ : int = False
lowerCamelCase_ : Optional[int] = False
def lowerCamelCase (self ) -> str:
'''simple docstring'''
snake_case_ : List[Any] = ViTMSNModelTester(self )
snake_case_ : Optional[Any] = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ , hidden_size=37 )
def lowerCamelCase (self ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMSN does not use inputs_embeds''' )
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
pass
def lowerCamelCase (self ) -> int:
'''simple docstring'''
snake_case_ , snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : Any = model_class(__magic_name__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case_ : Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__magic_name__ , nn.Linear ) )
def lowerCamelCase (self ) -> int:
'''simple docstring'''
snake_case_ , snake_case_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : Tuple = model_class(__magic_name__ )
snake_case_ : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ : Optional[int] = [*signature.parameters.keys()]
snake_case_ : List[str] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __magic_name__ )
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
snake_case_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def lowerCamelCase (self ) -> List[str]:
'''simple docstring'''
snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__magic_name__ )
@slow
def lowerCamelCase (self ) -> Any:
'''simple docstring'''
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ : str = ViTMSNModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def lowerCamelCase_ ( ) -> Optional[Any]:
"""simple docstring"""
snake_case_ : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
@cached_property
def lowerCamelCase (self ) -> Union[str, Any]:
'''simple docstring'''
return ViTImageProcessor.from_pretrained('''facebook/vit-msn-small''' ) if is_vision_available() else None
@slow
def lowerCamelCase (self ) -> Any:
'''simple docstring'''
torch.manual_seed(2 )
snake_case_ : List[str] = ViTMSNForImageClassification.from_pretrained('''facebook/vit-msn-small''' ).to(__magic_name__ )
snake_case_ : str = self.default_image_processor
snake_case_ : str = prepare_img()
snake_case_ : int = image_processor(images=__magic_name__ , return_tensors='''pt''' ).to(__magic_name__ )
# forward pass
with torch.no_grad():
snake_case_ : Optional[int] = model(**__magic_name__ )
# verify the logits
snake_case_ : Optional[int] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __magic_name__ )
snake_case_ : List[Any] = torch.tensor([-0.0_803, -0.4_454, -0.2_375] ).to(__magic_name__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __magic_name__ , atol=1e-4 ) )
| 60 |
import warnings
from ..trainer import Trainer
from ..utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
class __lowerCAmelCase ( _a ):
def __init__(self , __magic_name__=None , **__magic_name__ ) -> Dict:
'''simple docstring'''
warnings.warn(
'''`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` '''
'''instead.''' , __magic_name__ , )
super().__init__(args=__magic_name__ , **__magic_name__ )
| 60 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase_ = {
'''configuration_whisper''': ['''WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WhisperConfig''', '''WhisperOnnxConfig'''],
'''feature_extraction_whisper''': ['''WhisperFeatureExtractor'''],
'''processing_whisper''': ['''WhisperProcessor'''],
'''tokenization_whisper''': ['''WhisperTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['''WhisperTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WhisperForConditionalGeneration''',
'''WhisperModel''',
'''WhisperPreTrainedModel''',
'''WhisperForAudioClassification''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWhisperForConditionalGeneration''',
'''TFWhisperModel''',
'''TFWhisperPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''FlaxWhisperForConditionalGeneration''',
'''FlaxWhisperModel''',
'''FlaxWhisperPreTrainedModel''',
'''FlaxWhisperForAudioClassification''',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 60 |
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def lowerCamelCase_ ( _UpperCamelCase ) -> Any:
"""simple docstring"""
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def lowerCamelCase_ ( ) -> Union[str, Any]:
"""simple docstring"""
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def lowerCamelCase_ ( ) -> Tuple:
"""simple docstring"""
snake_case_ : str = '''mock-s3-bucket'''
snake_case_ : str = f'''s3://{mock_bucket}'''
snake_case_ : Any = extract_path_from_uri(_UpperCamelCase )
assert dataset_path.startswith('''s3://''' ) is False
snake_case_ : Optional[Any] = '''./local/path'''
snake_case_ : List[str] = extract_path_from_uri(_UpperCamelCase )
assert dataset_path == new_dataset_path
def lowerCamelCase_ ( _UpperCamelCase ) -> str:
"""simple docstring"""
snake_case_ : Union[str, Any] = is_remote_filesystem(_UpperCamelCase )
assert is_remote is True
snake_case_ : Union[str, Any] = fsspec.filesystem('''file''' )
snake_case_ : int = is_remote_filesystem(_UpperCamelCase )
assert is_remote is False
@pytest.mark.parametrize('''compression_fs_class''' , _UpperCamelCase )
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Tuple:
"""simple docstring"""
snake_case_ : Optional[Any] = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_file, '''bz2''': bza_file, '''lz4''': lza_file}
snake_case_ : Optional[Any] = input_paths[compression_fs_class.protocol]
if input_path is None:
snake_case_ : List[Any] = f'''for \'{compression_fs_class.protocol}\' compression protocol, '''
if compression_fs_class.protocol == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(_UpperCamelCase )
snake_case_ : Dict = fsspec.filesystem(compression_fs_class.protocol , fo=_UpperCamelCase )
assert isinstance(_UpperCamelCase , _UpperCamelCase )
snake_case_ : int = os.path.basename(_UpperCamelCase )
snake_case_ : Any = expected_filename[: expected_filename.rindex('''.''' )]
assert fs.glob('''*''' ) == [expected_filename]
with fs.open(_UpperCamelCase , '''r''' , encoding='''utf-8''' ) as f, open(_UpperCamelCase , encoding='''utf-8''' ) as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize('''protocol''' , ['''zip''', '''gzip'''] )
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Optional[int]:
"""simple docstring"""
snake_case_ : Union[str, Any] = {'''zip''': zip_jsonl_path, '''gzip''': jsonl_gz_path}
snake_case_ : Any = compressed_file_paths[protocol]
snake_case_ : Any = '''dataset.jsonl'''
snake_case_ : Dict = f'''{protocol}://{member_file_path}::{compressed_file_path}'''
snake_case_ , *snake_case_ : Optional[Any] = fsspec.get_fs_token_paths(_UpperCamelCase )
assert fs.isfile(_UpperCamelCase )
assert not fs.isfile('''non_existing_''' + member_file_path )
@pytest.mark.integration
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Dict:
"""simple docstring"""
snake_case_ : Optional[int] = hf_api.dataset_info(_UpperCamelCase , token=_UpperCamelCase )
snake_case_ : List[str] = HfFileSystem(repo_info=_UpperCamelCase , token=_UpperCamelCase )
assert sorted(hffs.glob('''*''' ) ) == [".gitattributes", "data"]
assert hffs.isdir('''data''' )
assert hffs.isfile('''.gitattributes''' ) and hffs.isfile('''data/text_data.txt''' )
with open(_UpperCamelCase ) as f:
assert hffs.open('''data/text_data.txt''' , '''r''' ).read() == f.read()
def lowerCamelCase_ ( ) -> Any:
"""simple docstring"""
snake_case_ : Tuple = '''bz2'''
# Import module
import datasets.filesystems
# Overwrite protocol and reload
register_implementation(_UpperCamelCase , _UpperCamelCase , clobber=_UpperCamelCase )
with pytest.warns(_UpperCamelCase ) as warning_info:
importlib.reload(datasets.filesystems )
assert len(_UpperCamelCase ) == 1
assert (
str(warning_info[0].message )
== f'''A filesystem protocol was already set for {protocol} and will be overwritten.'''
)
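# Hedged usage sketch of the fsspec URL chaining the tests above exercise: a
# member file inside an archive is addressed as "<inner>://<member>::<outer path>".
# The archive path below is a placeholder; point it at a real zip file to run this.
import fsspec

archive_path = "data.zip"  # placeholder archive containing dataset.jsonl
chained_url = f"zip://dataset.jsonl::{archive_path}"

fs, _, paths = fsspec.get_fs_token_paths(chained_url)
print(fs.isfile("dataset.jsonl"), paths)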
| 60 | 1 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class __lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase (self ) -> List[Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = 10
def lowerCamelCase (self ) -> Tuple:
'''simple docstring'''
snake_case_ : Optional[int] = [1, 2, 3, 4]
snake_case_ : Optional[Any] = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(__magic_name__ , self.block_size , 0 ) , __magic_name__ )
def lowerCamelCase (self ) -> str:
'''simple docstring'''
snake_case_ : Optional[int] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
snake_case_ : Any = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(__magic_name__ , self.block_size , 0 ) , __magic_name__ )
def lowerCamelCase (self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Optional[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
snake_case_ : List[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(__magic_name__ , self.block_size , 0 ) , __magic_name__ )
def lowerCamelCase (self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Any = '''It was the year of Our Lord one thousand seven hundred and
seventy-five.\n\nSpiritual revelations were conceded to England at that
favoured period, as at this.'''
snake_case_ , snake_case_ : Optional[int] = process_story(__magic_name__ )
self.assertEqual(__magic_name__ , [] )
def lowerCamelCase (self ) -> Tuple:
'''simple docstring'''
snake_case_ : int = ''''''
snake_case_ , snake_case_ : List[str] = process_story(__magic_name__ )
self.assertEqual(__magic_name__ , [] )
self.assertEqual(__magic_name__ , [] )
def lowerCamelCase (self ) -> str:
'''simple docstring'''
snake_case_ : Tuple = (
'''It was the year of Our Lord one thousand seven hundred and '''
'''seventy-five\n\nSpiritual revelations were conceded to England '''
'''at that favoured period, as at this.\n@highlight\n\nIt was the best of times'''
)
snake_case_ , snake_case_ : List[Any] = process_story(__magic_name__ )
snake_case_ : Any = [
'''It was the year of Our Lord one thousand seven hundred and seventy-five.''',
'''Spiritual revelations were conceded to England at that favoured period, as at this.''',
]
self.assertEqual(__magic_name__ , __magic_name__ )
snake_case_ : int = ['''It was the best of times.''']
self.assertEqual(__magic_name__ , __magic_name__ )
def lowerCamelCase (self ) -> int:
'''simple docstring'''
snake_case_ : List[str] = torch.tensor([1, 2, 3, 4] )
snake_case_ : Any = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(__magic_name__ , 0 ).numpy() , expected.numpy() )
def lowerCamelCase (self ) -> int:
'''simple docstring'''
snake_case_ : Any = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
snake_case_ : Dict = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(__magic_name__ , 23 ).numpy() , expected.numpy() )
def lowerCamelCase (self ) -> str:
'''simple docstring'''
snake_case_ : Tuple = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
snake_case_ : Optional[Any] = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(__magic_name__ , 1 ).numpy() , expected.numpy() )
def lowerCamelCase (self ) -> List[Any]:
'''simple docstring'''
snake_case_ : List[str] = 101
snake_case_ : int = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
snake_case_ : Optional[Any] = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
snake_case_ : List[str] = compute_token_type_ids(__magic_name__ , __magic_name__ )
np.testing.assert_array_equal(__magic_name__ , __magic_name__ )
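# The helpers under test are not shown above; this is a hedged reconstruction of
# truncate_or_pad and build_mask that matches the asserted behaviour (the real
# utils_summarization module may differ in details).
import torch

def truncate_or_pad_sketch(sequence, block_size, pad_token_id):
    """Clip to block_size, or right-pad with pad_token_id up to block_size."""
    if len(sequence) > block_size:
        return sequence[:block_size]
    return sequence + [pad_token_id] * (block_size - len(sequence))

def build_mask_sketch(sequence, pad_token_id):
    """1 for real tokens, 0 wherever the pad token appears."""
    mask = torch.ones_like(sequence)
    mask[sequence == pad_token_id] = 0
    return mask

assert truncate_or_pad_sketch([1, 2, 3, 4], 10, 0) == [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
print(build_mask_sketch(torch.tensor([8, 2, 3, 4, 1, 1, 1]), 1))  # tensor([1, 1, 1, 1, 0, 0, 0])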
| 60 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : Optional[Any] = '''encoder-decoder'''
lowerCamelCase_ : Optional[Any] = True
def __init__(self , **__magic_name__ ) -> Optional[int]:
'''simple docstring'''
super().__init__(**__magic_name__ )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
snake_case_ : Any = kwargs.pop('''encoder''' )
snake_case_ : Tuple = encoder_config.pop('''model_type''' )
snake_case_ : Union[str, Any] = kwargs.pop('''decoder''' )
snake_case_ : Union[str, Any] = decoder_config.pop('''model_type''' )
from ..auto.configuration_auto import AutoConfig
snake_case_ : Optional[int] = AutoConfig.for_model(__magic_name__ , **__magic_name__ )
snake_case_ : List[str] = AutoConfig.for_model(__magic_name__ , **__magic_name__ )
snake_case_ : Any = True
@classmethod
def lowerCamelCase (cls , __magic_name__ , __magic_name__ , **__magic_name__ ) -> PretrainedConfig:
'''simple docstring'''
logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
snake_case_ : Tuple = True
snake_case_ : Optional[Any] = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **__magic_name__ )
def lowerCamelCase (self ) -> int:
'''simple docstring'''
snake_case_ : str = copy.deepcopy(self.__dict__ )
snake_case_ : Any = self.encoder.to_dict()
snake_case_ : Dict = self.decoder.to_dict()
snake_case_ : Union[str, Any] = self.__class__.model_type
return output
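# Hedged usage sketch for the composite config above, using the public
# transformers API it mirrors (any encoder/decoder config pair would do;
# BERT + GPT-2 is just an example).
from transformers import BertConfig, EncoderDecoderConfig, GPT2Config

encoder_config = BertConfig()
decoder_config = GPT2Config()
config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config)
print(config.decoder.is_decoder, config.decoder.add_cross_attention)  # True True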
| 60 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase (self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Tuple = tempfile.mkdtemp()
snake_case_ : Union[str, Any] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
snake_case_ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
snake_case_ : List[str] = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.48_145_466, 0.4_578_275, 0.40_821_073],
'''image_std''': [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
snake_case_ : Union[str, Any] = os.path.join(self.tmpdirname , __magic_name__ )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(__magic_name__ , __magic_name__ )
def lowerCamelCase (self , **__magic_name__ ) -> Any:
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname , **__magic_name__ )
def lowerCamelCase (self , **__magic_name__ ) -> int:
'''simple docstring'''
return BertTokenizerFast.from_pretrained(self.tmpdirname , **__magic_name__ )
def lowerCamelCase (self , **__magic_name__ ) -> Tuple:
'''simple docstring'''
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **__magic_name__ )
def lowerCamelCase (self ) -> Any:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
snake_case_ : Dict = [Image.fromarray(np.moveaxis(__magic_name__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowerCamelCase (self ) -> List[str]:
'''simple docstring'''
snake_case_ : Dict = self.get_tokenizer()
snake_case_ : int = self.get_rust_tokenizer()
snake_case_ : Tuple = self.get_image_processor()
snake_case_ : str = AlignProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
processor_slow.save_pretrained(self.tmpdirname )
snake_case_ : Optional[int] = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=__magic_name__ )
snake_case_ : Optional[Any] = AlignProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
processor_fast.save_pretrained(self.tmpdirname )
snake_case_ : Tuple = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __magic_name__ )
self.assertIsInstance(processor_fast.tokenizer , __magic_name__ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __magic_name__ )
self.assertIsInstance(processor_fast.image_processor , __magic_name__ )
def lowerCamelCase (self ) -> int:
'''simple docstring'''
snake_case_ : Any = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
snake_case_ : int = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
snake_case_ : List[str] = self.get_image_processor(do_normalize=__magic_name__ , padding_value=1.0 )
snake_case_ : Optional[Any] = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__magic_name__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __magic_name__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __magic_name__ )
def lowerCamelCase (self ) -> Any:
'''simple docstring'''
snake_case_ : Any = self.get_image_processor()
snake_case_ : List[str] = self.get_tokenizer()
snake_case_ : Optional[int] = AlignProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
snake_case_ : str = self.prepare_image_inputs()
snake_case_ : Optional[int] = image_processor(__magic_name__ , return_tensors='''np''' )
snake_case_ : List[str] = processor(images=__magic_name__ , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowerCamelCase (self ) -> List[str]:
'''simple docstring'''
snake_case_ : Tuple = self.get_image_processor()
snake_case_ : Optional[Any] = self.get_tokenizer()
snake_case_ : Union[str, Any] = AlignProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
snake_case_ : Dict = '''lower newer'''
snake_case_ : str = processor(text=__magic_name__ )
snake_case_ : List[str] = tokenizer(__magic_name__ , padding='''max_length''' , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCamelCase (self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : str = self.get_image_processor()
snake_case_ : Optional[Any] = self.get_tokenizer()
snake_case_ : Union[str, Any] = AlignProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
snake_case_ : List[str] = '''lower newer'''
snake_case_ : Optional[int] = self.prepare_image_inputs()
snake_case_ : List[Any] = processor(text=__magic_name__ , images=__magic_name__ )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(__magic_name__ ):
processor()
def lowerCamelCase (self ) -> List[str]:
'''simple docstring'''
snake_case_ : Optional[int] = self.get_image_processor()
snake_case_ : Optional[int] = self.get_tokenizer()
snake_case_ : Union[str, Any] = AlignProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
snake_case_ : List[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
snake_case_ : Dict = processor.batch_decode(__magic_name__ )
snake_case_ : Optional[int] = tokenizer.batch_decode(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
def lowerCamelCase (self ) -> str:
'''simple docstring'''
snake_case_ : Any = self.get_image_processor()
snake_case_ : Tuple = self.get_tokenizer()
snake_case_ : Optional[Any] = AlignProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
snake_case_ : Optional[int] = '''lower newer'''
snake_case_ : Optional[Any] = self.prepare_image_inputs()
snake_case_ : List[str] = processor(text=__magic_name__ , images=__magic_name__ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
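# Hedged usage sketch of the processor the tests above cover: one object that
# routes text to the tokenizer and images to the image processor. The checkpoint
# name is only an example.
import numpy as np
from PIL import Image
from transformers import AlignProcessor

processor = AlignProcessor.from_pretrained("kakaobrain/align-base")
image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
inputs = processor(text="a photo of a cat", images=image, return_tensors="pt")
print(sorted(inputs.keys()))  # e.g. attention_mask, input_ids, pixel_values, token_type_ids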
| 60 |
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
lowerCAmelCase_ = logging.get_logger(__name__)
class __lowerCAmelCase :
def __init__(self , __magic_name__ , __magic_name__ ) -> List[Any]:
'''simple docstring'''
snake_case_ : Optional[int] = question_encoder
snake_case_ : Optional[int] = generator
snake_case_ : Optional[Any] = self.question_encoder
def lowerCamelCase (self , __magic_name__ ) -> Dict:
'''simple docstring'''
if os.path.isfile(__magic_name__ ):
raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''' )
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
snake_case_ : str = os.path.join(__magic_name__ , '''question_encoder_tokenizer''' )
snake_case_ : List[Any] = os.path.join(__magic_name__ , '''generator_tokenizer''' )
self.question_encoder.save_pretrained(__magic_name__ )
self.generator.save_pretrained(__magic_name__ )
@classmethod
def lowerCamelCase (cls , __magic_name__ , **__magic_name__ ) -> Any:
'''simple docstring'''
from ..auto.tokenization_auto import AutoTokenizer
snake_case_ : List[str] = kwargs.pop('''config''' , __magic_name__ )
if config is None:
snake_case_ : int = RagConfig.from_pretrained(__magic_name__ )
snake_case_ : Dict = AutoTokenizer.from_pretrained(
__magic_name__ , config=config.question_encoder , subfolder='''question_encoder_tokenizer''' )
snake_case_ : Dict = AutoTokenizer.from_pretrained(
__magic_name__ , config=config.generator , subfolder='''generator_tokenizer''' )
return cls(question_encoder=__magic_name__ , generator=__magic_name__ )
def __call__(self , *__magic_name__ , **__magic_name__ ) -> Tuple:
'''simple docstring'''
return self.current_tokenizer(*__magic_name__ , **__magic_name__ )
def lowerCamelCase (self , *__magic_name__ , **__magic_name__ ) -> Dict:
'''simple docstring'''
return self.generator.batch_decode(*__magic_name__ , **__magic_name__ )
def lowerCamelCase (self , *__magic_name__ , **__magic_name__ ) -> int:
'''simple docstring'''
return self.generator.decode(*__magic_name__ , **__magic_name__ )
def lowerCamelCase (self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Any = self.question_encoder
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
snake_case_ : Dict = self.generator
def lowerCamelCase (self , __magic_name__ , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = "longest" , __magic_name__ = None , __magic_name__ = True , **__magic_name__ , ) -> BatchEncoding:
'''simple docstring'''
warnings.warn(
'''`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '''
'''regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '''
'''context manager to prepare your targets. See the documentation of your specific tokenizer for more '''
'''details''' , __magic_name__ , )
if max_length is None:
snake_case_ : Dict = self.current_tokenizer.model_max_length
snake_case_ : List[str] = self(
__magic_name__ , add_special_tokens=__magic_name__ , return_tensors=__magic_name__ , max_length=__magic_name__ , padding=__magic_name__ , truncation=__magic_name__ , **__magic_name__ , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
snake_case_ : Optional[int] = self.current_tokenizer.model_max_length
snake_case_ : Union[str, Any] = self(
text_target=__magic_name__ , add_special_tokens=__magic_name__ , return_tensors=__magic_name__ , padding=__magic_name__ , max_length=__magic_name__ , truncation=__magic_name__ , **__magic_name__ , )
snake_case_ : str = labels['''input_ids''']
return model_inputs
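# Hedged sketch of the pattern the deprecation warning above points to: tokenize
# sources and targets in one call with text_target instead of
# prepare_seq2seq_batch. The checkpoint name is only an example.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("facebook/bart-base")
batch = tokenizer(
    ["A long source document to summarize."],
    text_target=["A short summary."],
    padding="longest",
    return_tensors="pt",
)
print(batch["input_ids"].shape, batch["labels"].shape)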
| 60 | 1 |
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase = 0 ) -> list:
"""simple docstring"""
snake_case_ : Dict = length or len(_UpperCamelCase )
snake_case_ : Any = False
for i in range(length - 1 ):
if list_data[i] > list_data[i + 1]:
snake_case_ , snake_case_ : Tuple = list_data[i + 1], list_data[i]
snake_case_ : Optional[Any] = True
return list_data if not swapped else bubble_sort(_UpperCamelCase , length - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
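# The module calls doctest.testmod() but the function above carries no doctests;
# here is a hedged, de-obfuscated restatement of the same recursive bubble sort
# with the kind of examples that call implies.
def bubble_sort(list_data: list, length: int = 0) -> list:
    """
    >>> bubble_sort([5, 3, 1, 4, 2])
    [1, 2, 3, 4, 5]
    >>> bubble_sort([])
    []
    """
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data, length - 1)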
| 60 |
| 60 | 1 |
| 60 |
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''google/efficientnet-b7''': '''https://huggingface.co/google/efficientnet-b7/resolve/main/config.json''',
}
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : List[Any] = '''efficientnet'''
def __init__(self , __magic_name__ = 3 , __magic_name__ = 600 , __magic_name__ = 2.0 , __magic_name__ = 3.1 , __magic_name__ = 8 , __magic_name__ = [3, 3, 5, 3, 5, 5, 3] , __magic_name__ = [32, 16, 24, 40, 80, 112, 192] , __magic_name__ = [16, 24, 40, 80, 112, 192, 320] , __magic_name__ = [] , __magic_name__ = [1, 2, 2, 2, 1, 2, 1] , __magic_name__ = [1, 2, 2, 3, 3, 4, 1] , __magic_name__ = [1, 6, 6, 6, 6, 6, 6] , __magic_name__ = 0.25 , __magic_name__ = "swish" , __magic_name__ = 2560 , __magic_name__ = "mean" , __magic_name__ = 0.02 , __magic_name__ = 0.001 , __magic_name__ = 0.99 , __magic_name__ = 0.5 , __magic_name__ = 0.2 , **__magic_name__ , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**__magic_name__ )
snake_case_ : List[str] = num_channels
snake_case_ : Tuple = image_size
snake_case_ : Union[str, Any] = width_coefficient
snake_case_ : Tuple = depth_coefficient
snake_case_ : Optional[Any] = depth_divisor
snake_case_ : Optional[int] = kernel_sizes
snake_case_ : str = in_channels
snake_case_ : Optional[Any] = out_channels
snake_case_ : int = depthwise_padding
snake_case_ : Optional[Any] = strides
snake_case_ : Any = num_block_repeats
snake_case_ : Optional[Any] = expand_ratios
snake_case_ : Union[str, Any] = squeeze_expansion_ratio
snake_case_ : Union[str, Any] = hidden_act
snake_case_ : Union[str, Any] = hidden_dim
snake_case_ : Any = pooling_type
snake_case_ : List[str] = initializer_range
snake_case_ : str = batch_norm_eps
snake_case_ : Optional[int] = batch_norm_momentum
snake_case_ : Optional[Any] = dropout_rate
snake_case_ : List[str] = drop_connect_rate
snake_case_ : Union[str, Any] = sum(__magic_name__ ) * 4
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : Union[str, Any] = version.parse('''1.11''' )
@property
def lowerCamelCase (self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def lowerCamelCase (self ) -> float:
'''simple docstring'''
return 1e-5
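# Hedged sketch of how width_coefficient, depth_coefficient and depth_divisor
# are typically combined in EfficientNet-style compound scaling: channel counts
# are scaled then rounded to a multiple of the divisor. The exact helpers used
# by the modeling code may differ.
import math

def round_filters(channels: int, width_coefficient: float, depth_divisor: int = 8) -> int:
    channels *= width_coefficient
    new_channels = max(depth_divisor, int(channels + depth_divisor / 2) // depth_divisor * depth_divisor)
    if new_channels < 0.9 * channels:  # never round down by more than 10%
        new_channels += depth_divisor
    return int(new_channels)

def round_repeats(repeats: int, depth_coefficient: float) -> int:
    return int(math.ceil(depth_coefficient * repeats))

print(round_filters(32, 2.0), round_repeats(3, 3.1))  # 64 10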
| 60 | 1 |
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
lowerCAmelCase_ = {
'''169M''': 1_2,
'''430M''': 2_4,
'''1B5''': 2_4,
'''3B''': 3_2,
'''7B''': 3_2,
'''14B''': 4_0,
}
lowerCAmelCase_ = {
'''169M''': 7_6_8,
'''430M''': 1_0_2_4,
'''1B5''': 2_0_4_8,
'''3B''': 2_5_6_0,
'''7B''': 4_0_9_6,
'''14B''': 5_1_2_0,
}
def lowerCamelCase_ ( _UpperCamelCase ) -> str:
"""simple docstring"""
snake_case_ : Optional[int] = list(state_dict.keys() )
for name in state_dict_keys:
snake_case_ : Dict = state_dict.pop(_UpperCamelCase )
# emb -> embedding
if name.startswith('''emb.''' ):
snake_case_ : Optional[int] = name.replace('''emb.''' , '''embeddings.''' )
# ln_0 -> pre_ln (only present at block 0)
if name.startswith('''blocks.0.ln0''' ):
snake_case_ : str = name.replace('''blocks.0.ln0''' , '''blocks.0.pre_ln''' )
# att -> attention
snake_case_ : str = re.sub(R'''blocks\.(\d+)\.att''' , R'''blocks.\1.attention''' , _UpperCamelCase )
# ffn -> feed_forward
snake_case_ : Union[str, Any] = re.sub(R'''blocks\.(\d+)\.ffn''' , R'''blocks.\1.feed_forward''' , _UpperCamelCase )
# time_mix_k -> time_mix_key and reshape
if name.endswith('''.time_mix_k''' ):
snake_case_ : Union[str, Any] = name.replace('''.time_mix_k''' , '''.time_mix_key''' )
# time_mix_v -> time_mix_value and reshape
if name.endswith('''.time_mix_v''' ):
snake_case_ : Tuple = name.replace('''.time_mix_v''' , '''.time_mix_value''' )
# time_mix_r -> time_mix_receptance
if name.endswith('''.time_mix_r''' ):
snake_case_ : Optional[Any] = name.replace('''.time_mix_r''' , '''.time_mix_receptance''' )
if name != "head.weight":
snake_case_ : str = '''rwkv.''' + name
snake_case_ : Dict = weight
return state_dict
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=False , _UpperCamelCase=None ) -> List[Any]:
"""simple docstring"""
if tokenizer_file is None:
print('''No `--tokenizer_file` provided, we will use the default tokenizer.''' )
snake_case_ : int = 50_277
snake_case_ : Union[str, Any] = AutoTokenizer.from_pretrained('''EleutherAI/gpt-neox-20b''' )
else:
snake_case_ : str = PreTrainedTokenizerFast(tokenizer_file=_UpperCamelCase )
snake_case_ : Optional[Any] = len(_UpperCamelCase )
tokenizer.save_pretrained(_UpperCamelCase )
# 2. Build the config
snake_case_ : List[str] = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
snake_case_ : Optional[Any] = candidate
break
if size is None:
raise ValueError('''Could not infer the size, please provide it with the `--size` argument.''' )
if size not in possible_sizes:
raise ValueError(f'''`size` should be one of {possible_sizes}, got {size}.''' )
snake_case_ : Dict = RwkvConfig(
vocab_size=_UpperCamelCase , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
config.save_pretrained(_UpperCamelCase )
# 3. Download model file then convert state_dict
snake_case_ : List[Any] = hf_hub_download(_UpperCamelCase , _UpperCamelCase )
snake_case_ : Optional[Any] = torch.load(_UpperCamelCase , map_location='''cpu''' )
snake_case_ : Optional[Any] = convert_state_dict(_UpperCamelCase )
# 4. Split in shards and save
snake_case_ , snake_case_ : List[str] = shard_checkpoint(_UpperCamelCase )
for shard_file, shard in shards.items():
torch.save(_UpperCamelCase , os.path.join(_UpperCamelCase , _UpperCamelCase ) )
if index is not None:
snake_case_ : Optional[int] = os.path.join(_UpperCamelCase , _UpperCamelCase )
# Save the index as well
with open(_UpperCamelCase , '''w''' , encoding='''utf-8''' ) as f:
snake_case_ : Optional[int] = json.dumps(_UpperCamelCase , indent=2 , sort_keys=_UpperCamelCase ) + '''\n'''
f.write(_UpperCamelCase )
# 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
print(
'''Cleaning up shards. This may error with an OOM error; if this is the case, don\'t worry, you still have converted the model.''' )
snake_case_ : Optional[int] = list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
snake_case_ : Optional[int] = torch.load(os.path.join(_UpperCamelCase , _UpperCamelCase ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(_UpperCamelCase , _UpperCamelCase ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError('''Please provide a `model_name` to push the model to the Hub.''' )
snake_case_ : Any = AutoModelForCausalLM.from_pretrained(_UpperCamelCase )
model.push_to_hub(_UpperCamelCase , max_shard_size='''2GB''' )
tokenizer.push_to_hub(_UpperCamelCase )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--repo_id''', default=None, type=str, required=True, help='''Repo ID from which to pull the checkpoint.'''
)
parser.add_argument(
'''--checkpoint_file''', default=None, type=str, required=True, help='''Name of the checkpoint file in the repo.'''
)
parser.add_argument(
'''--output_dir''', default=None, type=str, required=True, help='''Where to save the converted model.'''
)
parser.add_argument(
'''--tokenizer_file''',
default=None,
type=str,
        help='''Path to the tokenizer file to use (if not provided, the default EleutherAI/gpt-neox-20b tokenizer is used).''',
)
parser.add_argument(
'''--size''',
default=None,
type=str,
help='''Size of the model. Will be inferred from the `checkpoint_file` if not passed.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Push to the Hub the converted model.''',
)
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help='''Name of the pushed model on the Hub, including the username / organization.''',
)
lowerCAmelCase_ = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 60 |
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
lowerCAmelCase_ = logging.getLogger(__name__)
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser(
description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'''
)
parser.add_argument(
'''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.'''
)
parser.add_argument(
'''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.'''
)
parser.add_argument('''--vocab_size''', default=3_0_5_2_2, type=int)
lowerCAmelCase_ = parser.parse_args()
logger.info(F'''Loading data from {args.data_file}''')
with open(args.data_file, '''rb''') as fp:
lowerCAmelCase_ = pickle.load(fp)
logger.info('''Counting occurrences for MLM.''')
lowerCAmelCase_ = Counter()
for tk_ids in data:
counter.update(tk_ids)
lowerCAmelCase_ = [0] * args.vocab_size
for k, v in counter.items():
lowerCAmelCase_ = v
logger.info(F'''Dump to {args.token_counts_dump}''')
with open(args.token_counts_dump, '''wb''') as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 60 | 1 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
fastaa_timesteps,
smartaa_timesteps,
smartaa_timesteps,
smartaaa_timesteps,
smartaaa_timesteps,
superaa_timesteps,
superaa_timesteps,
superaaa_timesteps,
)
@dataclass
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : Union[List[PIL.Image.Image], np.ndarray]
lowerCamelCase_ : Optional[List[bool]]
lowerCamelCase_ : Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 60 |
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class __lowerCAmelCase ( _a ):
def __init__(self , __magic_name__ = "▁" , __magic_name__ = True , __magic_name__ = "<unk>" , __magic_name__ = "</s>" , __magic_name__ = "<pad>" , ) -> Dict:
'''simple docstring'''
snake_case_ : List[Any] = {
'''pad''': {'''id''': 0, '''token''': pad_token},
'''eos''': {'''id''': 1, '''token''': eos_token},
'''unk''': {'''id''': 2, '''token''': unk_token},
}
snake_case_ : List[str] = [None] * len(self.special_tokens )
for token_dict in self.special_tokens.values():
snake_case_ : int = token_dict['''token''']
snake_case_ : Optional[int] = Tokenizer(Unigram() )
snake_case_ : int = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(''' {2,}''' ) , ''' ''' ),
normalizers.Lowercase(),
] )
snake_case_ : Optional[int] = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=__magic_name__ , add_prefix_space=__magic_name__ ),
pre_tokenizers.Digits(individual_digits=__magic_name__ ),
pre_tokenizers.Punctuation(),
] )
snake_case_ : Tuple = decoders.Metaspace(replacement=__magic_name__ , add_prefix_space=__magic_name__ )
snake_case_ : Optional[Any] = TemplateProcessing(
single=F'''$A {self.special_tokens["eos"]["token"]}''' , special_tokens=[(self.special_tokens['''eos''']['''token'''], self.special_tokens['''eos''']['''id'''])] , )
snake_case_ : Optional[Any] = {
'''model''': '''SentencePieceUnigram''',
'''replacement''': replacement,
'''add_prefix_space''': add_prefix_space,
}
super().__init__(__magic_name__ , __magic_name__ )
def lowerCamelCase (self , __magic_name__ , __magic_name__ = 8000 , __magic_name__ = True , ) -> List[str]:
'''simple docstring'''
snake_case_ : Tuple = trainers.UnigramTrainer(
vocab_size=__magic_name__ , special_tokens=self.special_tokens_list , show_progress=__magic_name__ , )
if isinstance(__magic_name__ , __magic_name__ ):
snake_case_ : Dict = [files]
self._tokenizer.train(__magic_name__ , trainer=__magic_name__ )
self.add_unk_id()
def lowerCamelCase (self , __magic_name__ , __magic_name__ = 8000 , __magic_name__ = True , ) -> int:
'''simple docstring'''
snake_case_ : Any = trainers.UnigramTrainer(
vocab_size=__magic_name__ , special_tokens=self.special_tokens_list , show_progress=__magic_name__ , )
self._tokenizer.train_from_iterator(__magic_name__ , trainer=__magic_name__ )
self.add_unk_id()
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Tuple = json.loads(self._tokenizer.to_str() )
snake_case_ : Union[str, Any] = self.special_tokens['''unk''']['''id''']
snake_case_ : Tuple = Tokenizer.from_str(json.dumps(__magic_name__ ) )
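# Usage note (illustrative; identifier names in this dump were rewritten, so the class and argument
# names below are assumed): the class above builds a SentencePiece-style Unigram tokenizer with
# NMT/NFKC/whitespace-collapse/lowercase normalization, Metaspace + digit + punctuation
# pre-tokenization, and an EOS-appending post-processor. It exposes
# `train(files, vocab_size=8000, show_progress=True)` and
# `train_from_iterator(iterator, vocab_size=8000, show_progress=True)` to fit the Unigram model,
# then calls `add_unk_id()` so the trained model knows the id of its <unk> token.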
| 60 | 1 |
from __future__ import annotations
import os
from collections.abc import Mapping
lowerCAmelCase_ = tuple[int, int]
class __lowerCAmelCase :
def __init__(self , __magic_name__ , __magic_name__ ) -> None:
'''simple docstring'''
snake_case_ : set[int] = vertices
snake_case_ : dict[EdgeT, int] = {
(min(__magic_name__ ), max(__magic_name__ )): weight for edge, weight in edges.items()
}
def lowerCamelCase (self , __magic_name__ , __magic_name__ ) -> None:
'''simple docstring'''
self.vertices.add(edge[0] )
self.vertices.add(edge[1] )
snake_case_ : List[str] = weight
def lowerCamelCase (self ) -> Graph:
'''simple docstring'''
snake_case_ : Graph = Graph({min(self.vertices )} , {} )
snake_case_ : EdgeT
snake_case_ : int
snake_case_ : EdgeT
snake_case_ : int
while len(subgraph.vertices ) < len(self.vertices ):
snake_case_ : List[Any] = max(self.edges.values() ) + 1
for edge, weight in self.edges.items():
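                # "^" (xor) keeps only edges with exactly one endpoint already in the growing subgraph, i.e. edges crossing the cut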
if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
if weight < min_weight:
snake_case_ : Union[str, Any] = edge
snake_case_ : Dict = weight
subgraph.add_edge(__magic_name__ , __magic_name__ )
return subgraph
def lowerCamelCase_ ( _UpperCamelCase = "p107_network.txt" ) -> int:
"""simple docstring"""
snake_case_ : str = os.path.abspath(os.path.dirname(_UpperCamelCase ) )
snake_case_ : str = os.path.join(_UpperCamelCase , _UpperCamelCase )
snake_case_ : dict[EdgeT, int] = {}
snake_case_ : list[str]
snake_case_ : int
snake_case_ : int
with open(_UpperCamelCase ) as f:
snake_case_ : int = f.read().strip().split('''\n''' )
snake_case_ : Union[str, Any] = [line.split(''',''' ) for line in data]
for edgea in range(1 , len(_UpperCamelCase ) ):
for edgea in range(_UpperCamelCase ):
if adjaceny_matrix[edgea][edgea] != "-":
snake_case_ : Tuple = int(adjaceny_matrix[edgea][edgea] )
snake_case_ : Graph = Graph(set(range(len(_UpperCamelCase ) ) ) , _UpperCamelCase )
snake_case_ : Graph = graph.prims_algorithm()
snake_case_ : int = sum(graph.edges.values() )
snake_case_ : int = sum(subgraph.edges.values() )
return initial_total - optimal_total
if __name__ == "__main__":
print(F'''{solution() = }''')
| 60 |
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of the same set.
def lowerCamelCase_ ( _UpperCamelCase ) -> Optional[int]:
"""simple docstring"""
snake_case_ : List[Any] = [False] * len(_UpperCamelCase )
snake_case_ : int = [-1] * len(_UpperCamelCase )
def dfs(_UpperCamelCase , _UpperCamelCase ):
snake_case_ : Dict = True
snake_case_ : Dict = c
for u in graph[v]:
if not visited[u]:
dfs(_UpperCamelCase , 1 - c )
for i in range(len(_UpperCamelCase ) ):
if not visited[i]:
dfs(_UpperCamelCase , 0 )
for i in range(len(_UpperCamelCase ) ):
for j in graph[i]:
if color[i] == color[j]:
return False
return True
# Adjacency list of graph
lowerCAmelCase_ = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
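# Note: the renaming pass in this dump dropped the assignment targets inside dfs()
# (presumably visited[v] = True and color[v] = c), so the function above is not runnable
# as printed. A minimal self-contained sketch of the same DFS two-coloring check:
def _is_bipartite_sketch(adj):
    color = {v: -1 for v in adj}  # -1 means "not colored yet"
    def dfs(v, c):
        color[v] = c
        for u in adj[v]:
            if color[u] == -1:
                if not dfs(u, 1 - c):
                    return False
            elif color[u] == c:  # a neighbour got the same color: an odd cycle exists
                return False
        return True
    return all(color[v] != -1 or dfs(v, 0) for v in adj)

assert _is_bipartite_sketch({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []})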
| 60 | 1 |
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase_ = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
lowerCAmelCase_ = 5_0_0_0_3
lowerCAmelCase_ = 5_0_0_0_2
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( _a, unittest.TestCase ):
lowerCamelCase_ : Optional[int] = PLBartTokenizer
lowerCamelCase_ : Optional[int] = None
lowerCamelCase_ : int = False
def lowerCamelCase (self ) -> List[Any]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
snake_case_ : Tuple = PLBartTokenizer(__magic_name__ , language_codes='''base''' , keep_accents=__magic_name__ )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
snake_case_ : Optional[int] = PLBartTokenizer(__magic_name__ , language_codes='''base''' , keep_accents=__magic_name__ )
snake_case_ : List[str] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(__magic_name__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__magic_name__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
snake_case_ : List[str] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__magic_name__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
snake_case_ : List[str] = tokenizer.convert_tokens_to_ids(__magic_name__ )
self.assertListEqual(
__magic_name__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
snake_case_ : List[str] = tokenizer.convert_ids_to_tokens(__magic_name__ )
self.assertListEqual(
__magic_name__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
snake_case_ : int = tokenizer.vocab_size
snake_case_ : Optional[Any] = [tokenizer.convert_ids_to_tokens(__magic_name__ ) for x in range(end - 4 , __magic_name__ )]
self.assertListEqual(__magic_name__ , ['''__java__''', '''__python__''', '''__en_XX__''', '''<mask>'''] )
snake_case_ : int = '''java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'''
snake_case_ : Any = tokenizer(__magic_name__ ).input_ids
self.assertEqual(
tokenizer.decode(__magic_name__ , skip_special_tokens=__magic_name__ , clean_up_tokenization_spaces=__magic_name__ ) , __magic_name__ , )
def lowerCamelCase (self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Optional[int] = PLBartTokenizer(__magic_name__ , language_codes='''multi''' , keep_accents=__magic_name__ )
snake_case_ : Tuple = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(__magic_name__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__magic_name__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
snake_case_ : Tuple = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__magic_name__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
snake_case_ : List[Any] = tokenizer.convert_tokens_to_ids(__magic_name__ )
self.assertListEqual(
__magic_name__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
snake_case_ : List[Any] = tokenizer.convert_ids_to_tokens(__magic_name__ )
self.assertListEqual(
__magic_name__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
snake_case_ : Dict = tokenizer.vocab_size
snake_case_ : List[str] = [tokenizer.convert_ids_to_tokens(__magic_name__ ) for x in range(end - 7 , __magic_name__ )]
self.assertListEqual(
__magic_name__ , ['''__java__''', '''__python__''', '''__en_XX__''', '''__javascript__''', '''__php__''', '''__ruby__''', '''__go__'''] )
snake_case_ : List[Any] = '''java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'''
snake_case_ : Any = tokenizer(__magic_name__ ).input_ids
self.assertEqual(
tokenizer.decode(__magic_name__ , skip_special_tokens=__magic_name__ , clean_up_tokenization_spaces=__magic_name__ ) , __magic_name__ , )
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( unittest.TestCase ):
lowerCamelCase_ : List[Any] = '''uclanlp/plbart-python-en_XX'''
lowerCamelCase_ : Dict = [
'''def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])''',
'''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''',
]
lowerCamelCase_ : Optional[Any] = [
'''Returns the maximum value of a b c.''',
'''Sums the values of a b c.''',
]
lowerCamelCase_ : int = [
134,
5_452,
33_460,
33_441,
33_463,
33_465,
33_463,
33_449,
988,
20,
33_456,
19,
33_456,
771,
39,
4_258,
889,
3_318,
33_441,
33_463,
33_465,
33_463,
33_449,
2_471,
2,
PYTHON_CODE,
]
@classmethod
def lowerCamelCase (cls ) -> Tuple:
'''simple docstring'''
snake_case_ : PLBartTokenizer = PLBartTokenizer.from_pretrained(
cls.checkpoint_name , language_codes='''base''' , src_lang='''python''' , tgt_lang='''en_XX''' )
snake_case_ : List[str] = 1
return cls
def lowerCamelCase (self ) -> Union[str, Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__java__'''] , 5_0001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__python__'''] , 5_0002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__en_XX__'''] , 5_0003 )
def lowerCamelCase (self ) -> Tuple:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __magic_name__ )
def lowerCamelCase (self ) -> str:
'''simple docstring'''
self.assertIn(__magic_name__ , self.tokenizer.all_special_ids )
snake_case_ : Dict = [EN_CODE, 9037, 3_3442, 57, 752, 153, 14, 56, 18, 9, 2]
snake_case_ : int = self.tokenizer.decode(__magic_name__ , skip_special_tokens=__magic_name__ )
snake_case_ : Union[str, Any] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__magic_name__ )
self.assertEqual(__magic_name__ , __magic_name__ )
self.assertNotIn(self.tokenizer.eos_token , __magic_name__ )
def lowerCamelCase (self ) -> str:
'''simple docstring'''
snake_case_ : Tuple = ['''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''' * 20]
self.assertIsInstance(src_text[0] , __magic_name__ )
snake_case_ : Any = 10
snake_case_ : Union[str, Any] = self.tokenizer(__magic_name__ , max_length=__magic_name__ , truncation=__magic_name__ ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , __magic_name__ )
self.assertEqual(len(__magic_name__ ) , __magic_name__ )
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''__java__'''] ) , [5_0004, 5_0001] )
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : List[str] = tempfile.mkdtemp()
snake_case_ : List[str] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__magic_name__ )
snake_case_ : List[str] = PLBartTokenizer.from_pretrained(__magic_name__ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __magic_name__ )
@require_torch
def lowerCamelCase (self ) -> Tuple:
'''simple docstring'''
snake_case_ : str = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__magic_name__ , return_tensors='''pt''' )
snake_case_ : Optional[Any] = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , __magic_name__ )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Dict = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__magic_name__ , truncation=__magic_name__ , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
snake_case_ : Tuple = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
self.assertIsInstance(__magic_name__ , __magic_name__ )
self.assertEqual((2, 26) , batch.input_ids.shape )
self.assertEqual((2, 26) , batch.attention_mask.shape )
snake_case_ : Tuple = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __magic_name__ )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def lowerCamelCase (self ) -> List[Any]:
'''simple docstring'''
snake_case_ : Tuple = self.tokenizer(self.src_text , padding=__magic_name__ , truncation=__magic_name__ , max_length=3 , return_tensors='''pt''' )
snake_case_ : Any = self.tokenizer(
text_target=self.tgt_text , padding=__magic_name__ , truncation=__magic_name__ , max_length=10 , return_tensors='''pt''' )
snake_case_ : Tuple = targets['''input_ids''']
snake_case_ : Tuple = shift_tokens_right(__magic_name__ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def lowerCamelCase (self ) -> List[str]:
'''simple docstring'''
snake_case_ : Dict = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''java''' )
self.assertEqual(
nested_simplify(__magic_name__ ) , {
# A, test, EOS, en_XX
'''input_ids''': [[150, 242, 2, 5_0003]],
'''attention_mask''': [[1, 1, 1, 1]],
# java
'''forced_bos_token_id''': 5_0001,
} , )
| 60 |
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class __lowerCAmelCase ( unittest.TestCase ):
def __init__(self , __magic_name__ , __magic_name__=7 , __magic_name__=3 , __magic_name__=18 , __magic_name__=30 , __magic_name__=400 , __magic_name__=True , __magic_name__=None , __magic_name__=True , __magic_name__=None , __magic_name__=True , __magic_name__=[0.5, 0.5, 0.5] , __magic_name__=[0.5, 0.5, 0.5] , __magic_name__=False , ) -> int:
'''simple docstring'''
snake_case_ : int = size if size is not None else {'''height''': 20, '''width''': 20}
snake_case_ : int = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
snake_case_ : str = parent
snake_case_ : Optional[int] = batch_size
snake_case_ : Dict = num_channels
snake_case_ : List[Any] = image_size
snake_case_ : Union[str, Any] = min_resolution
snake_case_ : Tuple = max_resolution
snake_case_ : str = do_resize
snake_case_ : Tuple = size
snake_case_ : int = do_center_crop
snake_case_ : Tuple = crop_size
snake_case_ : int = do_normalize
snake_case_ : Optional[Any] = image_mean
snake_case_ : List[str] = image_std
snake_case_ : str = do_reduce_labels
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def lowerCamelCase_ ( ) -> List[Any]:
"""simple docstring"""
snake_case_ : Any = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
snake_case_ : Union[str, Any] = Image.open(dataset[0]['''file'''] )
snake_case_ : str = Image.open(dataset[1]['''file'''] )
return image, map
def lowerCamelCase_ ( ) -> List[Any]:
"""simple docstring"""
snake_case_ : str = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
snake_case_ : Optional[Any] = Image.open(ds[0]['''file'''] )
snake_case_ : Optional[Any] = Image.open(ds[1]['''file'''] )
snake_case_ : List[str] = Image.open(ds[2]['''file'''] )
snake_case_ : str = Image.open(ds[3]['''file'''] )
return [imagea, imagea], [mapa, mapa]
@require_torch
@require_vision
class __lowerCAmelCase ( _a, unittest.TestCase ):
lowerCamelCase_ : List[Any] = BeitImageProcessor if is_vision_available() else None
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : int = BeitImageProcessingTester(self )
@property
def lowerCamelCase (self ) -> str:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase (self ) -> int:
'''simple docstring'''
snake_case_ : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__magic_name__ , '''do_resize''' ) )
self.assertTrue(hasattr(__magic_name__ , '''size''' ) )
self.assertTrue(hasattr(__magic_name__ , '''do_center_crop''' ) )
self.assertTrue(hasattr(__magic_name__ , '''center_crop''' ) )
self.assertTrue(hasattr(__magic_name__ , '''do_normalize''' ) )
self.assertTrue(hasattr(__magic_name__ , '''image_mean''' ) )
self.assertTrue(hasattr(__magic_name__ , '''image_std''' ) )
def lowerCamelCase (self ) -> Any:
'''simple docstring'''
snake_case_ : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 20, '''width''': 20} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
self.assertEqual(image_processor.do_reduce_labels , __magic_name__ )
snake_case_ : Union[str, Any] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=__magic_name__ )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
self.assertEqual(image_processor.do_reduce_labels , __magic_name__ )
def lowerCamelCase (self ) -> Any:
'''simple docstring'''
pass
def lowerCamelCase (self ) -> str:
'''simple docstring'''
snake_case_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , Image.Image )
# Test not batched input
snake_case_ : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
snake_case_ : Any = image_processing(__magic_name__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
snake_case_ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case_ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , numpify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , np.ndarray )
# Test not batched input
snake_case_ : Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
snake_case_ : Optional[int] = image_processing(__magic_name__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , torchify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , torch.Tensor )
# Test not batched input
snake_case_ : Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
snake_case_ : List[str] = image_processing(__magic_name__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , torchify=__magic_name__ )
snake_case_ : Union[str, Any] = []
for image in image_inputs:
self.assertIsInstance(__magic_name__ , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
snake_case_ : List[str] = image_processing(image_inputs[0] , maps[0] , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
# Test batched
snake_case_ : Any = image_processing(__magic_name__ , __magic_name__ , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
# Test not batched input (PIL images)
snake_case_ , snake_case_ : Optional[int] = prepare_semantic_single_inputs()
snake_case_ : int = image_processing(__magic_name__ , __magic_name__ , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
# Test batched input (PIL images)
snake_case_ , snake_case_ : Dict = prepare_semantic_batch_inputs()
snake_case_ : Optional[int] = image_processing(__magic_name__ , __magic_name__ , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
2,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
snake_case_ , snake_case_ : Tuple = prepare_semantic_single_inputs()
snake_case_ : Optional[int] = image_processing(__magic_name__ , __magic_name__ , return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 150 )
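        # (the assignment below presumably enables do_reduce_labels: the background label 0 is
        # remapped to 255 and all other labels shift down by one, hence the 255 upper bound)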
snake_case_ : List[Any] = True
snake_case_ : int = image_processing(__magic_name__ , __magic_name__ , return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
| 60 | 1 |
import argparse
lowerCAmelCase_ = '''docs/source/_static/js/custom.js'''
def lowerCamelCase_ ( _UpperCamelCase ) -> Union[str, Any]:
"""simple docstring"""
with open(_UpperCamelCase , encoding='''utf-8''' , newline='''\n''' ) as f:
snake_case_ : List[str] = f.readlines()
snake_case_ : str = 0
# First let's put the right version
while not lines[index].startswith('''const stableVersion =''' ):
index += 1
snake_case_ : str = f'''const stableVersion = "v{version}"\n'''
# Then update the dictionary
while not lines[index].startswith('''const versionMapping = {''' ):
index += 1
# We go until the end
while not lines[index].startswith('''}''' ):
index += 1
# We add the new version at the end
lines[index - 1] += f''' "v{version}": "v{version}",\n'''
with open(_UpperCamelCase , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(_UpperCamelCase )
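# Illustration (hypothetical file contents, for clarity only): given a custom.js containing
#     const stableVersion = "v4.27.0"
#     const versionMapping = { ... }
# running this updater with version "4.28.0" rewrites the first line to
#     const stableVersion = "v4.28.0"
# and appends `    "v4.28.0": "v4.28.0",` just before the closing `}` of versionMapping.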
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument('''--version''', help='''Release version.''')
lowerCAmelCase_ = parser.parse_args()
update_custom_js(args.version)
| 60 |
from sklearn.metrics import mean_squared_error
import datasets
lowerCAmelCase_ = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
lowerCAmelCase_ = '''\
Mean Squared Error (MSE) is the average of the squared differences between the predicted
and actual values.
'''
lowerCAmelCase_ = '''
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
"raw_values" : Returns a full set of errors in case of multioutput input.
"uniform_average" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric("mse")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{\'mse\': 0.6123724356957945}
If you\'re using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric("mse", "multilist")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mse\': array([0.41666667, 1. ])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
def lowerCamelCase (self ) -> Optional[int]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
'''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'''
] , )
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value('''float''' ) ),
"references": datasets.Sequence(datasets.Value('''float''' ) ),
}
else:
return {
"predictions": datasets.Value('''float''' ),
"references": datasets.Value('''float''' ),
}
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__=None , __magic_name__="uniform_average" , __magic_name__=True ) -> Any:
'''simple docstring'''
snake_case_ : List[Any] = mean_squared_error(
__magic_name__ , __magic_name__ , sample_weight=__magic_name__ , multioutput=__magic_name__ , squared=__magic_name__ )
return {"mse": mse}
| 60 | 1 |
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''google/owlvit-base-patch32''': '''https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json''',
'''google/owlvit-base-patch16''': '''https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json''',
'''google/owlvit-large-patch14''': '''https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json''',
}
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : Tuple = '''owlvit_text_model'''
def __init__(self , __magic_name__=4_9408 , __magic_name__=512 , __magic_name__=2048 , __magic_name__=12 , __magic_name__=8 , __magic_name__=16 , __magic_name__="quick_gelu" , __magic_name__=1e-5 , __magic_name__=0.0 , __magic_name__=0.02 , __magic_name__=1.0 , __magic_name__=0 , __magic_name__=4_9406 , __magic_name__=4_9407 , **__magic_name__ , ) -> str:
'''simple docstring'''
super().__init__(pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
snake_case_ : int = vocab_size
snake_case_ : str = hidden_size
snake_case_ : List[Any] = intermediate_size
snake_case_ : str = num_hidden_layers
snake_case_ : List[Any] = num_attention_heads
snake_case_ : Optional[Any] = max_position_embeddings
snake_case_ : str = hidden_act
snake_case_ : Union[str, Any] = layer_norm_eps
snake_case_ : Dict = attention_dropout
snake_case_ : Union[str, Any] = initializer_range
snake_case_ : int = initializer_factor
@classmethod
def lowerCamelCase (cls , __magic_name__ , **__magic_name__ ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(__magic_name__ )
snake_case_ , snake_case_ : str = cls.get_config_dict(__magic_name__ , **__magic_name__ )
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get('''model_type''' ) == "owlvit":
snake_case_ : str = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__magic_name__ , **__magic_name__ )
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : int = '''owlvit_vision_model'''
def __init__(self , __magic_name__=768 , __magic_name__=3072 , __magic_name__=12 , __magic_name__=12 , __magic_name__=3 , __magic_name__=768 , __magic_name__=32 , __magic_name__="quick_gelu" , __magic_name__=1e-5 , __magic_name__=0.0 , __magic_name__=0.02 , __magic_name__=1.0 , **__magic_name__ , ) -> int:
'''simple docstring'''
super().__init__(**__magic_name__ )
snake_case_ : Optional[Any] = hidden_size
snake_case_ : Union[str, Any] = intermediate_size
snake_case_ : Union[str, Any] = num_hidden_layers
snake_case_ : Tuple = num_attention_heads
snake_case_ : List[Any] = num_channels
snake_case_ : Union[str, Any] = image_size
snake_case_ : Dict = patch_size
snake_case_ : List[Any] = hidden_act
snake_case_ : Tuple = layer_norm_eps
snake_case_ : Dict = attention_dropout
snake_case_ : List[str] = initializer_range
snake_case_ : List[Any] = initializer_factor
@classmethod
def lowerCamelCase (cls , __magic_name__ , **__magic_name__ ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(__magic_name__ )
snake_case_ , snake_case_ : int = cls.get_config_dict(__magic_name__ , **__magic_name__ )
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get('''model_type''' ) == "owlvit":
snake_case_ : str = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__magic_name__ , **__magic_name__ )
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : int = '''owlvit'''
lowerCamelCase_ : Optional[int] = True
def __init__(self , __magic_name__=None , __magic_name__=None , __magic_name__=512 , __magic_name__=2.6_592 , __magic_name__=True , **__magic_name__ , ) -> int:
'''simple docstring'''
super().__init__(**__magic_name__ )
if text_config is None:
snake_case_ : Tuple = {}
logger.info('''text_config is None. Initializing the OwlViTTextConfig with default values.''' )
if vision_config is None:
snake_case_ : str = {}
            logger.info('''vision_config is None. Initializing the OwlViTVisionConfig with default values.''' )
snake_case_ : str = OwlViTTextConfig(**__magic_name__ )
snake_case_ : Union[str, Any] = OwlViTVisionConfig(**__magic_name__ )
snake_case_ : Any = projection_dim
snake_case_ : Union[str, Any] = logit_scale_init_value
snake_case_ : str = return_dict
snake_case_ : Any = 1.0
@classmethod
def lowerCamelCase (cls , __magic_name__ , **__magic_name__ ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(__magic_name__ )
snake_case_ , snake_case_ : Optional[Any] = cls.get_config_dict(__magic_name__ , **__magic_name__ )
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__magic_name__ , **__magic_name__ )
@classmethod
def lowerCamelCase (cls , __magic_name__ , __magic_name__ , **__magic_name__ ) -> str:
'''simple docstring'''
snake_case_ : Optional[int] = {}
snake_case_ : Union[str, Any] = text_config
snake_case_ : Optional[Any] = vision_config
return cls.from_dict(__magic_name__ , **__magic_name__ )
def lowerCamelCase (self ) -> str:
'''simple docstring'''
snake_case_ : Dict = copy.deepcopy(self.__dict__ )
snake_case_ : List[Any] = self.text_config.to_dict()
snake_case_ : List[Any] = self.vision_config.to_dict()
snake_case_ : Tuple = self.__class__.model_type
return output
class __lowerCAmelCase ( _a ):
@property
def lowerCamelCase (self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
] )
@property
def lowerCamelCase (self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''logits_per_image''', {0: '''batch'''}),
('''logits_per_text''', {0: '''batch'''}),
('''text_embeds''', {0: '''batch'''}),
('''image_embeds''', {0: '''batch'''}),
] )
@property
def lowerCamelCase (self ) -> float:
'''simple docstring'''
return 1e-4
def lowerCamelCase (self , __magic_name__ , __magic_name__ = -1 , __magic_name__ = -1 , __magic_name__ = None , ) -> Mapping[str, Any]:
'''simple docstring'''
snake_case_ : Dict = super().generate_dummy_inputs(
processor.tokenizer , batch_size=__magic_name__ , seq_length=__magic_name__ , framework=__magic_name__ )
snake_case_ : List[str] = super().generate_dummy_inputs(
processor.image_processor , batch_size=__magic_name__ , framework=__magic_name__ )
return {**text_input_dict, **image_input_dict}
@property
def lowerCamelCase (self ) -> int:
'''simple docstring'''
return 14
| 60 |
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class __lowerCAmelCase :
lowerCamelCase_ : Any = None
def lowerCamelCase (self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
snake_case_ : List[Any] = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , __magic_name__ )
def lowerCamelCase (self ) -> int:
'''simple docstring'''
snake_case_ : Dict = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case_ : Optional[int] = os.path.join(__magic_name__ , '''feat_extract.json''' )
feat_extract_first.to_json_file(__magic_name__ )
snake_case_ : str = self.feature_extraction_class.from_json_file(__magic_name__ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Optional[int] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case_ : str = feat_extract_first.save_pretrained(__magic_name__ )[0]
check_json_file_has_correct_format(__magic_name__ )
snake_case_ : Dict = self.feature_extraction_class.from_pretrained(__magic_name__ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def lowerCamelCase (self ) -> str:
'''simple docstring'''
snake_case_ : Tuple = self.feature_extraction_class()
self.assertIsNotNone(__magic_name__ )
| 60 | 1 |
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class __lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase (self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : str = ['''a''', '''b''', '''c''']
# Defaults to last layer if both are None
snake_case_ , snake_case_ : Dict = get_aligned_output_features_output_indices(__magic_name__ , __magic_name__ , __magic_name__ )
self.assertEqual(__magic_name__ , ['''c'''] )
self.assertEqual(__magic_name__ , [2] )
# Out indices set to match out features
snake_case_ , snake_case_ : List[str] = get_aligned_output_features_output_indices(['''a''', '''c'''] , __magic_name__ , __magic_name__ )
self.assertEqual(__magic_name__ , ['''a''', '''c'''] )
self.assertEqual(__magic_name__ , [0, 2] )
# Out features set to match out indices
snake_case_ , snake_case_ : Dict = get_aligned_output_features_output_indices(__magic_name__ , [0, 2] , __magic_name__ )
self.assertEqual(__magic_name__ , ['''a''', '''c'''] )
self.assertEqual(__magic_name__ , [0, 2] )
# Out features selected from negative indices
snake_case_ , snake_case_ : List[str] = get_aligned_output_features_output_indices(__magic_name__ , [-3, -1] , __magic_name__ )
self.assertEqual(__magic_name__ , ['''a''', '''c'''] )
self.assertEqual(__magic_name__ , [-3, -1] )
def lowerCamelCase (self ) -> Union[str, Any]:
'''simple docstring'''
with self.assertRaises(__magic_name__ ):
verify_out_features_out_indices(['''a''', '''b'''] , (0, 1) , __magic_name__ )
# Out features must be a list
with self.assertRaises(__magic_name__ ):
verify_out_features_out_indices(('''a''', '''b''') , (0, 1) , ['''a''', '''b'''] )
# Out features must be a subset of stage names
with self.assertRaises(__magic_name__ ):
verify_out_features_out_indices(['''a''', '''b'''] , (0, 1) , ['''a'''] )
# Out indices must be a list or tuple
with self.assertRaises(__magic_name__ ):
verify_out_features_out_indices(__magic_name__ , 0 , ['''a''', '''b'''] )
# Out indices must be a subset of stage names
with self.assertRaises(__magic_name__ ):
verify_out_features_out_indices(__magic_name__ , (0, 1) , ['''a'''] )
# Out features and out indices must be the same length
with self.assertRaises(__magic_name__ ):
verify_out_features_out_indices(['''a''', '''b'''] , (0,) , ['''a''', '''b''', '''c'''] )
# Out features should match out indices
with self.assertRaises(__magic_name__ ):
verify_out_features_out_indices(['''a''', '''b'''] , (0, 2) , ['''a''', '''b''', '''c'''] )
# Out features and out indices should be in order
with self.assertRaises(__magic_name__ ):
verify_out_features_out_indices(['''b''', '''a'''] , (0, 1) , ['''a''', '''b'''] )
# Check passes with valid inputs
verify_out_features_out_indices(['''a''', '''b''', '''d'''] , (0, 1, -1) , ['''a''', '''b''', '''c''', '''d'''] )
def lowerCamelCase (self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Optional[int] = BackboneMixin()
snake_case_ : List[str] = ['''a''', '''b''', '''c''']
snake_case_ : Any = ['''a''', '''c''']
snake_case_ : Union[str, Any] = [0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features , ['''a''', '''c'''] )
self.assertEqual(backbone.out_indices , [0, 2] )
# Check out features and indices are updated correctly
snake_case_ : Union[str, Any] = ['''a''', '''b''']
self.assertEqual(backbone.out_features , ['''a''', '''b'''] )
self.assertEqual(backbone.out_indices , [0, 1] )
snake_case_ : str = [-3, -1]
self.assertEqual(backbone.out_features , ['''a''', '''c'''] )
self.assertEqual(backbone.out_indices , [-3, -1] )
| 60 |
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
class __lowerCAmelCase :
lowerCamelCase_ : str
lowerCamelCase_ : str = None
@staticmethod
def lowerCamelCase () -> Any:
'''simple docstring'''
raise NotImplementedError
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ) -> Dict:
'''simple docstring'''
raise NotImplementedError
def lowerCamelCase (self , __magic_name__ ) -> int:
'''simple docstring'''
raise NotImplementedError
def lowerCamelCase (self ) -> Union[str, Any]:
'''simple docstring'''
if not self.is_available():
raise RuntimeError(
F'''You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.''' )
@classmethod
def lowerCamelCase (cls ) -> List[Any]:
'''simple docstring'''
return F'''`pip install {cls.pip_package or cls.name}`'''
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : Optional[int] = '''optuna'''
@staticmethod
def lowerCamelCase () -> Union[str, Any]:
'''simple docstring'''
return is_optuna_available()
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ) -> Union[str, Any]:
'''simple docstring'''
return run_hp_search_optuna(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ )
def lowerCamelCase (self , __magic_name__ ) -> Union[str, Any]:
'''simple docstring'''
return default_hp_space_optuna(__magic_name__ )
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : Any = '''ray'''
lowerCamelCase_ : List[str] = '''\'ray[tune]\''''
@staticmethod
def lowerCamelCase () -> List[Any]:
'''simple docstring'''
return is_ray_available()
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ) -> Optional[Any]:
'''simple docstring'''
return run_hp_search_ray(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ )
def lowerCamelCase (self , __magic_name__ ) -> Union[str, Any]:
'''simple docstring'''
return default_hp_space_ray(__magic_name__ )
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : Tuple = '''sigopt'''
@staticmethod
def lowerCamelCase () -> Optional[int]:
'''simple docstring'''
return is_sigopt_available()
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ) -> List[str]:
'''simple docstring'''
return run_hp_search_sigopt(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ )
def lowerCamelCase (self , __magic_name__ ) -> int:
'''simple docstring'''
return default_hp_space_sigopt(__magic_name__ )
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : Tuple = '''wandb'''
@staticmethod
def lowerCamelCase () -> Dict:
'''simple docstring'''
return is_wandb_available()
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ) -> Optional[Any]:
'''simple docstring'''
return run_hp_search_wandb(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ )
def lowerCamelCase (self , __magic_name__ ) -> Optional[Any]:
'''simple docstring'''
return default_hp_space_wandb(__magic_name__ )
lowerCAmelCase_ = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def lowerCamelCase_ ( ) -> str:
"""simple docstring"""
snake_case_ : Optional[int] = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
if len(_UpperCamelCase ) > 0:
snake_case_ : Dict = available_backends[0].name
if len(_UpperCamelCase ) > 1:
logger.info(
f'''{len(_UpperCamelCase )} hyperparameter search backends available. Using {name} as the default.''' )
return name
raise RuntimeError(
'''No hyperparameter search backend available.\n'''
+ '''\n'''.join(
f''' - To install {backend.name} run {backend.pip_install()}'''
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
| 60 | 1 |
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class __lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase (self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Dict = inspect.getfile(accelerate.test_utils )
snake_case_ : List[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_script.py'''] )
snake_case_ : Any = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
@require_tpu
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
snake_case_ : Union[str, Any] = F'''
{self.test_dir}/xla_spawn.py
--num_cores 8
{self.test_file_path}
'''.split()
snake_case_ : Dict = [sys.executable] + distributed_args
execute_subprocess_async(__magic_name__ , env=os.environ.copy() )
| 60 |
def lowerCamelCase_ ( x_points , y_points , xa ) -> list:
    """simple docstring"""
    n = len(x_points )
    q = [[0] * n for i in range(n )]
    for i in range(n ):
        q[i][1] = y_points[i]
    for i in range(2 , n ):
        for j in range(i , n ):
            q[j][i] = (
                (xa - x_points[j - i + 1]) * q[j][i - 1]
                - (xa - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 60 | 1 |
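For readability, a cleanly named reference sketch of the same Neville iterated-interpolation idea follows, with a worked example; the function name and the sample points are illustrative and not taken from the snippet above.
def neville_interpolate(x_points, y_points, x0):
    """Evaluate the interpolating polynomial through (x_points, y_points) at x0.

    >>> neville_interpolate([1, 2, 3, 4], [1, 4, 9, 16], 2.5)  # data is y = x**2
    6.25
    """
    n = len(x_points)
    p = list(y_points)  # p[j] is refined in place, one interpolation order per pass
    for i in range(1, n):
        for j in range(n - 1, i - 1, -1):
            p[j] = (
                (x0 - x_points[j - i]) * p[j] - (x0 - x_points[j]) * p[j - 1]
            ) / (x_points[j] - x_points[j - i])
    return p[n - 1]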
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
lowerCAmelCase_ = logging.get_logger(__name__)
class __lowerCAmelCase :
def __init__(self , __magic_name__ , __magic_name__ ) -> List[Any]:
'''simple docstring'''
snake_case_ : Optional[int] = question_encoder
snake_case_ : Optional[int] = generator
snake_case_ : Optional[Any] = self.question_encoder
def lowerCamelCase (self , __magic_name__ ) -> Dict:
'''simple docstring'''
if os.path.isfile(__magic_name__ ):
raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''' )
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
snake_case_ : str = os.path.join(__magic_name__ , '''question_encoder_tokenizer''' )
snake_case_ : List[Any] = os.path.join(__magic_name__ , '''generator_tokenizer''' )
self.question_encoder.save_pretrained(__magic_name__ )
self.generator.save_pretrained(__magic_name__ )
@classmethod
def lowerCamelCase (cls , __magic_name__ , **__magic_name__ ) -> Any:
'''simple docstring'''
from ..auto.tokenization_auto import AutoTokenizer
snake_case_ : List[str] = kwargs.pop('''config''' , __magic_name__ )
if config is None:
snake_case_ : int = RagConfig.from_pretrained(__magic_name__ )
snake_case_ : Dict = AutoTokenizer.from_pretrained(
__magic_name__ , config=config.question_encoder , subfolder='''question_encoder_tokenizer''' )
snake_case_ : Dict = AutoTokenizer.from_pretrained(
__magic_name__ , config=config.generator , subfolder='''generator_tokenizer''' )
return cls(question_encoder=__magic_name__ , generator=__magic_name__ )
def __call__(self , *__magic_name__ , **__magic_name__ ) -> Tuple:
'''simple docstring'''
return self.current_tokenizer(*__magic_name__ , **__magic_name__ )
def lowerCamelCase (self , *__magic_name__ , **__magic_name__ ) -> Dict:
'''simple docstring'''
return self.generator.batch_decode(*__magic_name__ , **__magic_name__ )
def lowerCamelCase (self , *__magic_name__ , **__magic_name__ ) -> int:
'''simple docstring'''
return self.generator.decode(*__magic_name__ , **__magic_name__ )
def lowerCamelCase (self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Any = self.question_encoder
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
snake_case_ : Dict = self.generator
def lowerCamelCase (self , __magic_name__ , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = "longest" , __magic_name__ = None , __magic_name__ = True , **__magic_name__ , ) -> BatchEncoding:
'''simple docstring'''
warnings.warn(
'''`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '''
'''regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '''
'''context manager to prepare your targets. See the documentation of your specific tokenizer for more '''
'''details''' , __magic_name__ , )
if max_length is None:
snake_case_ : Dict = self.current_tokenizer.model_max_length
snake_case_ : List[str] = self(
__magic_name__ , add_special_tokens=__magic_name__ , return_tensors=__magic_name__ , max_length=__magic_name__ , padding=__magic_name__ , truncation=__magic_name__ , **__magic_name__ , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
snake_case_ : Optional[int] = self.current_tokenizer.model_max_length
snake_case_ : Union[str, Any] = self(
text_target=__magic_name__ , add_special_tokens=__magic_name__ , return_tensors=__magic_name__ , padding=__magic_name__ , max_length=__magic_name__ , truncation=__magic_name__ , **__magic_name__ , )
snake_case_ : str = labels['''input_ids''']
return model_inputs
| 60 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase_ = {
'''configuration_xmod''': [
'''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XmodConfig''',
'''XmodOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XmodForCausalLM''',
'''XmodForMaskedLM''',
'''XmodForMultipleChoice''',
'''XmodForQuestionAnswering''',
'''XmodForSequenceClassification''',
'''XmodForTokenClassification''',
'''XmodModel''',
'''XmodPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 60 | 1 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase_ = {
'''configuration_xmod''': [
'''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XmodConfig''',
'''XmodOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XmodForCausalLM''',
'''XmodForMaskedLM''',
'''XmodForMultipleChoice''',
'''XmodForQuestionAnswering''',
'''XmodForSequenceClassification''',
'''XmodForTokenClassification''',
'''XmodModel''',
'''XmodPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 60 |
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get ( k ) -> Union[str, Any]:
    """simple docstring"""
    return getitem, k
def _set ( k , v ) -> Any:
    """simple docstring"""
    return setitem, k, v
def _del ( k ) -> Tuple:
    """simple docstring"""
    return delitem, k
def _run_operation ( obj , fun , *args ) -> str:
    """simple docstring"""
    try:
        return fun(obj , *args ), None
    except Exception as e:
        return None, e
lowerCAmelCase_ = (
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
)
lowerCAmelCase_ = [
_set('''key_a''', '''val_a'''),
_set('''key_a''', '''val_b'''),
]
lowerCAmelCase_ = [
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
_del('''key_a'''),
_del('''key_b'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
]
lowerCAmelCase_ = [
_get('''key_a'''),
_del('''key_a'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
_del('''key_a'''),
_get('''key_a'''),
]
lowerCAmelCase_ = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
lowerCAmelCase_ = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set('''key_a''', '''val_b'''),
]
@pytest.mark.parametrize(
'''operations''' , (
pytest.param(_add_items , id='''add items''' ),
pytest.param(_overwrite_items , id='''overwrite items''' ),
pytest.param(_delete_items , id='''delete items''' ),
pytest.param(_access_absent_items , id='''access absent items''' ),
pytest.param(_add_with_resize_up , id='''add with resize up''' ),
pytest.param(_add_with_resize_down , id='''add with resize down''' ),
) , )
def lowerCamelCase_ ( operations ) -> Any:
    """simple docstring"""
    my = HashMap(initial_block_size=4 )
    py = {}
    for _, (fun, *args) in enumerate(operations ):
        my_res , my_exc = _run_operation(my , fun , *args )
        py_res , py_exc = _run_operation(py , fun , *args )
        assert my_res == py_res
        assert str(my ) == str(py )
        assert set(my ) == set(py )
        assert len(my ) == len(py )
        assert set(my.items() ) == set(py.items() )
def lowerCamelCase_ ( ) -> Any:
    """simple docstring"""
    def is_public(name ) -> bool:
        return not name.startswith('''_''' )
    dict_public_names = {name for name in dir({} ) if is_public(name )}
    hash_public_names = {name for name in dir(HashMap() ) if is_public(name )}
    assert dict_public_names > hash_public_names
| 60 | 1 |
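A minimal usage sketch of the HashMap exercised above, restricted to the dict-style operations the tests rely on; the key/value strings are illustrative.
from data_structures.hashing.hash_map import HashMap

hm = HashMap(initial_block_size=4)   # same constructor argument as in the test
hm["key_a"] = "val_a"                # __setitem__, as driven by _set above
hm["key_b"] = "val_b"
assert hm["key_a"] == "val_a"        # __getitem__
del hm["key_b"]                      # __delitem__
assert len(hm) == 1
assert set(hm.items()) == {("key_a", "val_a")}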
lowerCAmelCase_ = '''0.21.0'''
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 60 |
from __future__ import annotations
def bucket_sort ( my_list ) -> list:
    """simple docstring"""
    if len(my_list ) == 0:
        return []
    min_value , max_value = min(my_list ), max(my_list )
    bucket_count = int(max_value - min_value ) + 1
    buckets: list[list] = [[] for _ in range(bucket_count )]
    for i in my_list:
        buckets[int(i - min_value )].append(i )
    return [v for bucket in buckets for v in sorted(bucket )]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -1_0, 1_5, 2, -2]) == [-1_0, -2, 0, 1, 2, 1_5]
| 60 | 1 |
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
'''The RoBERTa Model transformer with early exiting (DeeRoBERTa). ''', _a, )
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : List[Any] = RobertaConfig
lowerCamelCase_ : Union[str, Any] = '''roberta'''
def __init__(self , __magic_name__ ) -> List[str]:
'''simple docstring'''
super().__init__(__magic_name__ )
snake_case_ : Optional[Any] = RobertaEmbeddings(__magic_name__ )
self.init_weights()
@add_start_docstrings(
'''RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
also takes care of multi-layer training. ''', _a, )
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : Optional[Any] = RobertaConfig
lowerCamelCase_ : Optional[Any] = '''roberta'''
def __init__(self , __magic_name__ ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(__magic_name__ )
snake_case_ : Any = config.num_labels
snake_case_ : int = config.num_hidden_layers
snake_case_ : List[Any] = DeeRobertaModel(__magic_name__ )
snake_case_ : Optional[int] = nn.Dropout(config.hidden_dropout_prob )
snake_case_ : Union[str, Any] = nn.Linear(config.hidden_size , self.config.num_labels )
@add_start_docstrings_to_model_forward(__magic_name__ )
def lowerCamelCase (self , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__=-1 , __magic_name__=False , ) -> List[str]:
'''simple docstring'''
snake_case_ : Optional[Any] = self.num_layers
try:
snake_case_ : Any = self.roberta(
__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , position_ids=__magic_name__ , head_mask=__magic_name__ , inputs_embeds=__magic_name__ , )
snake_case_ : Optional[int] = outputs[1]
snake_case_ : Any = self.dropout(__magic_name__ )
snake_case_ : Any = self.classifier(__magic_name__ )
snake_case_ : Union[str, Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
snake_case_ : Tuple = e.message
snake_case_ : int = e.exit_layer
snake_case_ : List[Any] = outputs[0]
if not self.training:
snake_case_ : int = entropy(__magic_name__ )
snake_case_ : Dict = []
snake_case_ : List[Any] = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
snake_case_ : Union[str, Any] = MSELoss()
snake_case_ : List[Any] = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
snake_case_ : int = CrossEntropyLoss()
snake_case_ : Tuple = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
snake_case_ : Dict = []
for highway_exit in outputs[-1]:
snake_case_ : int = highway_exit[0]
if not self.training:
highway_logits_all.append(__magic_name__ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
snake_case_ : Dict = MSELoss()
snake_case_ : Optional[Any] = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
snake_case_ : Optional[Any] = CrossEntropyLoss()
snake_case_ : Dict = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(__magic_name__ )
if train_highway:
snake_case_ : Any = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
snake_case_ : int = (loss,) + outputs
if not self.training:
snake_case_ : Tuple = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
snake_case_ : List[Any] = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
| 60 |
import tensorflow as tf
from ...tf_utils import shape_list
class __lowerCAmelCase ( tf.keras.layers.Layer ):
def __init__(self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=1 , __magic_name__=False , **__magic_name__ ) -> Dict:
'''simple docstring'''
super().__init__(**__magic_name__ )
snake_case_ : List[Any] = vocab_size
snake_case_ : Dict = d_embed
snake_case_ : Union[str, Any] = d_proj
snake_case_ : str = cutoffs + [vocab_size]
snake_case_ : int = [0] + self.cutoffs
snake_case_ : Optional[int] = div_val
snake_case_ : int = self.cutoffs[0]
snake_case_ : Any = len(self.cutoffs ) - 1
snake_case_ : Union[str, Any] = self.shortlist_size + self.n_clusters
snake_case_ : str = keep_order
snake_case_ : int = []
snake_case_ : Union[str, Any] = []
def lowerCamelCase (self , __magic_name__ ) -> Union[str, Any]:
'''simple docstring'''
if self.n_clusters > 0:
snake_case_ : Tuple = self.add_weight(
shape=(self.n_clusters, self.d_embed) , initializer='''zeros''' , trainable=__magic_name__ , name='''cluster_weight''' )
snake_case_ : Optional[Any] = self.add_weight(
shape=(self.n_clusters,) , initializer='''zeros''' , trainable=__magic_name__ , name='''cluster_bias''' )
if self.div_val == 1:
for i in range(len(self.cutoffs ) ):
if self.d_proj != self.d_embed:
snake_case_ : List[str] = self.add_weight(
shape=(self.d_embed, self.d_proj) , initializer='''zeros''' , trainable=__magic_name__ , name=F'''out_projs_._{i}''' , )
self.out_projs.append(__magic_name__ )
else:
self.out_projs.append(__magic_name__ )
snake_case_ : Optional[Any] = self.add_weight(
shape=(self.vocab_size, self.d_embed) , initializer='''zeros''' , trainable=__magic_name__ , name=F'''out_layers_._{i}_._weight''' , )
snake_case_ : List[str] = self.add_weight(
shape=(self.vocab_size,) , initializer='''zeros''' , trainable=__magic_name__ , name=F'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
else:
for i in range(len(self.cutoffs ) ):
snake_case_ , snake_case_ : Optional[int] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
snake_case_ : Optional[Any] = self.d_embed // (self.div_val**i)
snake_case_ : int = self.add_weight(
shape=(d_emb_i, self.d_proj) , initializer='''zeros''' , trainable=__magic_name__ , name=F'''out_projs_._{i}''' )
self.out_projs.append(__magic_name__ )
snake_case_ : int = self.add_weight(
shape=(r_idx - l_idx, d_emb_i) , initializer='''zeros''' , trainable=__magic_name__ , name=F'''out_layers_._{i}_._weight''' , )
snake_case_ : Any = self.add_weight(
shape=(r_idx - l_idx,) , initializer='''zeros''' , trainable=__magic_name__ , name=F'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
super().build(__magic_name__ )
@staticmethod
def lowerCamelCase (__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None ) -> str:
'''simple docstring'''
snake_case_ : Union[str, Any] = x
if proj is not None:
snake_case_ : List[str] = tf.einsum('''ibd,ed->ibe''' , __magic_name__ , __magic_name__ )
return tf.einsum('''ibd,nd->ibn''' , __magic_name__ , __magic_name__ ) + b
@staticmethod
def lowerCamelCase (__magic_name__ , __magic_name__ ) -> Any:
'''simple docstring'''
snake_case_ : Union[str, Any] = shape_list(__magic_name__ )
snake_case_ : Tuple = tf.range(lp_size[0] , dtype=target.dtype )
snake_case_ : Dict = tf.stack([r, target] , 1 )
return tf.gather_nd(__magic_name__ , __magic_name__ )
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__=True , __magic_name__=False ) -> str:
'''simple docstring'''
snake_case_ : Optional[Any] = 0
if self.n_clusters == 0:
snake_case_ : Any = self._logit(__magic_name__ , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
if target is not None:
snake_case_ : Union[str, Any] = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=__magic_name__ , logits=__magic_name__ )
snake_case_ : Optional[Any] = tf.nn.log_softmax(__magic_name__ , axis=-1 )
else:
snake_case_ : Optional[int] = shape_list(__magic_name__ )
snake_case_ : int = []
snake_case_ : List[Any] = tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
snake_case_ , snake_case_ : Optional[int] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
snake_case_ : str = (target >= l_idx) & (target < r_idx)
snake_case_ : Dict = tf.where(__magic_name__ )
snake_case_ : List[str] = tf.boolean_mask(__magic_name__ , __magic_name__ ) - l_idx
if self.div_val == 1:
snake_case_ : Any = self.out_layers[0][0][l_idx:r_idx]
snake_case_ : Dict = self.out_layers[0][1][l_idx:r_idx]
else:
snake_case_ : Union[str, Any] = self.out_layers[i][0]
snake_case_ : int = self.out_layers[i][1]
if i == 0:
snake_case_ : List[Any] = tf.concat([cur_W, self.cluster_weight] , 0 )
snake_case_ : Tuple = tf.concat([cur_b, self.cluster_bias] , 0 )
snake_case_ : Optional[int] = self._logit(__magic_name__ , __magic_name__ , __magic_name__ , self.out_projs[0] )
snake_case_ : Any = tf.nn.log_softmax(__magic_name__ )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
snake_case_ : Optional[Any] = tf.boolean_mask(__magic_name__ , __magic_name__ )
snake_case_ : Tuple = self._gather_logprob(__magic_name__ , __magic_name__ )
else:
snake_case_ : Optional[int] = self._logit(__magic_name__ , __magic_name__ , __magic_name__ , self.out_projs[i] )
snake_case_ : Union[str, Any] = tf.nn.log_softmax(__magic_name__ )
snake_case_ : Optional[Any] = self.cutoffs[0] + i - 1 # No probability for the head cluster
snake_case_ : Optional[int] = head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(__magic_name__ )
if target is not None:
snake_case_ : Any = tf.boolean_mask(__magic_name__ , __magic_name__ )
snake_case_ : Optional[Any] = tf.boolean_mask(__magic_name__ , __magic_name__ )
snake_case_ : str = self._gather_logprob(__magic_name__ , __magic_name__ )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(__magic_name__ , -cur_logprob , shape_list(__magic_name__ ) )
snake_case_ : str = tf.concat(__magic_name__ , axis=-1 )
if target is not None:
if return_mean:
snake_case_ : int = tf.reduce_mean(__magic_name__ )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(__magic_name__ )
# Log the loss as a metric (we could log arbitrary metrics,
# including different metrics for training and inference.
self.add_metric(__magic_name__ , name=self.name , aggregation='''mean''' if return_mean else '''''' )
return out
| 60 | 1 |
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
lowerCAmelCase_ = Path(__file__).resolve().parents[3] / '''src'''
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(4_2)
lowerCAmelCase_ = {'''base''': '''patrickvonplaten/wav2vec2_tiny_random''', '''robust''': '''patrickvonplaten/wav2vec2_tiny_random_robust'''}
lowerCAmelCase_ = '''zero2'''
lowerCAmelCase_ = '''zero3'''
lowerCAmelCase_ = [ZEROa, ZEROa]
def lowerCamelCase_ ( func , param_num , param ) -> Union[str, Any]:
    """simple docstring"""
    param_based_name = parameterized.to_safe_name('''_'''.join(str(x ) for x in param.args ) )
    return f'''{func.__name__}_{param_based_name}'''
# Cartesian-product of zero stages with models to test
lowerCAmelCase_ = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class __lowerCAmelCase ( _a ):
@parameterized.expand(__magic_name__ , name_func=__magic_name__ )
def lowerCamelCase (self , __magic_name__ , __magic_name__ ) -> Dict:
'''simple docstring'''
self.run_and_check(
stage=__magic_name__ , model=__magic_name__ , distributed=__magic_name__ , fpaa=__magic_name__ , )
@require_torch_multi_gpu
@parameterized.expand(__magic_name__ , name_func=__magic_name__ )
def lowerCamelCase (self , __magic_name__ , __magic_name__ ) -> int:
'''simple docstring'''
self.run_and_check(
stage=__magic_name__ , model=__magic_name__ , distributed=__magic_name__ , fpaa=__magic_name__ , )
@parameterized.expand(__magic_name__ , name_func=__magic_name__ )
def lowerCamelCase (self , __magic_name__ , __magic_name__ ) -> List[Any]:
'''simple docstring'''
self.run_and_check(
stage=__magic_name__ , model=__magic_name__ , distributed=__magic_name__ , fpaa=__magic_name__ , )
@require_torch_multi_gpu
@parameterized.expand(__magic_name__ , name_func=__magic_name__ )
def lowerCamelCase (self , __magic_name__ , __magic_name__ ) -> Optional[Any]:
'''simple docstring'''
self.run_and_check(
stage=__magic_name__ , model=__magic_name__ , distributed=__magic_name__ , fpaa=__magic_name__ , )
def lowerCamelCase (self , __magic_name__ ) -> Dict:
'''simple docstring'''
pass
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ = 10 , __magic_name__ = True , __magic_name__ = True , __magic_name__ = True , ) -> Any:
'''simple docstring'''
snake_case_ : List[Any] = models[model]
snake_case_ : Optional[int] = self.run_trainer(
stage=__magic_name__ , model_name=__magic_name__ , eval_steps=__magic_name__ , num_train_epochs=1 , distributed=__magic_name__ , fpaa=__magic_name__ , )
self.do_checks(__magic_name__ )
return output_dir
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ = 10 , __magic_name__ = 1 , __magic_name__ = True , __magic_name__ = True , ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.get_auto_remove_tmp_dir('''./xxx''' , after=__magic_name__ )
snake_case_ : Optional[Any] = F'''
--model_name_or_path {model_name}
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--validation_split_name validation
--output_dir {output_dir}
--num_train_epochs {str(__magic_name__ )}
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--evaluation_strategy steps
--learning_rate 5e-4
--warmup_steps 8
--orthography timit
--preprocessing_num_workers 1
--group_by_length
--freeze_feature_extractor
--report_to none
--save_steps 0
--eval_steps {eval_steps}
--report_to none
'''.split()
if fpaa:
args.extend(['''--fp16'''] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
snake_case_ : Tuple = F'''--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'''.split()
snake_case_ : Dict = [F'''{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py''']
snake_case_ : Optional[int] = self.get_launcher(__magic_name__ )
snake_case_ : List[Any] = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(__magic_name__ , env=self.get_env() )
return output_dir
def lowerCamelCase (self , __magic_name__=False ) -> str:
'''simple docstring'''
snake_case_ : Optional[Any] = min(2 , get_gpu_count() ) if distributed else 1
return F'''deepspeed --num_nodes 1 --num_gpus {num_gpus}'''.split()
| 60 |
import requests
def send_slack_message ( message_body , slack_url ) -> None:
    """simple docstring"""
    headers = {'''Content-Type''': '''application/json'''}
    response = requests.post(slack_url , json={'''text''': message_body} , headers=headers )
    if response.status_code != 200:
        error_message = (
            '''Request to slack returned an error '''
            f'''{response.status_code}, the response is:\n{response.text}'''
        )
        raise ValueError(error_message )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('''<YOUR MESSAGE BODY>''', '''<SLACK CHANNEL URL>''')
| 60 | 1 |
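A small follow-on sketch, not part of the original snippet: reading the webhook URL from an environment variable keeps the secret out of source control. The variable name SLACK_WEBHOOK_URL is an assumption, not something defined above.
import os

webhook_url = os.environ.get("SLACK_WEBHOOK_URL")  # assumed environment variable
if webhook_url:
    send_slack_message("Nightly job finished", webhook_url)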
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class __lowerCAmelCase :
def __init__(self , __magic_name__ , __magic_name__=99 , __magic_name__=13 , __magic_name__=16 , __magic_name__=7 , __magic_name__=True , __magic_name__=True , __magic_name__=True , __magic_name__=False , __magic_name__=True , __magic_name__=2 , __magic_name__=32 , __magic_name__=4 , __magic_name__=4 , __magic_name__=30 , __magic_name__=0 , __magic_name__=1 , __magic_name__=2 , __magic_name__=None , ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Optional[int] = parent
snake_case_ : Union[str, Any] = batch_size
snake_case_ : Optional[Any] = decoder_seq_length
# For common tests
snake_case_ : Tuple = self.decoder_seq_length
snake_case_ : Tuple = is_training
snake_case_ : Optional[Any] = use_attention_mask
snake_case_ : Optional[int] = use_labels
snake_case_ : Dict = vocab_size
snake_case_ : Tuple = d_model
snake_case_ : Tuple = d_model
snake_case_ : str = decoder_layers
snake_case_ : List[str] = decoder_layers
snake_case_ : List[str] = decoder_ffn_dim
snake_case_ : int = decoder_attention_heads
snake_case_ : List[str] = decoder_attention_heads
snake_case_ : List[Any] = eos_token_id
snake_case_ : Optional[int] = bos_token_id
snake_case_ : Optional[Any] = pad_token_id
snake_case_ : str = decoder_start_token_id
snake_case_ : List[Any] = use_cache
snake_case_ : int = max_position_embeddings
snake_case_ : Optional[int] = None
snake_case_ : List[str] = decoder_seq_length
snake_case_ : str = 2
snake_case_ : Union[str, Any] = 1
def lowerCamelCase (self ) -> int:
'''simple docstring'''
snake_case_ : Union[str, Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
snake_case_ : Any = None
if self.use_attention_mask:
snake_case_ : List[str] = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
snake_case_ : List[Any] = None
if self.use_labels:
snake_case_ : Optional[Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
snake_case_ : Any = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ) -> int:
'''simple docstring'''
snake_case_ : List[str] = True
snake_case_ : List[Any] = TrOCRDecoder(config=__magic_name__ ).to(__magic_name__ ).eval()
snake_case_ : Optional[int] = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
snake_case_ : Any = model(__magic_name__ , use_cache=__magic_name__ )
snake_case_ : int = model(__magic_name__ )
snake_case_ : int = model(__magic_name__ , use_cache=__magic_name__ )
self.parent.assertTrue(len(__magic_name__ ) == len(__magic_name__ ) )
self.parent.assertTrue(len(__magic_name__ ) == len(__magic_name__ ) + 1 )
snake_case_ : Union[str, Any] = outputs['''past_key_values''']
# create hypothetical next token and extent to next_input_ids
snake_case_ : List[str] = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
# append to next input_ids and
snake_case_ : int = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case_ : Tuple = model(__magic_name__ )['''last_hidden_state''']
snake_case_ : str = model(__magic_name__ , past_key_values=__magic_name__ )['''last_hidden_state''']
# select random slice
snake_case_ : Dict = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case_ : Dict = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
snake_case_ : List[Any] = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(__magic_name__ , __magic_name__ , atol=1e-3 )
def lowerCamelCase (self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : int = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ , snake_case_ : Any = config_and_inputs
snake_case_ : Union[str, Any] = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( _a, _a, _a, unittest.TestCase ):
lowerCamelCase_ : Optional[int] = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
lowerCamelCase_ : int = (TrOCRForCausalLM,) if is_torch_available() else ()
lowerCamelCase_ : Union[str, Any] = {'''text-generation''': TrOCRForCausalLM} if is_torch_available() else {}
lowerCamelCase_ : List[Any] = True
lowerCamelCase_ : Optional[int] = False
def lowerCamelCase (self ) -> int:
'''simple docstring'''
snake_case_ : Union[str, Any] = TrOCRStandaloneDecoderModelTester(self , is_training=__magic_name__ )
snake_case_ : int = ConfigTester(self , config_class=__magic_name__ )
def lowerCamelCase (self ) -> Tuple:
'''simple docstring'''
pass
def lowerCamelCase (self ) -> Any:
'''simple docstring'''
pass
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
pass
def lowerCamelCase (self ) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase (self ) -> str:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*__magic_name__ )
def lowerCamelCase (self ) -> Tuple:
'''simple docstring'''
return
@unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
def lowerCamelCase (self ) -> Tuple:
'''simple docstring'''
pass
| 60 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase_ = {
'''configuration_speech_to_text''': ['''SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Speech2TextConfig'''],
'''processing_speech_to_text''': ['''Speech2TextProcessor'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['''Speech2TextTokenizer''']
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['''Speech2TextFeatureExtractor''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSpeech2TextForConditionalGeneration''',
'''TFSpeech2TextModel''',
'''TFSpeech2TextPreTrainedModel''',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Speech2TextForConditionalGeneration''',
'''Speech2TextModel''',
'''Speech2TextPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 60 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase_ = {
'''configuration_lxmert''': ['''LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LxmertConfig'''],
'''tokenization_lxmert''': ['''LxmertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['''LxmertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''LxmertEncoder''',
'''LxmertForPreTraining''',
'''LxmertForQuestionAnswering''',
'''LxmertModel''',
'''LxmertPreTrainedModel''',
'''LxmertVisualFeatureEncoder''',
'''LxmertXLayer''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLxmertForPreTraining''',
'''TFLxmertMainLayer''',
'''TFLxmertModel''',
'''TFLxmertPreTrainedModel''',
'''TFLxmertVisualFeatureEncoder''',
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 60 |
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''google/owlvit-base-patch32''': '''https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json''',
'''google/owlvit-base-patch16''': '''https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json''',
'''google/owlvit-large-patch14''': '''https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json''',
}
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : Tuple = '''owlvit_text_model'''
def __init__(self , __magic_name__=4_9408 , __magic_name__=512 , __magic_name__=2048 , __magic_name__=12 , __magic_name__=8 , __magic_name__=16 , __magic_name__="quick_gelu" , __magic_name__=1e-5 , __magic_name__=0.0 , __magic_name__=0.02 , __magic_name__=1.0 , __magic_name__=0 , __magic_name__=4_9406 , __magic_name__=4_9407 , **__magic_name__ , ) -> str:
'''simple docstring'''
super().__init__(pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
snake_case_ : int = vocab_size
snake_case_ : str = hidden_size
snake_case_ : List[Any] = intermediate_size
snake_case_ : str = num_hidden_layers
snake_case_ : List[Any] = num_attention_heads
snake_case_ : Optional[Any] = max_position_embeddings
snake_case_ : str = hidden_act
snake_case_ : Union[str, Any] = layer_norm_eps
snake_case_ : Dict = attention_dropout
snake_case_ : Union[str, Any] = initializer_range
snake_case_ : int = initializer_factor
@classmethod
def lowerCamelCase (cls , __magic_name__ , **__magic_name__ ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(__magic_name__ )
snake_case_ , snake_case_ : str = cls.get_config_dict(__magic_name__ , **__magic_name__ )
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get('''model_type''' ) == "owlvit":
snake_case_ : str = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__magic_name__ , **__magic_name__ )
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : int = '''owlvit_vision_model'''
def __init__(self , __magic_name__=768 , __magic_name__=3072 , __magic_name__=12 , __magic_name__=12 , __magic_name__=3 , __magic_name__=768 , __magic_name__=32 , __magic_name__="quick_gelu" , __magic_name__=1e-5 , __magic_name__=0.0 , __magic_name__=0.02 , __magic_name__=1.0 , **__magic_name__ , ) -> int:
'''simple docstring'''
super().__init__(**__magic_name__ )
snake_case_ : Optional[Any] = hidden_size
snake_case_ : Union[str, Any] = intermediate_size
snake_case_ : Union[str, Any] = num_hidden_layers
snake_case_ : Tuple = num_attention_heads
snake_case_ : List[Any] = num_channels
snake_case_ : Union[str, Any] = image_size
snake_case_ : Dict = patch_size
snake_case_ : List[Any] = hidden_act
snake_case_ : Tuple = layer_norm_eps
snake_case_ : Dict = attention_dropout
snake_case_ : List[str] = initializer_range
snake_case_ : List[Any] = initializer_factor
@classmethod
def lowerCamelCase (cls , __magic_name__ , **__magic_name__ ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(__magic_name__ )
snake_case_ , snake_case_ : int = cls.get_config_dict(__magic_name__ , **__magic_name__ )
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get('''model_type''' ) == "owlvit":
snake_case_ : str = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__magic_name__ , **__magic_name__ )
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : int = '''owlvit'''
lowerCamelCase_ : Optional[int] = True
def __init__(self , __magic_name__=None , __magic_name__=None , __magic_name__=512 , __magic_name__=2.6_592 , __magic_name__=True , **__magic_name__ , ) -> int:
'''simple docstring'''
super().__init__(**__magic_name__ )
if text_config is None:
snake_case_ : Tuple = {}
logger.info('''text_config is None. Initializing the OwlViTTextConfig with default values.''' )
if vision_config is None:
snake_case_ : str = {}
logger.info('''vision_config is None. initializing the OwlViTVisionConfig with default values.''' )
snake_case_ : str = OwlViTTextConfig(**__magic_name__ )
snake_case_ : Union[str, Any] = OwlViTVisionConfig(**__magic_name__ )
snake_case_ : Any = projection_dim
snake_case_ : Union[str, Any] = logit_scale_init_value
snake_case_ : str = return_dict
snake_case_ : Any = 1.0
@classmethod
def lowerCamelCase (cls , __magic_name__ , **__magic_name__ ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(__magic_name__ )
snake_case_ , snake_case_ : Optional[Any] = cls.get_config_dict(__magic_name__ , **__magic_name__ )
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__magic_name__ , **__magic_name__ )
@classmethod
def lowerCamelCase (cls , __magic_name__ , __magic_name__ , **__magic_name__ ) -> str:
'''simple docstring'''
snake_case_ : Optional[int] = {}
snake_case_ : Union[str, Any] = text_config
snake_case_ : Optional[Any] = vision_config
return cls.from_dict(__magic_name__ , **__magic_name__ )
def lowerCamelCase (self ) -> str:
'''simple docstring'''
snake_case_ : Dict = copy.deepcopy(self.__dict__ )
snake_case_ : List[Any] = self.text_config.to_dict()
snake_case_ : List[Any] = self.vision_config.to_dict()
snake_case_ : Tuple = self.__class__.model_type
return output
class __lowerCAmelCase ( _a ):
@property
def lowerCamelCase (self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
] )
@property
def lowerCamelCase (self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''logits_per_image''', {0: '''batch'''}),
('''logits_per_text''', {0: '''batch'''}),
('''text_embeds''', {0: '''batch'''}),
('''image_embeds''', {0: '''batch'''}),
] )
@property
def lowerCamelCase (self ) -> float:
'''simple docstring'''
return 1e-4
def lowerCamelCase (self , __magic_name__ , __magic_name__ = -1 , __magic_name__ = -1 , __magic_name__ = None , ) -> Mapping[str, Any]:
'''simple docstring'''
snake_case_ : Dict = super().generate_dummy_inputs(
processor.tokenizer , batch_size=__magic_name__ , seq_length=__magic_name__ , framework=__magic_name__ )
snake_case_ : List[str] = super().generate_dummy_inputs(
processor.image_processor , batch_size=__magic_name__ , framework=__magic_name__ )
return {**text_input_dict, **image_input_dict}
@property
def lowerCamelCase (self ) -> int:
'''simple docstring'''
return 14
| 60 | 1 |
import heapq
import sys
import numpy as np
lowerCAmelCase_ = tuple[int, int]
class __lowerCAmelCase :
def __init__(self ) -> int:
'''simple docstring'''
snake_case_ : int = []
snake_case_ : Dict = set()
def lowerCamelCase (self ) -> str:
'''simple docstring'''
if not self.empty():
return self.elements[0][0]
else:
return float('''inf''' )
def lowerCamelCase (self ) -> str:
'''simple docstring'''
return len(self.elements ) == 0
def lowerCamelCase (self , __magic_name__ , __magic_name__ ) -> List[Any]:
'''simple docstring'''
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(__magic_name__ )
else:
# update
# print("update", item)
snake_case_ : Dict = []
((snake_case_) , (snake_case_)) : List[Any] = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
((snake_case_) , (snake_case_)) : List[str] = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def lowerCamelCase (self , __magic_name__ ) -> Tuple:
'''simple docstring'''
if item in self.set:
self.set.remove(__magic_name__ )
snake_case_ : List[str] = []
((snake_case_) , (snake_case_)) : List[str] = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
((snake_case_) , (snake_case_)) : Tuple = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def lowerCamelCase (self ) -> Tuple:
'''simple docstring'''
return self.elements[0][1]
def lowerCamelCase (self ) -> Optional[int]:
'''simple docstring'''
((snake_case_) , (snake_case_)) : Any = heapq.heappop(self.elements )
self.set.remove(__magic_name__ )
return (priority, item)
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> List[Any]:
"""simple docstring"""
snake_case_ : Union[str, Any] = np.array(_UpperCamelCase )
snake_case_ : int = np.array(_UpperCamelCase )
return np.linalg.norm(a - b )
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> Dict:
"""simple docstring"""
return consistent_heuristic(_UpperCamelCase , _UpperCamelCase ) // t
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> List[Any]:
"""simple docstring"""
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> int:
"""simple docstring"""
snake_case_ : Optional[int] = g_function[start] + Wa * heuristics[i](_UpperCamelCase , _UpperCamelCase )
return ans
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Any:
"""simple docstring"""
snake_case_ : List[Any] = np.chararray((n, n) )
for i in range(_UpperCamelCase ):
for j in range(_UpperCamelCase ):
snake_case_ : Any = '''*'''
for i in range(_UpperCamelCase ):
for j in range(_UpperCamelCase ):
if (j, (n - 1) - i) in blocks:
snake_case_ : Dict = '''#'''
snake_case_ : List[str] = '''-'''
snake_case_ : Dict = back_pointer[goal]
while x != start:
((snake_case_) , (snake_case_)) : Dict = x
# print(x)
snake_case_ : Dict = '''-'''
snake_case_ : str = back_pointer[x]
snake_case_ : Union[str, Any] = '''-'''
for i in range(_UpperCamelCase ):
for j in range(_UpperCamelCase ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=''' ''' )
print('''<-- End position''' , end=''' ''' )
else:
print(grid[i][j] , end=''' ''' )
print()
print('''^''' )
print('''Start position''' )
print()
print('''# is an obstacle''' )
print('''- is the path taken by algorithm''' )
print('''PATH TAKEN BY THE ALGORITHM IS:-''' )
snake_case_ : Optional[int] = back_pointer[goal]
while x != start:
print(_UpperCamelCase , end=''' ''' )
snake_case_ : Tuple = back_pointer[x]
print(_UpperCamelCase )
sys.exit()
def lowerCamelCase_ ( _UpperCamelCase ) -> Union[str, Any]:
"""simple docstring"""
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> Any:
"""simple docstring"""
for itera in range(_UpperCamelCase ):
open_list[itera].remove_element(_UpperCamelCase )
# print("s", s)
# print("j", j)
((snake_case_) , (snake_case_)) : str = s
snake_case_ : Dict = (x - 1, y)
snake_case_ : str = (x + 1, y)
snake_case_ : List[str] = (x, y + 1)
snake_case_ : Optional[Any] = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(_UpperCamelCase ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(_UpperCamelCase )
snake_case_ : int = -1
snake_case_ : str = float('''inf''' )
if valid(_UpperCamelCase ) and g_function[neighbours] > g_function[s] + 1:
snake_case_ : List[Any] = g_function[s] + 1
snake_case_ : Optional[int] = s
if neighbours not in close_list_anchor:
open_list[0].put(_UpperCamelCase , key(_UpperCamelCase , 0 , _UpperCamelCase , _UpperCamelCase ) )
if neighbours not in close_list_inad:
for var in range(1 , _UpperCamelCase ):
if key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) <= Wa * key(
_UpperCamelCase , 0 , _UpperCamelCase , _UpperCamelCase ):
open_list[j].put(
_UpperCamelCase , key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) )
def lowerCamelCase_ ( ) -> Dict:
"""simple docstring"""
snake_case_ : List[Any] = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
lowerCAmelCase_ = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
lowerCAmelCase_ = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(1_0, 1),
(1_1, 1),
(1_2, 1),
(1_3, 1),
(1_4, 1),
(1_5, 1),
(1_6, 1),
(1_7, 1),
(1_8, 1),
(1_9, 1),
]
lowerCAmelCase_ = make_common_ground()
lowerCAmelCase_ = blocks_blk
# hyper parameters
lowerCAmelCase_ = 1
lowerCAmelCase_ = 1
lowerCAmelCase_ = 2_0
lowerCAmelCase_ = 3 # one consistent and two other inconsistent
# start and end destination
lowerCAmelCase_ = (0, 0)
lowerCAmelCase_ = (n - 1, n - 1)
lowerCAmelCase_ = 1
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Optional[int]:
"""simple docstring"""
snake_case_ : Optional[Any] = {start: 0, goal: float('''inf''' )}
snake_case_ : int = {start: -1, goal: -1}
snake_case_ : Any = []
snake_case_ : Tuple = set()
for i in range(_UpperCamelCase ):
open_list.append(PriorityQueue() )
open_list[i].put(_UpperCamelCase , key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) )
snake_case_ : list[int] = []
snake_case_ : list[int] = []
while open_list[0].minkey() < float('''inf''' ):
for i in range(1 , _UpperCamelCase ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float('''inf''' ):
do_something(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
else:
snake_case_ , snake_case_ : Dict = open_list[i].top_show()
visited.add(_UpperCamelCase )
expand_state(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , )
close_list_inad.append(_UpperCamelCase )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float('''inf''' ):
do_something(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
else:
snake_case_ : List[Any] = open_list[0].top_show()
visited.add(_UpperCamelCase )
expand_state(
_UpperCamelCase , 0 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , )
close_list_anchor.append(_UpperCamelCase )
print('''No path found to goal''' )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(_UpperCamelCase ):
if (j, i) in blocks:
print('''#''' , end=''' ''' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('''*''' , end=''' ''' )
else:
print('''-''' , end=''' ''' )
else:
print('''*''' , end=''' ''' )
if (j, i) == (n - 1, n - 1):
print('''<-- End position''' , end=''' ''' )
print()
print('''^''' )
print('''Start position''' )
print()
print('''# is an obstacle''' )
print('''- is the path taken by algorithm''' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 60 |
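The priority used throughout the search above is the standard weighted-A* key, g(s) + Wa * h_i(s, goal). A tiny worked check with the values configured above (Wa = 1, goal = (n - 1, n - 1) = (19, 19), Manhattan heuristic):
# at the start cell (0, 0) with g = 0, the Manhattan estimate is |0 - 19| + |0 - 19| = 38,
# so the anchor-queue key is 0 + 1 * 38 = 38
assert abs(0 - 19) + abs(0 - 19) == 38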
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class __lowerCAmelCase ( unittest.TestCase ):
lowerCamelCase_ : Tuple = inspect.getfile(accelerate.test_utils )
lowerCamelCase_ : Optional[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_cli.py'''] )
lowerCamelCase_ : Union[str, Any] = ['''accelerate''', '''launch''']
lowerCamelCase_ : Tuple = Path.home() / '''.cache/huggingface/accelerate'''
lowerCamelCase_ : Tuple = '''default_config.yaml'''
lowerCamelCase_ : str = config_folder / config_file
lowerCamelCase_ : List[Any] = config_folder / '''_default_config.yaml'''
lowerCamelCase_ : Dict = Path('''tests/test_configs''' )
@classmethod
def lowerCamelCase (cls ) -> Dict:
'''simple docstring'''
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
def lowerCamelCase (cls ) -> Any:
'''simple docstring'''
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
def lowerCamelCase (self ) -> Tuple:
'''simple docstring'''
snake_case_ : Dict = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
for config in sorted(self.test_config_path.glob('''**/*.yaml''' ) ):
with self.subTest(config_file=__magic_name__ ):
execute_subprocess_async(
self.base_cmd + ['''--config_file''', str(__magic_name__ ), self.test_file_path] , env=os.environ.copy() )
def lowerCamelCase (self ) -> List[Any]:
'''simple docstring'''
execute_subprocess_async(['''accelerate''', '''test'''] , env=os.environ.copy() )
class __lowerCAmelCase ( unittest.TestCase ):
lowerCamelCase_ : List[str] = '''test-tpu'''
lowerCamelCase_ : Dict = '''us-central1-a'''
lowerCamelCase_ : Any = '''ls'''
lowerCamelCase_ : Dict = ['''accelerate''', '''tpu-config''']
lowerCamelCase_ : Tuple = '''cd /usr/share'''
lowerCamelCase_ : List[Any] = '''tests/test_samples/test_command_file.sh'''
lowerCamelCase_ : List[Any] = '''Running gcloud compute tpus tpu-vm ssh'''
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
snake_case_ : int = run_command(
self.cmd
+ ['''--command''', self.command, '''--tpu_zone''', self.tpu_zone, '''--tpu_name''', self.tpu_name, '''--debug'''] , return_stdout=__magic_name__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , __magic_name__ , )
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
snake_case_ : Optional[int] = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/0_12_0.yaml''',
'''--command''',
self.command,
'''--tpu_zone''',
self.tpu_zone,
'''--tpu_name''',
self.tpu_name,
'''--debug''',
] , return_stdout=__magic_name__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , __magic_name__ , )
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : List[str] = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--debug'''] , return_stdout=__magic_name__ )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , __magic_name__ , )
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : List[Any] = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command''', self.command, '''--debug'''] , return_stdout=__magic_name__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , __magic_name__ , )
def lowerCamelCase (self ) -> int:
'''simple docstring'''
snake_case_ : Any = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/latest.yaml''',
'''--command''',
self.command,
'''--command''',
'''echo "Hello World"''',
'''--debug''',
] , return_stdout=__magic_name__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all''' , __magic_name__ , )
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
snake_case_ : str = run_command(
self.cmd
+ ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command_file''', self.command_file, '''--debug'''] , return_stdout=__magic_name__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , __magic_name__ , )
def lowerCamelCase (self ) -> int:
'''simple docstring'''
snake_case_ : Tuple = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/0_12_0.yaml''',
'''--command_file''',
self.command_file,
'''--tpu_zone''',
self.tpu_zone,
'''--tpu_name''',
self.tpu_name,
'''--debug''',
] , return_stdout=__magic_name__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , __magic_name__ , )
def lowerCamelCase (self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Any = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--install_accelerate''', '''--debug'''] , return_stdout=__magic_name__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all''' , __magic_name__ , )
def lowerCamelCase (self ) -> str:
'''simple docstring'''
snake_case_ : Optional[Any] = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/latest.yaml''',
'''--install_accelerate''',
'''--accelerate_version''',
'''12.0.0''',
'''--debug''',
] , return_stdout=__magic_name__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all''' , __magic_name__ , )
| 60 | 1 |
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
lowerCAmelCase_ = re.compile('''[^A-Za-z_0-9]''')
# parameters used in DuplicationIndex
lowerCAmelCase_ = 1_0
lowerCAmelCase_ = 2_5_6
def lowerCamelCase_ ( _UpperCamelCase ) -> Optional[MinHash]:
"""simple docstring"""
if len(_UpperCamelCase ) < MIN_NUM_TOKENS:
return None
    snake_case_ : Optional[int] = MinHash(num_perm=NUM_PERM )
for token in set(_UpperCamelCase ):
min_hash.update(token.encode() )
return min_hash
def lowerCamelCase_ ( _UpperCamelCase ) -> Set[str]:
"""simple docstring"""
return {t for t in NON_ALPHA.split(_UpperCamelCase ) if len(t.strip() ) > 0}
class __lowerCAmelCase :
def __init__(self , *,
__magic_name__ = 0.85 , ) -> int:
'''simple docstring'''
snake_case_ : int = duplication_jaccard_threshold
snake_case_ : Tuple = NUM_PERM
snake_case_ : Dict = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
snake_case_ : Dict = defaultdict(__magic_name__ )
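    # The insertion method below queries the LSH index for near-duplicates of a new key:
    # the key joins the cluster of an already-clustered near-duplicate, or a fresh cluster
    # is started under the first near-duplicate returned.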
def lowerCamelCase (self , __magic_name__ , __magic_name__ ) -> None:
'''simple docstring'''
snake_case_ : str = self._index.query(__magic_name__ )
if code_key in self._index.keys:
print(F'''Duplicate key {code_key}''' )
return
self._index.insert(__magic_name__ , __magic_name__ )
if len(__magic_name__ ) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(__magic_name__ )
break
else:
self._duplicate_clusters[close_duplicates[0]].add(__magic_name__ )
def lowerCamelCase (self ) -> List[List[Dict]]:
'''simple docstring'''
snake_case_ : List[Any] = []
for base, duplicates in self._duplicate_clusters.items():
snake_case_ : List[str] = [base] + list(__magic_name__ )
# reformat the cluster to be a list of dict
snake_case_ : int = [{'''base_index''': el[0], '''repo_name''': el[1], '''path''': el[2]} for el in cluster]
duplicate_clusters.append(__magic_name__ )
return duplicate_clusters
def lowerCamelCase (self , __magic_name__ ) -> None:
'''simple docstring'''
snake_case_ : Optional[Any] = self.get_duplicate_clusters()
with open(__magic_name__ , '''w''' ) as f:
json.dump(__magic_name__ , __magic_name__ )
def lowerCamelCase_ ( _UpperCamelCase ) -> str:
"""simple docstring"""
snake_case_ , snake_case_ : Any = element
snake_case_ : Tuple = get_min_hash([t for t in NON_ALPHA.split(data['''content'''] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def lowerCamelCase_ ( _UpperCamelCase ) -> Union[str, Any]:
"""simple docstring"""
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash , ThreadedIterator(_UpperCamelCase , max_queue_size=10_000 ) , chunksize=100 , ):
if data is not None:
yield data
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> Union[str, Any]:
"""simple docstring"""
snake_case_ : Union[str, Any] = DuplicationIndex(duplication_jaccard_threshold=_UpperCamelCase )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(_UpperCamelCase ) ) , max_queue_size=100 ) ):
di.add(_UpperCamelCase , _UpperCamelCase )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> float:
"""simple docstring"""
snake_case_ : str = get_tokens(_UpperCamelCase )
snake_case_ : Optional[int] = get_tokens(_UpperCamelCase )
return len(tokensa & tokensa ) / len(tokensa | tokensa )
lowerCAmelCase_ = None
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> Tuple:
"""simple docstring"""
snake_case_ : Dict = []
for elementa in cluster:
snake_case_ : int = _shared_dataset[elementa['''base_index''']]['''content''']
for elementa in extremes:
snake_case_ : Dict = _shared_dataset[elementa['''base_index''']]['''content''']
if jaccard_similarity(_UpperCamelCase , _UpperCamelCase ) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
snake_case_ : Optional[Any] = 1
extremes.append(_UpperCamelCase )
return extremes
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> List[Any]:
"""simple docstring"""
global _shared_dataset
snake_case_ : Optional[Any] = dataset
snake_case_ : Union[str, Any] = []
snake_case_ : Any = partial(_find_cluster_extremes_shared , jaccard_threshold=_UpperCamelCase )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
_UpperCamelCase , _UpperCamelCase , ) , total=len(_UpperCamelCase ) , ):
extremes_list.append(_UpperCamelCase )
return extremes_list
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase = 0.85 ) -> Tuple[Type[Dataset], List[List[Dict]]]:
"""simple docstring"""
snake_case_ : Optional[int] = make_duplicate_clusters(_UpperCamelCase , _UpperCamelCase )
snake_case_ : Union[str, Any] = {x['''base_index'''] for cluster in duplicate_clusters for x in cluster}
snake_case_ : Optional[Any] = {}
snake_case_ : str = find_extremes(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
for extremes in extremes_clusters:
for element in extremes:
snake_case_ : Dict = element
snake_case_ : List[Any] = duplicate_indices - set(extreme_dict.keys() )
snake_case_ : str = dataset.filter(lambda _UpperCamelCase , _UpperCamelCase : idx not in remove_indices , with_indices=_UpperCamelCase )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
snake_case_ : str = element['''base_index'''] in extreme_dict
if element["is_extreme"]:
snake_case_ : Union[str, Any] = extreme_dict[element['''base_index''']]['''copies''']
print(f'''Original dataset size: {len(_UpperCamelCase )}''' )
print(f'''Number of duplicate clusters: {len(_UpperCamelCase )}''' )
print(f'''Files in duplicate cluster: {len(_UpperCamelCase )}''' )
print(f'''Unique files in duplicate cluster: {len(_UpperCamelCase )}''' )
print(f'''Filtered dataset size: {len(_UpperCamelCase )}''' )
return ds_filter, duplicate_clusters
| 60 |
import warnings
from ..trainer import Trainer
from ..utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
class __lowerCAmelCase ( _a ):
def __init__(self , __magic_name__=None , **__magic_name__ ) -> Dict:
'''simple docstring'''
warnings.warn(
'''`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` '''
'''instead.''' , __magic_name__ , )
super().__init__(args=__magic_name__ , **__magic_name__ )
| 60 | 1 |
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class __lowerCAmelCase ( _a ):
def __init__(self , __magic_name__ , __magic_name__=13 , __magic_name__=7 , __magic_name__=True , __magic_name__=True , __magic_name__=False , __magic_name__=True , __magic_name__=99 , __magic_name__=32 , __magic_name__=5 , __magic_name__=4 , __magic_name__=37 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=512 , __magic_name__=16 , __magic_name__=2 , __magic_name__=0.02 , __magic_name__=3 , __magic_name__=4 , __magic_name__=None , ) -> Dict:
'''simple docstring'''
snake_case_ : List[str] = parent
snake_case_ : List[Any] = batch_size
snake_case_ : List[Any] = seq_length
snake_case_ : List[Any] = is_training
snake_case_ : Optional[Any] = use_input_mask
snake_case_ : str = use_token_type_ids
snake_case_ : List[str] = use_labels
snake_case_ : int = vocab_size
snake_case_ : int = hidden_size
snake_case_ : Optional[Any] = num_hidden_layers
snake_case_ : Union[str, Any] = num_attention_heads
snake_case_ : Tuple = intermediate_size
snake_case_ : int = hidden_act
snake_case_ : Tuple = hidden_dropout_prob
snake_case_ : Dict = attention_probs_dropout_prob
snake_case_ : List[str] = max_position_embeddings
snake_case_ : List[Any] = type_vocab_size
snake_case_ : List[str] = type_sequence_label_size
snake_case_ : Union[str, Any] = initializer_range
snake_case_ : List[Any] = num_labels
snake_case_ : Optional[int] = num_choices
snake_case_ : Any = scope
def lowerCamelCase (self ) -> str:
'''simple docstring'''
snake_case_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ : List[Any] = None
if self.use_input_mask:
snake_case_ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ : int = None
snake_case_ : Optional[Any] = None
snake_case_ : Tuple = None
if self.use_labels:
snake_case_ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case_ : Dict = ids_tensor([self.batch_size] , self.num_choices )
snake_case_ : Dict = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase (self ) -> Optional[int]:
'''simple docstring'''
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : str = DistilBertModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
snake_case_ : List[Any] = model(__magic_name__ , __magic_name__ )
snake_case_ : List[Any] = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Any:
'''simple docstring'''
snake_case_ : str = DistilBertForMaskedLM(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
snake_case_ : str = model(__magic_name__ , attention_mask=__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> int:
'''simple docstring'''
snake_case_ : Optional[int] = DistilBertForQuestionAnswering(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
snake_case_ : Union[str, Any] = model(
__magic_name__ , attention_mask=__magic_name__ , start_positions=__magic_name__ , end_positions=__magic_name__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Any:
'''simple docstring'''
snake_case_ : str = self.num_labels
snake_case_ : str = DistilBertForSequenceClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
snake_case_ : Optional[int] = model(__magic_name__ , attention_mask=__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> str:
'''simple docstring'''
snake_case_ : Dict = self.num_labels
snake_case_ : List[Any] = DistilBertForTokenClassification(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
snake_case_ : Optional[Any] = model(__magic_name__ , attention_mask=__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> List[str]:
'''simple docstring'''
snake_case_ : Any = self.num_choices
snake_case_ : List[Any] = DistilBertForMultipleChoice(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
snake_case_ : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case_ : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case_ : Tuple = model(
__magic_name__ , attention_mask=__magic_name__ , labels=__magic_name__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCamelCase (self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Dict = self.prepare_config_and_inputs()
((snake_case_) , (snake_case_) , (snake_case_) , (snake_case_) , (snake_case_) , (snake_case_)) : Union[str, Any] = config_and_inputs
snake_case_ : Optional[int] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( _a, _a, unittest.TestCase ):
lowerCamelCase_ : Optional[int] = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
lowerCamelCase_ : str = (
{
'''feature-extraction''': DistilBertModel,
'''fill-mask''': DistilBertForMaskedLM,
'''question-answering''': DistilBertForQuestionAnswering,
'''text-classification''': DistilBertForSequenceClassification,
'''token-classification''': DistilBertForTokenClassification,
'''zero-shot''': DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase_ : Optional[Any] = True
lowerCamelCase_ : List[Any] = True
lowerCamelCase_ : int = True
lowerCamelCase_ : str = True
def lowerCamelCase (self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : int = DistilBertModelTester(self )
snake_case_ : List[str] = ConfigTester(self , config_class=__magic_name__ , dim=37 )
def lowerCamelCase (self ) -> int:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase (self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*__magic_name__ )
def lowerCamelCase (self ) -> Any:
'''simple docstring'''
snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*__magic_name__ )
def lowerCamelCase (self ) -> List[Any]:
'''simple docstring'''
snake_case_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*__magic_name__ )
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*__magic_name__ )
def lowerCamelCase (self ) -> List[Any]:
'''simple docstring'''
snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*__magic_name__ )
def lowerCamelCase (self ) -> List[str]:
'''simple docstring'''
snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*__magic_name__ )
@slow
def lowerCamelCase (self ) -> Tuple:
'''simple docstring'''
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ : Tuple = DistilBertModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
@slow
@require_torch_gpu
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
snake_case_ , snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
snake_case_ : List[str] = True
snake_case_ : Tuple = model_class(config=__magic_name__ )
snake_case_ : Tuple = self._prepare_for_class(__magic_name__ , __magic_name__ )
snake_case_ : List[str] = torch.jit.trace(
__magic_name__ , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(__magic_name__ , os.path.join(__magic_name__ , '''traced_model.pt''' ) )
snake_case_ : Union[str, Any] = torch.jit.load(os.path.join(__magic_name__ , '''traced_model.pt''' ) , map_location=__magic_name__ )
loaded(inputs_dict['''input_ids'''].to(__magic_name__ ) , inputs_dict['''attention_mask'''].to(__magic_name__ ) )
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
@slow
def lowerCamelCase (self ) -> Tuple:
'''simple docstring'''
snake_case_ : Tuple = DistilBertModel.from_pretrained('''distilbert-base-uncased''' )
snake_case_ : List[Any] = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
snake_case_ : Union[str, Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
snake_case_ : int = model(__magic_name__ , attention_mask=__magic_name__ )[0]
snake_case_ : int = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , __magic_name__ )
snake_case_ : Union[str, Any] = torch.tensor(
[[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __magic_name__ , atol=1e-4 ) )
| 60 |
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def lowerCamelCase_ ( _UpperCamelCase ) -> Any:
"""simple docstring"""
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def lowerCamelCase_ ( ) -> Union[str, Any]:
"""simple docstring"""
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def lowerCamelCase_ ( ) -> Tuple:
"""simple docstring"""
snake_case_ : str = '''mock-s3-bucket'''
snake_case_ : str = f'''s3://{mock_bucket}'''
snake_case_ : Any = extract_path_from_uri(_UpperCamelCase )
assert dataset_path.startswith('''s3://''' ) is False
snake_case_ : Optional[Any] = '''./local/path'''
snake_case_ : List[str] = extract_path_from_uri(_UpperCamelCase )
assert dataset_path == new_dataset_path
def lowerCamelCase_ ( _UpperCamelCase ) -> str:
"""simple docstring"""
snake_case_ : Union[str, Any] = is_remote_filesystem(_UpperCamelCase )
assert is_remote is True
snake_case_ : Union[str, Any] = fsspec.filesystem('''file''' )
snake_case_ : int = is_remote_filesystem(_UpperCamelCase )
assert is_remote is False
@pytest.mark.parametrize('''compression_fs_class''' , _UpperCamelCase )
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Tuple:
"""simple docstring"""
snake_case_ : Optional[Any] = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_file, '''bz2''': bza_file, '''lz4''': lza_file}
snake_case_ : Optional[Any] = input_paths[compression_fs_class.protocol]
if input_path is None:
snake_case_ : List[Any] = f'''for \'{compression_fs_class.protocol}\' compression protocol, '''
if compression_fs_class.protocol == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(_UpperCamelCase )
snake_case_ : Dict = fsspec.filesystem(compression_fs_class.protocol , fo=_UpperCamelCase )
assert isinstance(_UpperCamelCase , _UpperCamelCase )
snake_case_ : int = os.path.basename(_UpperCamelCase )
snake_case_ : Any = expected_filename[: expected_filename.rindex('''.''' )]
assert fs.glob('''*''' ) == [expected_filename]
with fs.open(_UpperCamelCase , '''r''' , encoding='''utf-8''' ) as f, open(_UpperCamelCase , encoding='''utf-8''' ) as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize('''protocol''' , ['''zip''', '''gzip'''] )
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Optional[int]:
"""simple docstring"""
snake_case_ : Union[str, Any] = {'''zip''': zip_jsonl_path, '''gzip''': jsonl_gz_path}
snake_case_ : Any = compressed_file_paths[protocol]
snake_case_ : Any = '''dataset.jsonl'''
snake_case_ : Dict = f'''{protocol}://{member_file_path}::{compressed_file_path}'''
snake_case_ , *snake_case_ : Optional[Any] = fsspec.get_fs_token_paths(_UpperCamelCase )
assert fs.isfile(_UpperCamelCase )
assert not fs.isfile('''non_existing_''' + member_file_path )
@pytest.mark.integration
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Dict:
"""simple docstring"""
snake_case_ : Optional[int] = hf_api.dataset_info(_UpperCamelCase , token=_UpperCamelCase )
snake_case_ : List[str] = HfFileSystem(repo_info=_UpperCamelCase , token=_UpperCamelCase )
assert sorted(hffs.glob('''*''' ) ) == [".gitattributes", "data"]
assert hffs.isdir('''data''' )
assert hffs.isfile('''.gitattributes''' ) and hffs.isfile('''data/text_data.txt''' )
with open(_UpperCamelCase ) as f:
assert hffs.open('''data/text_data.txt''' , '''r''' ).read() == f.read()
def lowerCamelCase_ ( ) -> Any:
"""simple docstring"""
snake_case_ : Tuple = '''bz2'''
# Import module
import datasets.filesystems
# Overwrite protocol and reload
register_implementation(_UpperCamelCase , _UpperCamelCase , clobber=_UpperCamelCase )
with pytest.warns(_UpperCamelCase ) as warning_info:
importlib.reload(datasets.filesystems )
assert len(_UpperCamelCase ) == 1
assert (
str(warning_info[0].message )
== f'''A filesystem protocol was already set for {protocol} and will be overwritten.'''
)
| 60 | 1 |
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
lowerCAmelCase_ = [
# tf -> hf
('''/''', '''.'''),
('''layer_''', '''layers.'''),
('''kernel''', '''weight'''),
('''beta''', '''bias'''),
('''gamma''', '''weight'''),
('''pegasus''', '''model'''),
]
lowerCAmelCase_ = [
('''.output.dense''', '''.fc2'''),
('''intermediate.LayerNorm''', '''final_layer_norm'''),
('''intermediate.dense''', '''fc1'''),
]
lowerCAmelCase_ = (
INIT_COMMON
+ [
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.out_proj'''),
('''attention.self''', '''self_attn'''),
('''attention.encdec.LayerNorm''', '''encoder_attn_layer_norm'''),
('''attention.encdec_output.dense''', '''encoder_attn.out_proj'''),
('''attention.encdec''', '''encoder_attn'''),
('''key''', '''k_proj'''),
('''value''', '''v_proj'''),
('''query''', '''q_proj'''),
('''decoder.LayerNorm''', '''decoder.layernorm_embedding'''),
]
+ END_COMMON
)
lowerCAmelCase_ = (
INIT_COMMON
+ [
('''embeddings.word_embeddings''', '''shared.weight'''),
('''embeddings.position_embeddings''', '''embed_positions.weight'''),
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.output'''),
('''attention.self''', '''self_attn.self'''),
('''encoder.LayerNorm''', '''encoder.layernorm_embedding'''),
]
+ END_COMMON
)
lowerCAmelCase_ = [
'''encdec/key/bias''',
'''encdec/query/bias''',
'''encdec/value/bias''',
'''self/key/bias''',
'''self/query/bias''',
'''self/value/bias''',
'''encdec_output/dense/bias''',
'''attention/output/dense/bias''',
]
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> Optional[int]:
"""simple docstring"""
for tf_name, hf_name in patterns:
snake_case_ : Optional[int] = k.replace(_UpperCamelCase , _UpperCamelCase )
return k
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> BigBirdPegasusForConditionalGeneration:
"""simple docstring"""
snake_case_ : Tuple = BigBirdPegasusConfig(**_UpperCamelCase )
snake_case_ : List[str] = BigBirdPegasusForConditionalGeneration(_UpperCamelCase )
snake_case_ : Tuple = torch_model.state_dict()
snake_case_ : Tuple = {}
# separating decoder weights
snake_case_ : Dict = {k: tf_weights[k] for k in tf_weights if k.startswith('''pegasus/decoder''' )}
snake_case_ : Dict = {k: tf_weights[k] for k in tf_weights if not k.startswith('''pegasus/decoder''' )}
for k, v in tqdm(decoder_weights.items() , '''tf -> hf conversion''' ):
snake_case_ : Optional[int] = [k.endswith(_UpperCamelCase ) for ending in KEYS_TO_IGNORE]
if any(_UpperCamelCase ):
continue
snake_case_ : str = DECODER_PATTERNS
snake_case_ : Optional[int] = rename_state_dict_key(_UpperCamelCase , _UpperCamelCase )
if new_k not in state_dict:
raise ValueError(f'''could not find new key {new_k} in state dict. (converted from {k})''' )
if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ):
snake_case_ : List[str] = v.T
snake_case_ : Dict = torch.from_numpy(_UpperCamelCase )
assert v.shape == state_dict[new_k].shape, f'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'''
for k, v in tqdm(remaining_weights.items() , '''tf -> hf conversion''' ):
snake_case_ : Union[str, Any] = [k.endswith(_UpperCamelCase ) for ending in KEYS_TO_IGNORE]
if any(_UpperCamelCase ):
continue
snake_case_ : Optional[Any] = REMAINING_PATTERNS
snake_case_ : int = rename_state_dict_key(_UpperCamelCase , _UpperCamelCase )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(f'''could not find new key {new_k} in state dict. (converted from {k})''' )
if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ):
snake_case_ : List[str] = v.T
snake_case_ : Dict = torch.from_numpy(_UpperCamelCase )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, f'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'''
snake_case_ : Dict = mapping['''model.embed_positions.weight''']
snake_case_ : Tuple = mapping.pop('''model.embed_positions.weight''' )
snake_case_ , snake_case_ : Union[str, Any] = torch_model.load_state_dict(_UpperCamelCase , strict=_UpperCamelCase )
snake_case_ : Union[str, Any] = [
k
for k in missing
if k
not in [
'''final_logits_bias''',
'''model.encoder.embed_tokens.weight''',
'''model.decoder.embed_tokens.weight''',
'''lm_head.weight''',
]
]
assert unexpected_missing == [], f'''no matches found for the following torch keys {unexpected_missing}'''
assert extra == [], f'''no matches found for the following tf keys {extra}'''
return torch_model
def lowerCamelCase_ ( _UpperCamelCase ) -> Dict:
"""simple docstring"""
snake_case_ : str = tf.train.list_variables(_UpperCamelCase )
snake_case_ : List[str] = {}
snake_case_ : int = ['''global_step''']
for name, shape in tqdm(_UpperCamelCase , desc='''converting tf checkpoint to dict''' ):
snake_case_ : Tuple = any(pat in name for pat in ignore_name )
if skip_key:
continue
snake_case_ : Any = tf.train.load_variable(_UpperCamelCase , _UpperCamelCase )
snake_case_ : int = array
return tf_weights
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> int:
"""simple docstring"""
snake_case_ : List[Any] = get_tf_weights_as_numpy(_UpperCamelCase )
snake_case_ : Optional[Any] = convert_bigbird_pegasus(_UpperCamelCase , _UpperCamelCase )
torch_model.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument('''--tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''--save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
lowerCAmelCase_ = parser.parse_args()
lowerCAmelCase_ = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
| 60 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : Optional[Any] = '''encoder-decoder'''
lowerCamelCase_ : Optional[Any] = True
def __init__(self , **__magic_name__ ) -> Optional[int]:
'''simple docstring'''
super().__init__(**__magic_name__ )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
snake_case_ : Any = kwargs.pop('''encoder''' )
snake_case_ : Tuple = encoder_config.pop('''model_type''' )
snake_case_ : Union[str, Any] = kwargs.pop('''decoder''' )
snake_case_ : Union[str, Any] = decoder_config.pop('''model_type''' )
from ..auto.configuration_auto import AutoConfig
snake_case_ : Optional[int] = AutoConfig.for_model(__magic_name__ , **__magic_name__ )
snake_case_ : List[str] = AutoConfig.for_model(__magic_name__ , **__magic_name__ )
snake_case_ : Any = True
@classmethod
def lowerCamelCase (cls , __magic_name__ , __magic_name__ , **__magic_name__ ) -> PretrainedConfig:
'''simple docstring'''
logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
snake_case_ : Tuple = True
snake_case_ : Optional[Any] = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **__magic_name__ )
def lowerCamelCase (self ) -> int:
'''simple docstring'''
snake_case_ : str = copy.deepcopy(self.__dict__ )
snake_case_ : Any = self.encoder.to_dict()
snake_case_ : Dict = self.decoder.to_dict()
snake_case_ : Union[str, Any] = self.__class__.model_type
return output
| 60 | 1 |
def lowerCamelCase_ ( _UpperCamelCase ) -> list:
"""simple docstring"""
if n_term == "":
return []
snake_case_ : list = []
for temp in range(int(_UpperCamelCase ) ):
series.append(f'''1/{temp + 1}''' if series else '''1''' )
return series
if __name__ == "__main__":
lowerCAmelCase_ = input('''Enter the last number (nth term) of the Harmonic Series''')
print('''Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n''')
print(harmonic_series(nth_term))
| 60 |
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
lowerCAmelCase_ = logging.get_logger(__name__)
class __lowerCAmelCase :
def __init__(self , __magic_name__ , __magic_name__ ) -> List[Any]:
'''simple docstring'''
snake_case_ : Optional[int] = question_encoder
snake_case_ : Optional[int] = generator
snake_case_ : Optional[Any] = self.question_encoder
def lowerCamelCase (self , __magic_name__ ) -> Dict:
'''simple docstring'''
if os.path.isfile(__magic_name__ ):
raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''' )
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
snake_case_ : str = os.path.join(__magic_name__ , '''question_encoder_tokenizer''' )
snake_case_ : List[Any] = os.path.join(__magic_name__ , '''generator_tokenizer''' )
self.question_encoder.save_pretrained(__magic_name__ )
self.generator.save_pretrained(__magic_name__ )
@classmethod
def lowerCamelCase (cls , __magic_name__ , **__magic_name__ ) -> Any:
'''simple docstring'''
from ..auto.tokenization_auto import AutoTokenizer
snake_case_ : List[str] = kwargs.pop('''config''' , __magic_name__ )
if config is None:
snake_case_ : int = RagConfig.from_pretrained(__magic_name__ )
snake_case_ : Dict = AutoTokenizer.from_pretrained(
__magic_name__ , config=config.question_encoder , subfolder='''question_encoder_tokenizer''' )
snake_case_ : Dict = AutoTokenizer.from_pretrained(
__magic_name__ , config=config.generator , subfolder='''generator_tokenizer''' )
return cls(question_encoder=__magic_name__ , generator=__magic_name__ )
def __call__(self , *__magic_name__ , **__magic_name__ ) -> Tuple:
'''simple docstring'''
return self.current_tokenizer(*__magic_name__ , **__magic_name__ )
def lowerCamelCase (self , *__magic_name__ , **__magic_name__ ) -> Dict:
'''simple docstring'''
return self.generator.batch_decode(*__magic_name__ , **__magic_name__ )
def lowerCamelCase (self , *__magic_name__ , **__magic_name__ ) -> int:
'''simple docstring'''
return self.generator.decode(*__magic_name__ , **__magic_name__ )
def lowerCamelCase (self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Any = self.question_encoder
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
snake_case_ : Dict = self.generator
def lowerCamelCase (self , __magic_name__ , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = "longest" , __magic_name__ = None , __magic_name__ = True , **__magic_name__ , ) -> BatchEncoding:
'''simple docstring'''
warnings.warn(
'''`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '''
'''regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '''
'''context manager to prepare your targets. See the documentation of your specific tokenizer for more '''
'''details''' , __magic_name__ , )
if max_length is None:
snake_case_ : Dict = self.current_tokenizer.model_max_length
snake_case_ : List[str] = self(
__magic_name__ , add_special_tokens=__magic_name__ , return_tensors=__magic_name__ , max_length=__magic_name__ , padding=__magic_name__ , truncation=__magic_name__ , **__magic_name__ , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
snake_case_ : Optional[int] = self.current_tokenizer.model_max_length
snake_case_ : Union[str, Any] = self(
text_target=__magic_name__ , add_special_tokens=__magic_name__ , return_tensors=__magic_name__ , padding=__magic_name__ , max_length=__magic_name__ , truncation=__magic_name__ , **__magic_name__ , )
snake_case_ : str = labels['''input_ids''']
return model_inputs
| 60 | 1 |
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
lowerCAmelCase_ = ['''text''', '''image''', '''audio''']
def lowerCamelCase_ ( _UpperCamelCase ) -> Tuple:
"""simple docstring"""
snake_case_ : Tuple = []
for input_type in input_types:
if input_type == "text":
inputs.append('''Text input''' )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png''' ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3_000 ) )
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
inputs.append(create_inputs(_UpperCamelCase ) )
else:
raise ValueError(f'''Invalid type requested: {input_type}''' )
return inputs
def lowerCamelCase_ ( _UpperCamelCase ) -> Tuple:
"""simple docstring"""
snake_case_ : Dict = []
for output in outputs:
if isinstance(_UpperCamelCase , (str, AgentText) ):
output_types.append('''text''' )
elif isinstance(_UpperCamelCase , (Image.Image, AgentImage) ):
output_types.append('''image''' )
elif isinstance(_UpperCamelCase , (torch.Tensor, AgentAudio) ):
output_types.append('''audio''' )
else:
raise ValueError(f'''Invalid output: {output}''' )
return output_types
@is_tool_test
class __lowerCAmelCase :
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
self.assertTrue(hasattr(self.tool , '''inputs''' ) )
self.assertTrue(hasattr(self.tool , '''outputs''' ) )
snake_case_ : Any = self.tool.inputs
for _input in inputs:
if isinstance(_input , __magic_name__ ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
snake_case_ : Dict = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def lowerCamelCase (self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Union[str, Any] = create_inputs(self.tool.inputs )
snake_case_ : Tuple = self.tool(*__magic_name__ )
# There is a single output
if len(self.tool.outputs ) == 1:
snake_case_ : Optional[Any] = [outputs]
self.assertListEqual(output_types(__magic_name__ ) , self.tool.outputs )
def lowerCamelCase (self ) -> str:
'''simple docstring'''
self.assertTrue(hasattr(self.tool , '''description''' ) )
self.assertTrue(hasattr(self.tool , '''default_checkpoint''' ) )
self.assertTrue(self.tool.description.startswith('''This is a tool that''' ) )
def lowerCamelCase (self ) -> Tuple:
'''simple docstring'''
snake_case_ : Any = create_inputs(self.tool.inputs )
snake_case_ : int = self.tool(*__magic_name__ )
if not isinstance(__magic_name__ , __magic_name__ ):
snake_case_ : Tuple = [outputs]
self.assertEqual(len(__magic_name__ ) , len(self.tool.outputs ) )
for output, output_type in zip(__magic_name__ , self.tool.outputs ):
snake_case_ : Any = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(__magic_name__ , __magic_name__ ) )
def lowerCamelCase (self ) -> Tuple:
'''simple docstring'''
snake_case_ : List[str] = create_inputs(self.tool.inputs )
snake_case_ : Optional[int] = []
for _input, input_type in zip(__magic_name__ , self.tool.inputs ):
if isinstance(__magic_name__ , __magic_name__ ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
snake_case_ : List[str] = self.tool(*__magic_name__ )
if not isinstance(__magic_name__ , __magic_name__ ):
snake_case_ : Dict = [outputs]
self.assertEqual(len(__magic_name__ ) , len(self.tool.outputs ) )
| 60 |
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __lowerCAmelCase :
def __init__(self , __magic_name__ , __magic_name__=13 , __magic_name__=30 , __magic_name__=2 , __magic_name__=3 , __magic_name__=True , __magic_name__=True , __magic_name__=32 , __magic_name__=5 , __magic_name__=4 , __magic_name__=37 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=10 , __magic_name__=0.02 , __magic_name__=None , ) -> List[Any]:
'''simple docstring'''
snake_case_ : List[str] = parent
snake_case_ : Optional[Any] = batch_size
snake_case_ : List[Any] = image_size
snake_case_ : Optional[int] = patch_size
snake_case_ : Optional[Any] = num_channels
snake_case_ : Optional[Any] = is_training
snake_case_ : List[Any] = use_labels
snake_case_ : Optional[int] = hidden_size
snake_case_ : Optional[Any] = num_hidden_layers
snake_case_ : Union[str, Any] = num_attention_heads
snake_case_ : Optional[Any] = intermediate_size
snake_case_ : Any = hidden_act
snake_case_ : List[str] = hidden_dropout_prob
snake_case_ : Dict = attention_probs_dropout_prob
snake_case_ : List[str] = type_sequence_label_size
snake_case_ : Union[str, Any] = initializer_range
snake_case_ : List[Any] = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
snake_case_ : Any = (image_size // patch_size) ** 2
snake_case_ : int = num_patches + 1
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ : List[Any] = None
if self.use_labels:
snake_case_ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ : int = self.get_config()
return config, pixel_values, labels
def lowerCamelCase (self ) -> Tuple:
'''simple docstring'''
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ ) -> List[str]:
'''simple docstring'''
snake_case_ : int = ViTMSNModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
snake_case_ : List[str] = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ ) -> List[str]:
'''simple docstring'''
snake_case_ : int = self.type_sequence_label_size
snake_case_ : Tuple = ViTMSNForImageClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
snake_case_ : Any = model(__magic_name__ , labels=__magic_name__ )
print('''Pixel and labels shape: {pixel_values.shape}, {labels.shape}''' )
print('''Labels: {labels}''' )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
snake_case_ : Optional[int] = 1
snake_case_ : List[str] = ViTMSNForImageClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
snake_case_ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case_ : Any = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
snake_case_ : Any = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ : Optional[int] = config_and_inputs
snake_case_ : Union[str, Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( _a, _a, unittest.TestCase ):
lowerCamelCase_ : List[Any] = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
lowerCamelCase_ : Optional[int] = (
{'''feature-extraction''': ViTMSNModel, '''image-classification''': ViTMSNForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase_ : int = False
lowerCamelCase_ : Optional[int] = False
lowerCamelCase_ : int = False
lowerCamelCase_ : Optional[int] = False
def lowerCamelCase (self ) -> str:
'''simple docstring'''
snake_case_ : List[Any] = ViTMSNModelTester(self )
snake_case_ : Optional[Any] = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ , hidden_size=37 )
def lowerCamelCase (self ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMSN does not use inputs_embeds''' )
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
pass
def lowerCamelCase (self ) -> int:
'''simple docstring'''
snake_case_ , snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : Any = model_class(__magic_name__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case_ : Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__magic_name__ , nn.Linear ) )
def lowerCamelCase (self ) -> int:
'''simple docstring'''
snake_case_ , snake_case_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : Tuple = model_class(__magic_name__ )
snake_case_ : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ : Optional[int] = [*signature.parameters.keys()]
snake_case_ : List[str] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __magic_name__ )
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
snake_case_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def lowerCamelCase (self ) -> List[str]:
'''simple docstring'''
snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__magic_name__ )
@slow
def lowerCamelCase (self ) -> Any:
'''simple docstring'''
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ : str = ViTMSNModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def lowerCamelCase_ ( ) -> Optional[Any]:
"""simple docstring"""
snake_case_ : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
@cached_property
def lowerCamelCase (self ) -> Union[str, Any]:
'''simple docstring'''
return ViTImageProcessor.from_pretrained('''facebook/vit-msn-small''' ) if is_vision_available() else None
@slow
def lowerCamelCase (self ) -> Any:
'''simple docstring'''
torch.manual_seed(2 )
snake_case_ : List[str] = ViTMSNForImageClassification.from_pretrained('''facebook/vit-msn-small''' ).to(__magic_name__ )
snake_case_ : str = self.default_image_processor
snake_case_ : str = prepare_img()
snake_case_ : int = image_processor(images=__magic_name__ , return_tensors='''pt''' ).to(__magic_name__ )
# forward pass
with torch.no_grad():
snake_case_ : Optional[int] = model(**__magic_name__ )
# verify the logits
snake_case_ : Optional[int] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __magic_name__ )
snake_case_ : List[Any] = torch.tensor([-0.0_803, -0.4_454, -0.2_375] ).to(__magic_name__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __magic_name__ , atol=1e-4 ) )
| 60 | 1 |
from __future__ import annotations
def lowerCamelCase_ ( _UpperCamelCase ) -> list[int]:
"""simple docstring"""
return [ord(_UpperCamelCase ) - 96 for elem in plain]
def lowerCamelCase_ ( _UpperCamelCase ) -> str:
"""simple docstring"""
return "".join(chr(elem + 96 ) for elem in encoded )
def lowerCamelCase_ ( ) -> None:
"""simple docstring"""
snake_case_ : List[Any] = encode(input('''-> ''' ).strip().lower() )
print('''Encoded: ''' , _UpperCamelCase )
print('''Decoded:''' , decode(_UpperCamelCase ) )
if __name__ == "__main__":
main()
| 60 |
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''google/efficientnet-b7''': '''https://huggingface.co/google/efficientnet-b7/resolve/main/config.json''',
}
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : List[Any] = '''efficientnet'''
def __init__(self , __magic_name__ = 3 , __magic_name__ = 600 , __magic_name__ = 2.0 , __magic_name__ = 3.1 , __magic_name__ = 8 , __magic_name__ = [3, 3, 5, 3, 5, 5, 3] , __magic_name__ = [32, 16, 24, 40, 80, 112, 192] , __magic_name__ = [16, 24, 40, 80, 112, 192, 320] , __magic_name__ = [] , __magic_name__ = [1, 2, 2, 2, 1, 2, 1] , __magic_name__ = [1, 2, 2, 3, 3, 4, 1] , __magic_name__ = [1, 6, 6, 6, 6, 6, 6] , __magic_name__ = 0.25 , __magic_name__ = "swish" , __magic_name__ = 2560 , __magic_name__ = "mean" , __magic_name__ = 0.02 , __magic_name__ = 0.001 , __magic_name__ = 0.99 , __magic_name__ = 0.5 , __magic_name__ = 0.2 , **__magic_name__ , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**__magic_name__ )
snake_case_ : List[str] = num_channels
snake_case_ : Tuple = image_size
snake_case_ : Union[str, Any] = width_coefficient
snake_case_ : Tuple = depth_coefficient
snake_case_ : Optional[Any] = depth_divisor
snake_case_ : Optional[int] = kernel_sizes
snake_case_ : str = in_channels
snake_case_ : Optional[Any] = out_channels
snake_case_ : int = depthwise_padding
snake_case_ : Optional[Any] = strides
snake_case_ : Any = num_block_repeats
snake_case_ : Optional[Any] = expand_ratios
snake_case_ : Union[str, Any] = squeeze_expansion_ratio
snake_case_ : Union[str, Any] = hidden_act
snake_case_ : Union[str, Any] = hidden_dim
snake_case_ : Any = pooling_type
snake_case_ : List[str] = initializer_range
snake_case_ : str = batch_norm_eps
snake_case_ : Optional[int] = batch_norm_momentum
snake_case_ : Optional[Any] = dropout_rate
snake_case_ : List[str] = drop_connect_rate
snake_case_ : Union[str, Any] = sum(__magic_name__ ) * 4
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : Union[str, Any] = version.parse('''1.11''' )
@property
def lowerCamelCase (self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def lowerCamelCase (self ) -> float:
'''simple docstring'''
return 1e-5
| 60 | 1 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parameters
lowerCAmelCase_ = (7_2_0, 1_2_8_0) # Height, Width
lowerCAmelCase_ = (0.4, 0.6) # if height or width is lower than this scale, drop it.
lowerCAmelCase_ = 1 / 1_0_0
lowerCAmelCase_ = ''''''
lowerCAmelCase_ = ''''''
lowerCAmelCase_ = ''''''
lowerCAmelCase_ = 2_5_0
def lowerCamelCase_ ( ) -> None:
"""simple docstring"""
snake_case_ , snake_case_ : List[str] = get_dataset(_UpperCamelCase , _UpperCamelCase )
for index in range(_UpperCamelCase ):
snake_case_ : List[str] = random.sample(range(len(_UpperCamelCase ) ) , 4 )
snake_case_ , snake_case_ , snake_case_ : Optional[int] = update_image_and_anno(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , filter_scale=_UpperCamelCase , )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
snake_case_ : str = random_chars(32 )
snake_case_ : Optional[Any] = path.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
snake_case_ : Optional[int] = f'''{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}'''
cva.imwrite(f'''{file_root}.jpg''' , _UpperCamelCase , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(f'''Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}''' )
snake_case_ : Tuple = []
for anno in new_annos:
snake_case_ : Any = anno[3] - anno[1]
snake_case_ : Union[str, Any] = anno[4] - anno[2]
snake_case_ : Any = anno[1] + width / 2
snake_case_ : Dict = anno[2] + height / 2
snake_case_ : Optional[int] = f'''{anno[0]} {x_center} {y_center} {width} {height}'''
annos_list.append(_UpperCamelCase )
with open(f'''{file_root}.txt''' , '''w''' ) as outfile:
outfile.write('''\n'''.join(line for line in annos_list ) )
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> tuple[list, list]:
"""simple docstring"""
snake_case_ : Union[str, Any] = []
snake_case_ : int = []
for label_file in glob.glob(os.path.join(_UpperCamelCase , '''*.txt''' ) ):
snake_case_ : int = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
with open(_UpperCamelCase ) as in_file:
snake_case_ : str = in_file.readlines()
snake_case_ : str = os.path.join(_UpperCamelCase , f'''{label_name}.jpg''' )
snake_case_ : List[Any] = []
for obj_list in obj_lists:
snake_case_ : int = obj_list.rstrip('''\n''' ).split(''' ''' )
snake_case_ : List[Any] = float(obj[1] ) - float(obj[3] ) / 2
snake_case_ : Tuple = float(obj[2] ) - float(obj[4] ) / 2
snake_case_ : Dict = float(obj[1] ) + float(obj[3] ) / 2
snake_case_ : Tuple = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(_UpperCamelCase )
labels.append(_UpperCamelCase )
return img_paths, labels
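# Worked example of the label conversion above (illustrative numbers, not from a real
# annotation file): a YOLO-format line "0 0.5 0.5 0.2 0.4" (class, x_center, y_center,
# width, height) gives xmin = 0.5 - 0.2 / 2 = 0.4, ymin = 0.5 - 0.4 / 2 = 0.3,
# xmax = 0.5 + 0.2 / 2 = 0.6, ymax = 0.5 + 0.4 / 2 = 0.7, i.e. the box [0, 0.4, 0.3, 0.6, 0.7].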
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 0.0 , ) -> tuple[list, list, str]:
"""simple docstring"""
snake_case_ : Tuple = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta )
snake_case_ : List[str] = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
snake_case_ : Tuple = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
snake_case_ : Tuple = int(scale_x * output_size[1] )
snake_case_ : Dict = int(scale_y * output_size[0] )
snake_case_ : Dict = []
snake_case_ : List[str] = []
for i, index in enumerate(_UpperCamelCase ):
snake_case_ : List[Any] = all_img_list[index]
path_list.append(_UpperCamelCase )
snake_case_ : Any = all_annos[index]
snake_case_ : Optional[Any] = cva.imread(_UpperCamelCase )
if i == 0: # top-left
snake_case_ : Optional[Any] = cva.resize(_UpperCamelCase , (divid_point_x, divid_point_y) )
snake_case_ : Any = img
for bbox in img_annos:
snake_case_ : Any = bbox[1] * scale_x
snake_case_ : Optional[int] = bbox[2] * scale_y
snake_case_ : List[Any] = bbox[3] * scale_x
snake_case_ : Tuple = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
snake_case_ : Tuple = cva.resize(_UpperCamelCase , (output_size[1] - divid_point_x, divid_point_y) )
snake_case_ : Union[str, Any] = img
for bbox in img_annos:
snake_case_ : Optional[Any] = scale_x + bbox[1] * (1 - scale_x)
snake_case_ : List[Any] = bbox[2] * scale_y
snake_case_ : str = scale_x + bbox[3] * (1 - scale_x)
snake_case_ : List[Any] = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
snake_case_ : str = cva.resize(_UpperCamelCase , (divid_point_x, output_size[0] - divid_point_y) )
snake_case_ : str = img
for bbox in img_annos:
snake_case_ : Tuple = bbox[1] * scale_x
snake_case_ : Any = scale_y + bbox[2] * (1 - scale_y)
snake_case_ : str = bbox[3] * scale_x
snake_case_ : int = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
snake_case_ : Optional[int] = cva.resize(
_UpperCamelCase , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
snake_case_ : int = img
for bbox in img_annos:
snake_case_ : List[str] = scale_x + bbox[1] * (1 - scale_x)
snake_case_ : Tuple = scale_y + bbox[2] * (1 - scale_y)
snake_case_ : Tuple = scale_x + bbox[3] * (1 - scale_x)
snake_case_ : Dict = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
# Remove bounding boxes smaller than the filter scale
if filter_scale > 0:
snake_case_ : int = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
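# Worked example of the quadrant remapping above (illustrative numbers): with
# scale_x = 0.4, a normalized xmin of 0.25 inside the top-right source image maps to
# scale_x + xmin * (1 - scale_x) = 0.4 + 0.25 * 0.6 = 0.55 in the mosaic, i.e. into the
# right-hand 60% of the output canvas; the y coordinates use the same affine form with scale_y.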
def lowerCamelCase_ ( _UpperCamelCase ) -> str:
"""simple docstring"""
assert number_char > 1, "The number of characters should be greater than 1"
snake_case_ : Tuple = ascii_lowercase + digits
return "".join(random.choice(_UpperCamelCase ) for _ in range(_UpperCamelCase ) )
if __name__ == "__main__":
main()
print('''DONE ✅''')
| 60 |
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
lowerCAmelCase_ = logging.getLogger(__name__)
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser(
description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'''
)
parser.add_argument(
'''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.'''
)
parser.add_argument(
'''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.'''
)
parser.add_argument('''--vocab_size''', default=3_0_5_2_2, type=int)
lowerCAmelCase_ = parser.parse_args()
logger.info(F'''Loading data from {args.data_file}''')
with open(args.data_file, '''rb''') as fp:
lowerCAmelCase_ = pickle.load(fp)
logger.info('''Counting occurrences for MLM.''')
lowerCAmelCase_ = Counter()
for tk_ids in data:
counter.update(tk_ids)
lowerCAmelCase_ = [0] * args.vocab_size
for k, v in counter.items():
lowerCAmelCase_ = v
logger.info(F'''Dump to {args.token_counts_dump}''')
with open(args.token_counts_dump, '''wb''') as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
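# A minimal invocation sketch (the script name below is a placeholder; the flags are the
# ones defined by the argument parser above):
#   python token_counts.py \
#       --data_file data/dump.bert-base-uncased.pickle \
#       --token_counts_dump data/token_counts.bert-base-uncased.pickle \
#       --vocab_size 30522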
| 60 | 1 |
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
lowerCAmelCase_ = logging.getLogger(__name__)
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> Optional[Any]:
"""simple docstring"""
snake_case_ : List[str] = np.argmax(_UpperCamelCase , axis=1 )
return np.sum(outputs == labels )
def lowerCamelCase_ ( _UpperCamelCase ) -> List[Any]:
"""simple docstring"""
with open(_UpperCamelCase , encoding='''utf_8''' ) as f:
snake_case_ : List[str] = csv.reader(_UpperCamelCase )
snake_case_ : Dict = []
next(_UpperCamelCase ) # skip the first line
for line in tqdm(_UpperCamelCase ):
output.append((''' '''.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
return output
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Union[str, Any]:
"""simple docstring"""
snake_case_ : Dict = []
for dataset in encoded_datasets:
snake_case_ : List[str] = len(_UpperCamelCase )
snake_case_ : Dict = np.zeros((n_batch, 2, input_len) , dtype=np.intaa )
snake_case_ : Optional[Any] = np.zeros((n_batch, 2) , dtype=np.intaa )
snake_case_ : List[Any] = np.full((n_batch, 2, input_len) , fill_value=-100 , dtype=np.intaa )
snake_case_ : Dict = np.zeros((n_batch,) , dtype=np.intaa )
for i, (story, conta, conta, mc_label) in enumerate(_UpperCamelCase ):
snake_case_ : Union[str, Any] = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
snake_case_ : Any = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
snake_case_ : List[Any] = with_conta
snake_case_ : List[str] = with_conta
snake_case_ : Optional[Any] = len(_UpperCamelCase ) - 1
snake_case_ : int = len(_UpperCamelCase ) - 1
snake_case_ : Optional[Any] = with_conta
snake_case_ : Union[str, Any] = with_conta
snake_case_ : Any = mc_label
snake_case_ : List[str] = (input_ids, mc_token_ids, lm_labels, mc_labels)
tensor_datasets.append(tuple(torch.tensor(_UpperCamelCase ) for t in all_inputs ) )
return tensor_datasets
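# For reference, each encoded choice built above is laid out as
#   [start_token] story [delimiter_token] continuation [clf_token]
# mc_token_ids stores the position of the final [clf_token] (sequence length - 1), the
# lm_labels tensor is pre-filled with -100 so unused positions are ignored by the LM loss,
# and mc_labels holds the index of the correct continuation for the multiple-choice head.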
def lowerCamelCase_ ( ) -> int:
"""simple docstring"""
snake_case_ : str = argparse.ArgumentParser()
parser.add_argument('''--model_name''' , type=_UpperCamelCase , default='''openai-gpt''' , help='''pretrained model name''' )
parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' )
parser.add_argument('''--do_eval''' , action='''store_true''' , help='''Whether to run eval on the dev set.''' )
parser.add_argument(
'''--output_dir''' , default=_UpperCamelCase , type=_UpperCamelCase , required=_UpperCamelCase , help='''The output directory where the model predictions and checkpoints will be written.''' , )
parser.add_argument('''--train_dataset''' , type=_UpperCamelCase , default='''''' )
parser.add_argument('''--eval_dataset''' , type=_UpperCamelCase , default='''''' )
parser.add_argument('''--seed''' , type=_UpperCamelCase , default=42 )
parser.add_argument('''--num_train_epochs''' , type=_UpperCamelCase , default=3 )
parser.add_argument('''--train_batch_size''' , type=_UpperCamelCase , default=8 )
parser.add_argument('''--eval_batch_size''' , type=_UpperCamelCase , default=16 )
parser.add_argument('''--adam_epsilon''' , default=1E-8 , type=_UpperCamelCase , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , type=_UpperCamelCase , default=1 )
parser.add_argument(
'''--max_steps''' , default=-1 , type=_UpperCamelCase , help=(
'''If > 0: set total number of training steps to perform. Overrides num_train_epochs.'''
) , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=_UpperCamelCase , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , )
parser.add_argument('''--learning_rate''' , type=_UpperCamelCase , default=6.25E-5 )
parser.add_argument('''--warmup_steps''' , default=0 , type=_UpperCamelCase , help='''Linear warmup over warmup_steps.''' )
parser.add_argument('''--lr_schedule''' , type=_UpperCamelCase , default='''warmup_linear''' )
parser.add_argument('''--weight_decay''' , type=_UpperCamelCase , default=0.01 )
parser.add_argument('''--lm_coef''' , type=_UpperCamelCase , default=0.9 )
parser.add_argument('''--n_valid''' , type=_UpperCamelCase , default=374 )
parser.add_argument('''--server_ip''' , type=_UpperCamelCase , default='''''' , help='''Can be used for distant debugging.''' )
parser.add_argument('''--server_port''' , type=_UpperCamelCase , default='''''' , help='''Can be used for distant debugging.''' )
snake_case_ : List[Any] = parser.parse_args()
print(_UpperCamelCase )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=_UpperCamelCase )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
snake_case_ : int = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
snake_case_ : str = torch.cuda.device_count()
logger.info('''device: {}, n_gpu {}'''.format(_UpperCamelCase , _UpperCamelCase ) )
if not args.do_train and not args.do_eval:
raise ValueError('''At least one of `do_train` or `do_eval` must be True.''' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# These loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
snake_case_ : Dict = ['''_start_''', '''_delimiter_''', '''_classify_''']
snake_case_ : Any = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(_UpperCamelCase )
snake_case_ : Tuple = tokenizer.convert_tokens_to_ids(_UpperCamelCase )
snake_case_ : Dict = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(_UpperCamelCase ) )
model.to(_UpperCamelCase )
# Load and encode the datasets
def tokenize_and_encode(_UpperCamelCase ):
if isinstance(_UpperCamelCase , _UpperCamelCase ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(_UpperCamelCase ) )
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
return obj
return [tokenize_and_encode(_UpperCamelCase ) for o in obj]
logger.info('''Encoding dataset...''' )
snake_case_ : Tuple = load_rocstories_dataset(args.train_dataset )
snake_case_ : Dict = load_rocstories_dataset(args.eval_dataset )
snake_case_ : Optional[Any] = (train_dataset, eval_dataset)
snake_case_ : Union[str, Any] = tokenize_and_encode(_UpperCamelCase )
# Compute the max input length for the Transformer
snake_case_ : Dict = model.config.n_positions // 2 - 2
snake_case_ : int = max(
len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset )
snake_case_ : str = min(_UpperCamelCase , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
snake_case_ : int = pre_process_datasets(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , *_UpperCamelCase )
snake_case_ , snake_case_ : str = tensor_datasets[0], tensor_datasets[1]
snake_case_ : List[str] = TensorDataset(*_UpperCamelCase )
snake_case_ : int = RandomSampler(_UpperCamelCase )
snake_case_ : Union[str, Any] = DataLoader(_UpperCamelCase , sampler=_UpperCamelCase , batch_size=args.train_batch_size )
snake_case_ : Any = TensorDataset(*_UpperCamelCase )
snake_case_ : str = SequentialSampler(_UpperCamelCase )
snake_case_ : int = DataLoader(_UpperCamelCase , sampler=_UpperCamelCase , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
snake_case_ : Tuple = args.max_steps
snake_case_ : Union[str, Any] = args.max_steps // (len(_UpperCamelCase ) // args.gradient_accumulation_steps) + 1
else:
snake_case_ : List[Any] = len(_UpperCamelCase ) // args.gradient_accumulation_steps * args.num_train_epochs
snake_case_ : Optional[Any] = list(model.named_parameters() )
snake_case_ : List[str] = ['''bias''', '''LayerNorm.bias''', '''LayerNorm.weight''']
snake_case_ : str = [
{
'''params''': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
'''weight_decay''': args.weight_decay,
},
{'''params''': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0},
]
snake_case_ : Optional[int] = AdamW(_UpperCamelCase , lr=args.learning_rate , eps=args.adam_epsilon )
snake_case_ : Optional[int] = get_linear_schedule_with_warmup(
_UpperCamelCase , num_warmup_steps=args.warmup_steps , num_training_steps=_UpperCamelCase )
if args.do_train:
snake_case_ , snake_case_ , snake_case_ : List[str] = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc='''Epoch''' ):
snake_case_ : Tuple = 0
snake_case_ : Union[str, Any] = 0
snake_case_ : Any = tqdm(_UpperCamelCase , desc='''Training''' )
for step, batch in enumerate(_UpperCamelCase ):
snake_case_ : Dict = tuple(t.to(_UpperCamelCase ) for t in batch )
snake_case_ , snake_case_ , snake_case_ , snake_case_ : int = batch
snake_case_ : Union[str, Any] = model(_UpperCamelCase , mc_token_ids=_UpperCamelCase , lm_labels=_UpperCamelCase , mc_labels=_UpperCamelCase )
snake_case_ : str = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
snake_case_ : Optional[Any] = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
snake_case_ : int = '''Training loss: {:.2e} lr: {:.2e}'''.format(_UpperCamelCase , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
snake_case_ : List[Any] = model.module if hasattr(_UpperCamelCase , '''module''' ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
snake_case_ : Any = os.path.join(args.output_dir , _UpperCamelCase )
snake_case_ : Optional[Any] = os.path.join(args.output_dir , _UpperCamelCase )
torch.save(model_to_save.state_dict() , _UpperCamelCase )
model_to_save.config.to_json_file(_UpperCamelCase )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
snake_case_ : int = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
snake_case_ : str = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(_UpperCamelCase )
if args.do_eval:
model.eval()
snake_case_ , snake_case_ : str = 0, 0
snake_case_ , snake_case_ : Any = 0, 0
for batch in tqdm(_UpperCamelCase , desc='''Evaluating''' ):
snake_case_ : Any = tuple(t.to(_UpperCamelCase ) for t in batch )
snake_case_ , snake_case_ , snake_case_ , snake_case_ : int = batch
with torch.no_grad():
snake_case_ , snake_case_ , snake_case_ , snake_case_ : Optional[int] = model(
_UpperCamelCase , mc_token_ids=_UpperCamelCase , lm_labels=_UpperCamelCase , mc_labels=_UpperCamelCase )
snake_case_ : Optional[Any] = mc_logits.detach().cpu().numpy()
snake_case_ : List[Any] = mc_labels.to('''cpu''' ).numpy()
snake_case_ : Any = accuracy(_UpperCamelCase , _UpperCamelCase )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
snake_case_ : Any = eval_loss / nb_eval_steps
snake_case_ : Optional[int] = eval_accuracy / nb_eval_examples
snake_case_ : int = tr_loss / nb_tr_steps if args.do_train else None
snake_case_ : Optional[Any] = {'''eval_loss''': eval_loss, '''eval_accuracy''': eval_accuracy, '''train_loss''': train_loss}
snake_case_ : List[str] = os.path.join(args.output_dir , '''eval_results.txt''' )
with open(_UpperCamelCase , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key in sorted(result.keys() ):
logger.info(''' %s = %s''' , _UpperCamelCase , str(result[key] ) )
writer.write('''%s = %s\n''' % (key, str(result[key] )) )
if __name__ == "__main__":
main()
| 60 |
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class __lowerCAmelCase ( _a ):
def __init__(self , __magic_name__ = "▁" , __magic_name__ = True , __magic_name__ = "<unk>" , __magic_name__ = "</s>" , __magic_name__ = "<pad>" , ) -> Dict:
'''simple docstring'''
snake_case_ : List[Any] = {
'''pad''': {'''id''': 0, '''token''': pad_token},
'''eos''': {'''id''': 1, '''token''': eos_token},
'''unk''': {'''id''': 2, '''token''': unk_token},
}
snake_case_ : List[str] = [None] * len(self.special_tokens )
for token_dict in self.special_tokens.values():
snake_case_ : int = token_dict['''token''']
snake_case_ : Optional[int] = Tokenizer(Unigram() )
snake_case_ : int = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(''' {2,}''' ) , ''' ''' ),
normalizers.Lowercase(),
] )
snake_case_ : Optional[int] = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=__magic_name__ , add_prefix_space=__magic_name__ ),
pre_tokenizers.Digits(individual_digits=__magic_name__ ),
pre_tokenizers.Punctuation(),
] )
snake_case_ : Tuple = decoders.Metaspace(replacement=__magic_name__ , add_prefix_space=__magic_name__ )
snake_case_ : Optional[Any] = TemplateProcessing(
single=F'''$A {self.special_tokens["eos"]["token"]}''' , special_tokens=[(self.special_tokens['''eos''']['''token'''], self.special_tokens['''eos''']['''id'''])] , )
snake_case_ : Optional[Any] = {
'''model''': '''SentencePieceUnigram''',
'''replacement''': replacement,
'''add_prefix_space''': add_prefix_space,
}
super().__init__(__magic_name__ , __magic_name__ )
def lowerCamelCase (self , __magic_name__ , __magic_name__ = 8000 , __magic_name__ = True , ) -> List[str]:
'''simple docstring'''
snake_case_ : Tuple = trainers.UnigramTrainer(
vocab_size=__magic_name__ , special_tokens=self.special_tokens_list , show_progress=__magic_name__ , )
if isinstance(__magic_name__ , __magic_name__ ):
snake_case_ : Dict = [files]
self._tokenizer.train(__magic_name__ , trainer=__magic_name__ )
self.add_unk_id()
def lowerCamelCase (self , __magic_name__ , __magic_name__ = 8000 , __magic_name__ = True , ) -> int:
'''simple docstring'''
snake_case_ : Any = trainers.UnigramTrainer(
vocab_size=__magic_name__ , special_tokens=self.special_tokens_list , show_progress=__magic_name__ , )
self._tokenizer.train_from_iterator(__magic_name__ , trainer=__magic_name__ )
self.add_unk_id()
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Tuple = json.loads(self._tokenizer.to_str() )
snake_case_ : Union[str, Any] = self.special_tokens['''unk''']['''id''']
snake_case_ : Tuple = Tokenizer.from_str(json.dumps(__magic_name__ ) )
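# A minimal sketch of the same Unigram training flow using the `tokenizers` library
# directly (the tiny corpus is made up for illustration; the wrapper class above drives
# essentially these calls, adding its own normalizers and pre-tokenizers):
from tokenizers import Tokenizer, trainers
from tokenizers.models import Unigram

example_corpus = ["a small toy sentence", "another toy sentence"]
example_tokenizer = Tokenizer(Unigram())
example_trainer = trainers.UnigramTrainer(
    vocab_size=50, special_tokens=["<pad>", "</s>", "<unk>"], show_progress=False
)
example_tokenizer.train_from_iterator(example_corpus, trainer=example_trainer)
print(example_tokenizer.encode("a toy sentence").tokens)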
| 60 | 1 |
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def lowerCamelCase_ ( _UpperCamelCase ) -> Dict:
"""simple docstring"""
snake_case_ : List[str] = torch.exp(_UpperCamelCase )
snake_case_ : List[str] = torch.sum(_UpperCamelCase , dim=1 ) # sum of exp(x_i)
snake_case_ : Union[str, Any] = torch.sum(x * exp_x , dim=1 ) # sum of x_i * exp(x_i)
return torch.log(_UpperCamelCase ) - B / A
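# The quantity returned above is the Shannon entropy of softmax(x) along dim=1: a row of
# identical logits gives log(n) (≈ 1.386 for n = 4), while a strongly peaked row gives a
# value close to 0. A small sanity check, assuming the function is exposed as `entropy`
# (the name used further below in this module):
#   entropy(torch.zeros(1, 4))                      # -> tensor([1.3863])
#   entropy(torch.tensor([[10.0, 0.0, 0.0, 0.0]]))  # -> a value close to 0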
class __lowerCAmelCase ( nn.Module ):
def __init__(self , __magic_name__ ) -> Tuple:
'''simple docstring'''
super().__init__()
snake_case_ : Any = config.output_attentions
snake_case_ : Any = config.output_hidden_states
snake_case_ : Union[str, Any] = nn.ModuleList([BertLayer(__magic_name__ ) for _ in range(config.num_hidden_layers )] )
snake_case_ : Tuple = nn.ModuleList([BertHighway(__magic_name__ ) for _ in range(config.num_hidden_layers )] )
snake_case_ : Any = [-1 for _ in range(config.num_hidden_layers )]
def lowerCamelCase (self , __magic_name__ ) -> Optional[int]:
'''simple docstring'''
if (type(__magic_name__ ) is float) or (type(__magic_name__ ) is int):
for i in range(len(self.early_exit_entropy ) ):
snake_case_ : Optional[Any] = x
else:
snake_case_ : str = x
def lowerCamelCase (self , __magic_name__ ) -> Any:
'''simple docstring'''
snake_case_ : Tuple = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def lowerCamelCase (self , __magic_name__ , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__=None , ) -> Tuple:
'''simple docstring'''
snake_case_ : Union[str, Any] = ()
snake_case_ : str = ()
snake_case_ : Any = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
snake_case_ : Optional[Any] = all_hidden_states + (hidden_states,)
snake_case_ : Union[str, Any] = layer_module(
__magic_name__ , __magic_name__ , head_mask[i] , __magic_name__ , __magic_name__ )
snake_case_ : List[str] = layer_outputs[0]
if self.output_attentions:
snake_case_ : List[str] = all_attentions + (layer_outputs[1],)
snake_case_ : int = (hidden_states,)
if self.output_hidden_states:
snake_case_ : str = current_outputs + (all_hidden_states,)
if self.output_attentions:
snake_case_ : Optional[int] = current_outputs + (all_attentions,)
snake_case_ : Dict = self.highway[i](__magic_name__ )
# logits, pooled_output
if not self.training:
snake_case_ : List[str] = highway_exit[0]
snake_case_ : str = entropy(__magic_name__ )
snake_case_ : Any = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
snake_case_ : Optional[Any] = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
snake_case_ : Optional[Any] = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(__magic_name__ , i + 1 )
else:
snake_case_ : List[str] = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
snake_case_ : Dict = all_hidden_states + (hidden_states,)
snake_case_ : Union[str, Any] = (hidden_states,)
if self.output_hidden_states:
snake_case_ : int = outputs + (all_hidden_states,)
if self.output_attentions:
snake_case_ : Tuple = outputs + (all_attentions,)
snake_case_ : Dict = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
'''The Bert Model transformer with early exiting (DeeBERT). ''', _a, )
class __lowerCAmelCase ( _a ):
def __init__(self , __magic_name__ ) -> int:
'''simple docstring'''
super().__init__(__magic_name__ )
snake_case_ : Any = config
snake_case_ : int = BertEmbeddings(__magic_name__ )
snake_case_ : str = DeeBertEncoder(__magic_name__ )
snake_case_ : Any = BertPooler(__magic_name__ )
self.init_weights()
def lowerCamelCase (self ) -> str:
'''simple docstring'''
self.encoder.init_highway_pooler(self.pooler )
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
return self.embeddings.word_embeddings
def lowerCamelCase (self , __magic_name__ ) -> Tuple:
'''simple docstring'''
snake_case_ : List[str] = value
def lowerCamelCase (self , __magic_name__ ) -> Dict:
'''simple docstring'''
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(__magic_name__ )
@add_start_docstrings_to_model_forward(__magic_name__ )
def lowerCamelCase (self , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__=None , ) -> List[Any]:
'''simple docstring'''
if input_ids is not None and inputs_embeds is not None:
raise ValueError('''You cannot specify both input_ids and inputs_embeds at the same time''' )
elif input_ids is not None:
snake_case_ : Union[str, Any] = input_ids.size()
elif inputs_embeds is not None:
snake_case_ : int = inputs_embeds.size()[:-1]
else:
raise ValueError('''You have to specify either input_ids or inputs_embeds''' )
snake_case_ : Union[str, Any] = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
snake_case_ : Optional[Any] = torch.ones(__magic_name__ , device=__magic_name__ )
if encoder_attention_mask is None:
snake_case_ : Optional[int] = torch.ones(__magic_name__ , device=__magic_name__ )
if token_type_ids is None:
snake_case_ : int = torch.zeros(__magic_name__ , dtype=torch.long , device=__magic_name__ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
snake_case_ : torch.Tensor = self.get_extended_attention_mask(__magic_name__ , __magic_name__ , __magic_name__ )
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
snake_case_ : Any = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
snake_case_ : List[str] = encoder_attention_mask[:, None, None, :]
snake_case_ : Optional[Any] = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
snake_case_ : Any = (1.0 - encoder_extended_attention_mask) * -10_000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
snake_case_ : Any = self.get_head_mask(__magic_name__ , self.config.num_hidden_layers )
snake_case_ : Tuple = self.embeddings(
input_ids=__magic_name__ , position_ids=__magic_name__ , token_type_ids=__magic_name__ , inputs_embeds=__magic_name__ )
snake_case_ : List[Any] = self.encoder(
__magic_name__ , attention_mask=__magic_name__ , head_mask=__magic_name__ , encoder_hidden_states=__magic_name__ , encoder_attention_mask=__magic_name__ , )
snake_case_ : List[str] = encoder_outputs[0]
snake_case_ : List[Any] = self.pooler(__magic_name__ )
snake_case_ : Any = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class __lowerCAmelCase ( _a ):
def __init__(self , __magic_name__ , __magic_name__ ) -> Tuple:
'''simple docstring'''
snake_case_ : Dict = message
snake_case_ : Dict = exit_layer # start from 1!
class __lowerCAmelCase ( nn.Module ):
def __init__(self , __magic_name__ ) -> List[str]:
'''simple docstring'''
super().__init__()
snake_case_ : Optional[int] = BertPooler(__magic_name__ )
snake_case_ : Optional[int] = nn.Dropout(config.hidden_dropout_prob )
snake_case_ : Union[str, Any] = nn.Linear(config.hidden_size , config.num_labels )
def lowerCamelCase (self , __magic_name__ ) -> Optional[int]:
'''simple docstring'''
snake_case_ : str = encoder_outputs[0]
snake_case_ : Dict = self.pooler(__magic_name__ )
# "return" pooler_output
# BertModel
snake_case_ : Any = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
snake_case_ : Dict = bmodel_output[1]
snake_case_ : List[Any] = self.dropout(__magic_name__ )
snake_case_ : Any = self.classifier(__magic_name__ )
return logits, pooled_output
@add_start_docstrings(
'''Bert Model (with early exiting - DeeBERT) with a classifier on top,
also takes care of multi-layer training. ''', _a, )
class __lowerCAmelCase ( _a ):
def __init__(self , __magic_name__ ) -> int:
'''simple docstring'''
super().__init__(__magic_name__ )
snake_case_ : Dict = config.num_labels
snake_case_ : Union[str, Any] = config.num_hidden_layers
snake_case_ : str = DeeBertModel(__magic_name__ )
snake_case_ : Union[str, Any] = nn.Dropout(config.hidden_dropout_prob )
snake_case_ : Dict = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(__magic_name__ )
def lowerCamelCase (self , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__=-1 , __magic_name__=False , ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Optional[Any] = self.num_layers
try:
snake_case_ : int = self.bert(
__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , position_ids=__magic_name__ , head_mask=__magic_name__ , inputs_embeds=__magic_name__ , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
snake_case_ : List[str] = outputs[1]
snake_case_ : List[str] = self.dropout(__magic_name__ )
snake_case_ : Tuple = self.classifier(__magic_name__ )
snake_case_ : Optional[Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
snake_case_ : Optional[int] = e.message
snake_case_ : Dict = e.exit_layer
snake_case_ : str = outputs[0]
if not self.training:
snake_case_ : Optional[int] = entropy(__magic_name__ )
snake_case_ : Optional[int] = []
snake_case_ : List[str] = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
snake_case_ : str = MSELoss()
snake_case_ : Optional[int] = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
snake_case_ : List[Any] = CrossEntropyLoss()
snake_case_ : str = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
snake_case_ : str = []
for highway_exit in outputs[-1]:
snake_case_ : List[Any] = highway_exit[0]
if not self.training:
highway_logits_all.append(__magic_name__ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
snake_case_ : Optional[Any] = MSELoss()
snake_case_ : Union[str, Any] = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
snake_case_ : Tuple = CrossEntropyLoss()
snake_case_ : Optional[int] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(__magic_name__ )
if train_highway:
snake_case_ : Union[str, Any] = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
snake_case_ : int = (loss,) + outputs
if not self.training:
snake_case_ : Dict = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
snake_case_ : List[Any] = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 60 |
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of the same set.
def lowerCamelCase_ ( _UpperCamelCase ) -> Optional[int]:
"""simple docstring"""
snake_case_ : List[Any] = [False] * len(_UpperCamelCase )
snake_case_ : int = [-1] * len(_UpperCamelCase )
def dfs(_UpperCamelCase , _UpperCamelCase ):
snake_case_ : Dict = True
snake_case_ : Dict = c
for u in graph[v]:
if not visited[u]:
dfs(_UpperCamelCase , 1 - c )
for i in range(len(_UpperCamelCase ) ):
if not visited[i]:
dfs(_UpperCamelCase , 0 )
for i in range(len(_UpperCamelCase ) ):
for j in graph[i]:
if color[i] == color[j]:
return False
return True
# Adjacency list of graph
lowerCAmelCase_ = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
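# As a counter-example to the definition above, any graph containing an odd cycle is not
# bipartite. A hypothetical extra check (reusing the check_bipartite_dfs name from the
# call above) would be:
#   print(check_bipartite_dfs({0: [1, 2], 1: [0, 2], 2: [0, 1]}))  # triangle -> False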
| 60 | 1 |
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Optional[Any]:
"""simple docstring"""
if n == 0:
return 1
elif n % 2 == 1:
return (binary_exponentiation(_UpperCamelCase , n - 1 , _UpperCamelCase ) * a) % mod
else:
snake_case_ : Optional[Any] = binary_exponentiation(_UpperCamelCase , n / 2 , _UpperCamelCase )
return (b * b) % mod
# a prime number
lowerCAmelCase_ = 7_0_1
lowerCAmelCase_ = 1_0_0_0_0_0_0_0_0_0
lowerCAmelCase_ = 1_0
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
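# Worked example of the identity above, with small numbers: for p = 5 and b = 3, Fermat's
# little theorem gives b ** (p - 2) % p = 3 ** 3 % 5 = 27 % 5 = 2, and indeed
# 3 * 2 = 6 ≡ 1 (mod 5), so 2 is the modular inverse of 3 modulo 5. The two print
# statements above are meant to check the same identity for the larger prime p defined above.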
| 60 |
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class __lowerCAmelCase ( unittest.TestCase ):
def __init__(self , __magic_name__ , __magic_name__=7 , __magic_name__=3 , __magic_name__=18 , __magic_name__=30 , __magic_name__=400 , __magic_name__=True , __magic_name__=None , __magic_name__=True , __magic_name__=None , __magic_name__=True , __magic_name__=[0.5, 0.5, 0.5] , __magic_name__=[0.5, 0.5, 0.5] , __magic_name__=False , ) -> int:
'''simple docstring'''
snake_case_ : int = size if size is not None else {'''height''': 20, '''width''': 20}
snake_case_ : int = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
snake_case_ : str = parent
snake_case_ : Optional[int] = batch_size
snake_case_ : Dict = num_channels
snake_case_ : List[Any] = image_size
snake_case_ : Union[str, Any] = min_resolution
snake_case_ : Tuple = max_resolution
snake_case_ : str = do_resize
snake_case_ : Tuple = size
snake_case_ : int = do_center_crop
snake_case_ : Tuple = crop_size
snake_case_ : int = do_normalize
snake_case_ : Optional[Any] = image_mean
snake_case_ : List[str] = image_std
snake_case_ : str = do_reduce_labels
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def lowerCamelCase_ ( ) -> List[Any]:
"""simple docstring"""
snake_case_ : Any = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
snake_case_ : Union[str, Any] = Image.open(dataset[0]['''file'''] )
snake_case_ : str = Image.open(dataset[1]['''file'''] )
return image, map
def lowerCamelCase_ ( ) -> List[Any]:
"""simple docstring"""
snake_case_ : str = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
snake_case_ : Optional[Any] = Image.open(ds[0]['''file'''] )
snake_case_ : Optional[Any] = Image.open(ds[1]['''file'''] )
snake_case_ : List[str] = Image.open(ds[2]['''file'''] )
snake_case_ : str = Image.open(ds[3]['''file'''] )
return [imagea, imagea], [mapa, mapa]
@require_torch
@require_vision
class __lowerCAmelCase ( _a, unittest.TestCase ):
lowerCamelCase_ : List[Any] = BeitImageProcessor if is_vision_available() else None
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : int = BeitImageProcessingTester(self )
@property
def lowerCamelCase (self ) -> str:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase (self ) -> int:
'''simple docstring'''
snake_case_ : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__magic_name__ , '''do_resize''' ) )
self.assertTrue(hasattr(__magic_name__ , '''size''' ) )
self.assertTrue(hasattr(__magic_name__ , '''do_center_crop''' ) )
self.assertTrue(hasattr(__magic_name__ , '''center_crop''' ) )
self.assertTrue(hasattr(__magic_name__ , '''do_normalize''' ) )
self.assertTrue(hasattr(__magic_name__ , '''image_mean''' ) )
self.assertTrue(hasattr(__magic_name__ , '''image_std''' ) )
def lowerCamelCase (self ) -> Any:
'''simple docstring'''
snake_case_ : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 20, '''width''': 20} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
self.assertEqual(image_processor.do_reduce_labels , __magic_name__ )
snake_case_ : Union[str, Any] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=__magic_name__ )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
self.assertEqual(image_processor.do_reduce_labels , __magic_name__ )
def lowerCamelCase (self ) -> Any:
'''simple docstring'''
pass
def lowerCamelCase (self ) -> str:
'''simple docstring'''
snake_case_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , Image.Image )
# Test not batched input
snake_case_ : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
snake_case_ : Any = image_processing(__magic_name__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
snake_case_ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case_ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , numpify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , np.ndarray )
# Test not batched input
snake_case_ : Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
snake_case_ : Optional[int] = image_processing(__magic_name__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , torchify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , torch.Tensor )
# Test not batched input
snake_case_ : Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
snake_case_ : List[str] = image_processing(__magic_name__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , torchify=__magic_name__ )
snake_case_ : Union[str, Any] = []
for image in image_inputs:
self.assertIsInstance(__magic_name__ , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
snake_case_ : List[str] = image_processing(image_inputs[0] , maps[0] , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
# Test batched
snake_case_ : Any = image_processing(__magic_name__ , __magic_name__ , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
# Test not batched input (PIL images)
snake_case_ , snake_case_ : Optional[int] = prepare_semantic_single_inputs()
snake_case_ : int = image_processing(__magic_name__ , __magic_name__ , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
# Test batched input (PIL images)
snake_case_ , snake_case_ : Dict = prepare_semantic_batch_inputs()
snake_case_ : Optional[int] = image_processing(__magic_name__ , __magic_name__ , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
2,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
snake_case_ , snake_case_ : Tuple = prepare_semantic_single_inputs()
snake_case_ : Optional[int] = image_processing(__magic_name__ , __magic_name__ , return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 150 )
snake_case_ : List[Any] = True
snake_case_ : int = image_processing(__magic_name__ , __magic_name__ , return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
| 60 | 1 |
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def lowerCamelCase_ ( _UpperCamelCase ) -> Any:
"""simple docstring"""
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def lowerCamelCase_ ( ) -> Union[str, Any]:
"""simple docstring"""
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def lowerCamelCase_ ( ) -> Tuple:
"""simple docstring"""
snake_case_ : str = '''mock-s3-bucket'''
snake_case_ : str = f'''s3://{mock_bucket}'''
snake_case_ : Any = extract_path_from_uri(_UpperCamelCase )
assert dataset_path.startswith('''s3://''' ) is False
snake_case_ : Optional[Any] = '''./local/path'''
snake_case_ : List[str] = extract_path_from_uri(_UpperCamelCase )
assert dataset_path == new_dataset_path
def lowerCamelCase_ ( _UpperCamelCase ) -> str:
"""simple docstring"""
snake_case_ : Union[str, Any] = is_remote_filesystem(_UpperCamelCase )
assert is_remote is True
snake_case_ : Union[str, Any] = fsspec.filesystem('''file''' )
snake_case_ : int = is_remote_filesystem(_UpperCamelCase )
assert is_remote is False
@pytest.mark.parametrize('''compression_fs_class''' , _UpperCamelCase )
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Tuple:
"""simple docstring"""
snake_case_ : Optional[Any] = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_file, '''bz2''': bza_file, '''lz4''': lza_file}
snake_case_ : Optional[Any] = input_paths[compression_fs_class.protocol]
if input_path is None:
snake_case_ : List[Any] = f'''for \'{compression_fs_class.protocol}\' compression protocol, '''
if compression_fs_class.protocol == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(_UpperCamelCase )
snake_case_ : Dict = fsspec.filesystem(compression_fs_class.protocol , fo=_UpperCamelCase )
assert isinstance(_UpperCamelCase , _UpperCamelCase )
snake_case_ : int = os.path.basename(_UpperCamelCase )
snake_case_ : Any = expected_filename[: expected_filename.rindex('''.''' )]
assert fs.glob('''*''' ) == [expected_filename]
with fs.open(_UpperCamelCase , '''r''' , encoding='''utf-8''' ) as f, open(_UpperCamelCase , encoding='''utf-8''' ) as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize('''protocol''' , ['''zip''', '''gzip'''] )
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Optional[int]:
"""simple docstring"""
snake_case_ : Union[str, Any] = {'''zip''': zip_jsonl_path, '''gzip''': jsonl_gz_path}
snake_case_ : Any = compressed_file_paths[protocol]
snake_case_ : Any = '''dataset.jsonl'''
snake_case_ : Dict = f'''{protocol}://{member_file_path}::{compressed_file_path}'''
snake_case_ , *snake_case_ : Optional[Any] = fsspec.get_fs_token_paths(_UpperCamelCase )
assert fs.isfile(_UpperCamelCase )
assert not fs.isfile('''non_existing_''' + member_file_path )
@pytest.mark.integration
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Dict:
"""simple docstring"""
snake_case_ : Optional[int] = hf_api.dataset_info(_UpperCamelCase , token=_UpperCamelCase )
snake_case_ : List[str] = HfFileSystem(repo_info=_UpperCamelCase , token=_UpperCamelCase )
assert sorted(hffs.glob('''*''' ) ) == [".gitattributes", "data"]
assert hffs.isdir('''data''' )
assert hffs.isfile('''.gitattributes''' ) and hffs.isfile('''data/text_data.txt''' )
with open(_UpperCamelCase ) as f:
assert hffs.open('''data/text_data.txt''' , '''r''' ).read() == f.read()
def lowerCamelCase_ ( ) -> Any:
"""simple docstring"""
snake_case_ : Tuple = '''bz2'''
# Import module
import datasets.filesystems
# Overwrite protocol and reload
register_implementation(_UpperCamelCase , _UpperCamelCase , clobber=_UpperCamelCase )
with pytest.warns(_UpperCamelCase ) as warning_info:
importlib.reload(datasets.filesystems )
assert len(_UpperCamelCase ) == 1
assert (
str(warning_info[0].message )
== f'''A filesystem protocol was already set for {protocol} and will be overwritten.'''
)
| 60 |
from sklearn.metrics import mean_squared_error
import datasets
lowerCAmelCase_ = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
lowerCAmelCase_ = '''\
Mean Squared Error (MSE) is the average of the squared difference between the predicted
and actual values.
'''
lowerCAmelCase_ = '''
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
"raw_values" : Returns a full set of errors in case of multioutput input.
"uniform_average" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric("mse")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{\'mse\': 0.6123724356957945}
If you\'re using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric("mse", "multilist")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mse\': array([0.41666667, 1. ])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
def lowerCamelCase (self ) -> Optional[int]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
'''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'''
] , )
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value('''float''' ) ),
"references": datasets.Sequence(datasets.Value('''float''' ) ),
}
else:
return {
"predictions": datasets.Value('''float''' ),
"references": datasets.Value('''float''' ),
}
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__=None , __magic_name__="uniform_average" , __magic_name__=True ) -> Any:
'''simple docstring'''
snake_case_ : List[Any] = mean_squared_error(
__magic_name__ , __magic_name__ , sample_weight=__magic_name__ , multioutput=__magic_name__ , squared=__magic_name__ )
return {"mse": mse}
| 60 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
lowerCAmelCase_ = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['''BartphoTokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 60 |
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class __lowerCAmelCase :
lowerCamelCase_ : Any = None
def lowerCamelCase (self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
snake_case_ : List[Any] = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , __magic_name__ )
def lowerCamelCase (self ) -> int:
'''simple docstring'''
snake_case_ : Dict = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case_ : Optional[int] = os.path.join(__magic_name__ , '''feat_extract.json''' )
feat_extract_first.to_json_file(__magic_name__ )
snake_case_ : str = self.feature_extraction_class.from_json_file(__magic_name__ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Optional[int] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case_ : str = feat_extract_first.save_pretrained(__magic_name__ )[0]
check_json_file_has_correct_format(__magic_name__ )
snake_case_ : Dict = self.feature_extraction_class.from_pretrained(__magic_name__ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def lowerCamelCase (self ) -> str:
'''simple docstring'''
snake_case_ : Tuple = self.feature_extraction_class()
self.assertIsNotNone(__magic_name__ )
| 60 | 1 |
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = TypeVar('''DatasetType''', Dataset, IterableDataset)
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = "first_exhausted" , ) -> DatasetType:
"""simple docstring"""
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError('''Unable to interleave an empty list of datasets.''' )
for i, dataset in enumerate(_UpperCamelCase ):
if not isinstance(_UpperCamelCase , (Dataset, IterableDataset) ):
if isinstance(_UpperCamelCase , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '''
'''is an empty dataset dictionary.''' )
raise ValueError(
f'''Dataset at position {i} has at least one split: {list(_UpperCamelCase )}\n'''
f'''Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(_UpperCamelCase ) )}\']''' )
raise ValueError(
f'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_UpperCamelCase ).__name__}.''' )
if i == 0:
snake_case_ , snake_case_ : int = (
(Dataset, IterableDataset) if isinstance(_UpperCamelCase , _UpperCamelCase ) else (IterableDataset, Dataset)
)
elif not isinstance(_UpperCamelCase , _UpperCamelCase ):
raise ValueError(
f'''Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.''' )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(f'''{stopping_strategy} is not supported. Please enter a valid stopping_strategy.''' )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , info=_UpperCamelCase , split=_UpperCamelCase , stopping_strategy=_UpperCamelCase )
else:
return _interleave_iterable_datasets(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , info=_UpperCamelCase , split=_UpperCamelCase , stopping_strategy=_UpperCamelCase )
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = 0 , ) -> DatasetType:
"""simple docstring"""
if not dsets:
raise ValueError('''Unable to concatenate an empty list of datasets.''' )
for i, dataset in enumerate(_UpperCamelCase ):
if not isinstance(_UpperCamelCase , (Dataset, IterableDataset) ):
if isinstance(_UpperCamelCase , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '''
'''is an empty dataset dictionary.''' )
raise ValueError(
f'''Dataset at position {i} has at least one split: {list(_UpperCamelCase )}\n'''
f'''Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(_UpperCamelCase ) )}\']''' )
raise ValueError(
f'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_UpperCamelCase ).__name__}.''' )
if i == 0:
snake_case_ , snake_case_ : List[str] = (
(Dataset, IterableDataset) if isinstance(_UpperCamelCase , _UpperCamelCase ) else (IterableDataset, Dataset)
)
elif not isinstance(_UpperCamelCase , _UpperCamelCase ):
raise ValueError(
f'''Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.''' )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(_UpperCamelCase , info=_UpperCamelCase , split=_UpperCamelCase , axis=_UpperCamelCase )
else:
return _concatenate_iterable_datasets(_UpperCamelCase , info=_UpperCamelCase , split=_UpperCamelCase , axis=_UpperCamelCase )
| 60 |
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
class __lowerCAmelCase :
lowerCamelCase_ : str
lowerCamelCase_ : str = None
@staticmethod
def lowerCamelCase () -> Any:
'''simple docstring'''
raise NotImplementedError
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ) -> Dict:
'''simple docstring'''
raise NotImplementedError
def lowerCamelCase (self , __magic_name__ ) -> int:
'''simple docstring'''
raise NotImplementedError
def lowerCamelCase (self ) -> Union[str, Any]:
'''simple docstring'''
if not self.is_available():
raise RuntimeError(
F'''You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.''' )
@classmethod
def lowerCamelCase (cls ) -> List[Any]:
'''simple docstring'''
return F'''`pip install {cls.pip_package or cls.name}`'''
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : Optional[int] = '''optuna'''
@staticmethod
def lowerCamelCase () -> Union[str, Any]:
'''simple docstring'''
return is_optuna_available()
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ) -> Union[str, Any]:
'''simple docstring'''
return run_hp_search_optuna(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ )
def lowerCamelCase (self , __magic_name__ ) -> Union[str, Any]:
'''simple docstring'''
return default_hp_space_optuna(__magic_name__ )
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : Any = '''ray'''
lowerCamelCase_ : List[str] = '''\'ray[tune]\''''
@staticmethod
def lowerCamelCase () -> List[Any]:
'''simple docstring'''
return is_ray_available()
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ) -> Optional[Any]:
'''simple docstring'''
return run_hp_search_ray(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ )
def lowerCamelCase (self , __magic_name__ ) -> Union[str, Any]:
'''simple docstring'''
return default_hp_space_ray(__magic_name__ )
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : Tuple = '''sigopt'''
@staticmethod
def lowerCamelCase () -> Optional[int]:
'''simple docstring'''
return is_sigopt_available()
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ) -> List[str]:
'''simple docstring'''
return run_hp_search_sigopt(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ )
def lowerCamelCase (self , __magic_name__ ) -> int:
'''simple docstring'''
return default_hp_space_sigopt(__magic_name__ )
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : Tuple = '''wandb'''
@staticmethod
def lowerCamelCase () -> Dict:
'''simple docstring'''
return is_wandb_available()
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ) -> Optional[Any]:
'''simple docstring'''
return run_hp_search_wandb(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ )
def lowerCamelCase (self , __magic_name__ ) -> Optional[Any]:
'''simple docstring'''
return default_hp_space_wandb(__magic_name__ )
lowerCAmelCase_ = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def lowerCamelCase_ ( ) -> str:
"""simple docstring"""
snake_case_ : Optional[int] = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
if len(_UpperCamelCase ) > 0:
snake_case_ : Dict = available_backends[0].name
if len(_UpperCamelCase ) > 1:
logger.info(
f'''{len(_UpperCamelCase )} hyperparameter search backends available. Using {name} as the default.''' )
return name
raise RuntimeError(
'''No hyperparameter search backend available.\n'''
+ '''\n'''.join(
f''' - To install {backend.name} run {backend.pip_install()}'''
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
| 60 | 1 |
def lowerCamelCase_ ( _UpperCamelCase ) -> float:
"""simple docstring"""
snake_case_ : List[Any] = 0
while len(_UpperCamelCase ) > 1:
snake_case_ : List[Any] = 0
        # Pick the two smallest remaining files and merge them
for _ in range(2 ):
snake_case_ : str = files.index(min(_UpperCamelCase ) )
temp += files[min_index]
files.pop(_UpperCamelCase )
files.append(_UpperCamelCase )
optimal_merge_cost += temp
return optimal_merge_cost
if __name__ == "__main__":
import doctest
doctest.testmod()
| 60 |
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> list:
"""simple docstring"""
snake_case_ : Tuple = len(_UpperCamelCase )
snake_case_ : Union[str, Any] = [[0] * n for i in range(_UpperCamelCase )]
for i in range(_UpperCamelCase ):
snake_case_ : Any = y_points[i]
for i in range(2 , _UpperCamelCase ):
for j in range(_UpperCamelCase , _UpperCamelCase ):
snake_case_ : Optional[int] = (
(xa - x_points[j - i + 1]) * q[j][i - 1]
- (xa - x_points[j]) * q[j - 1][i - 1]
) / (x_points[j] - x_points[j - i + 1])
return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 60 | 1 |
from cva import destroyAllWindows, imread, imshow, waitKey
def lowerCamelCase_ ( _UpperCamelCase ) -> str:
"""simple docstring"""
snake_case_ , snake_case_ : Tuple = img.shape[0], img.shape[1]
# converting each pixel's color to its negative
for i in range(_UpperCamelCase ):
for j in range(_UpperCamelCase ):
snake_case_ : List[str] = [255, 255, 255] - img[i][j]
return img
if __name__ == "__main__":
# read original image
lowerCAmelCase_ = imread('''image_data/lena.jpg''', 1)
# convert to its negative
lowerCAmelCase_ = convert_to_negative(img)
# show result image
imshow('''negative of original image''', img)
waitKey(0)
destroyAllWindows()
| 60 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase_ = {
'''configuration_xmod''': [
'''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XmodConfig''',
'''XmodOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XmodForCausalLM''',
'''XmodForMaskedLM''',
'''XmodForMultipleChoice''',
'''XmodForQuestionAnswering''',
'''XmodForSequenceClassification''',
'''XmodForTokenClassification''',
'''XmodModel''',
'''XmodPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 60 | 1 |
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> int:
"""simple docstring"""
while b:
snake_case_ , snake_case_ : Union[str, Any] = b, a % b
return a
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> int:
"""simple docstring"""
return a if b == 0 else euclidean_gcd_recursive(_UpperCamelCase , a % b )
def lowerCamelCase_ ( ) -> List[str]:
"""simple docstring"""
print(f'''euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}''' )
print(f'''euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}''' )
print(f'''euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}''' )
print(f'''euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}''' )
print(f'''euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}''' )
print(f'''euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}''' )
print(f'''euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}''' )
print(f'''euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}''' )
print(f'''euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}''' )
print(f'''euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}''' )
if __name__ == "__main__":
main()
| 60 |
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def lowerCamelCase_ ( _UpperCamelCase ) -> Union[str, Any]:
"""simple docstring"""
return getitem, k
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> Any:
"""simple docstring"""
return setitem, k, v
def lowerCamelCase_ ( _UpperCamelCase ) -> Tuple:
"""simple docstring"""
return delitem, k
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , *_UpperCamelCase ) -> str:
"""simple docstring"""
try:
return fun(_UpperCamelCase , *_UpperCamelCase ), None
except Exception as e:
return None, e
lowerCAmelCase_ = (
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
)
lowerCAmelCase_ = [
_set('''key_a''', '''val_a'''),
_set('''key_a''', '''val_b'''),
]
lowerCAmelCase_ = [
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
_del('''key_a'''),
_del('''key_b'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
]
lowerCAmelCase_ = [
_get('''key_a'''),
_del('''key_a'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
_del('''key_a'''),
_get('''key_a'''),
]
lowerCAmelCase_ = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
lowerCAmelCase_ = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set('''key_a''', '''val_b'''),
]
@pytest.mark.parametrize(
'''operations''' , (
pytest.param(_add_items , id='''add items''' ),
pytest.param(_overwrite_items , id='''overwrite items''' ),
pytest.param(_delete_items , id='''delete items''' ),
pytest.param(_access_absent_items , id='''access absent items''' ),
pytest.param(_add_with_resize_up , id='''add with resize up''' ),
pytest.param(_add_with_resize_down , id='''add with resize down''' ),
) , )
def lowerCamelCase_ ( _UpperCamelCase ) -> Any:
"""simple docstring"""
snake_case_ : Any = HashMap(initial_block_size=4 )
snake_case_ : Union[str, Any] = {}
for _, (fun, *args) in enumerate(_UpperCamelCase ):
snake_case_ , snake_case_ : str = _run_operation(_UpperCamelCase , _UpperCamelCase , *_UpperCamelCase )
snake_case_ , snake_case_ : List[Any] = _run_operation(_UpperCamelCase , _UpperCamelCase , *_UpperCamelCase )
assert my_res == py_res
assert str(_UpperCamelCase ) == str(_UpperCamelCase )
assert set(_UpperCamelCase ) == set(_UpperCamelCase )
assert len(_UpperCamelCase ) == len(_UpperCamelCase )
assert set(my.items() ) == set(py.items() )
def lowerCamelCase_ ( ) -> Any:
"""simple docstring"""
def is_public(_UpperCamelCase ) -> bool:
return not name.startswith('''_''' )
snake_case_ : str = {name for name in dir({} ) if is_public(_UpperCamelCase )}
snake_case_ : str = {name for name in dir(HashMap() ) if is_public(_UpperCamelCase )}
assert dict_public_names > hash_public_names
| 60 | 1 |
def lowerCamelCase_ ( _UpperCamelCase = 1_000_000 ) -> int:
"""simple docstring"""
snake_case_ : Dict = 1
snake_case_ : Dict = 1
snake_case_ : List[str] = {1: 1}
for inputa in range(2 , _UpperCamelCase ):
snake_case_ : Dict = 0
snake_case_ : List[Any] = inputa
while True:
if number in counters:
counter += counters[number]
break
if number % 2 == 0:
number //= 2
counter += 1
else:
snake_case_ : Dict = (3 * number) + 1
counter += 1
if inputa not in counters:
snake_case_ : Tuple = counter
if counter > pre_counter:
snake_case_ : int = inputa
snake_case_ : Dict = counter
return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
| 60 |
from __future__ import annotations
def lowerCamelCase_ ( _UpperCamelCase ) -> list:
"""simple docstring"""
if len(_UpperCamelCase ) == 0:
return []
snake_case_ , snake_case_ : Dict = min(_UpperCamelCase ), max(_UpperCamelCase )
snake_case_ : List[str] = int(max_value - min_value ) + 1
snake_case_ : list[list] = [[] for _ in range(_UpperCamelCase )]
for i in my_list:
buckets[int(i - min_value )].append(_UpperCamelCase )
return [v for bucket in buckets for v in sorted(_UpperCamelCase )]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -1_0, 1_5, 2, -2]) == [-1_0, -2, 0, 1, 2, 1_5]
| 60 | 1 |
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class __lowerCAmelCase ( _a, unittest.TestCase ):
lowerCamelCase_ : Optional[int] = BlenderbotSmallTokenizer
lowerCamelCase_ : Dict = False
def lowerCamelCase (self ) -> List[str]:
'''simple docstring'''
super().setUp()
snake_case_ : Optional[int] = ['''__start__''', '''adapt''', '''act''', '''ap@@''', '''te''', '''__end__''', '''__unk__''']
snake_case_ : str = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) )
snake_case_ : Tuple = ['''#version: 0.2''', '''a p''', '''t e</w>''', '''ap t</w>''', '''a d''', '''ad apt</w>''', '''a c''', '''ac t</w>''', '''''']
snake_case_ : List[str] = {'''unk_token''': '''__unk__''', '''bos_token''': '''__start__''', '''eos_token''': '''__end__'''}
snake_case_ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
snake_case_ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__magic_name__ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__magic_name__ ) )
def lowerCamelCase (self , **__magic_name__ ) -> int:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **__magic_name__ )
def lowerCamelCase (self , __magic_name__ ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Optional[int] = '''adapt act apte'''
snake_case_ : int = '''adapt act apte'''
return input_text, output_text
def lowerCamelCase (self ) -> Tuple:
'''simple docstring'''
snake_case_ : Any = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
snake_case_ : List[Any] = '''adapt act apte'''
snake_case_ : Dict = ['''adapt''', '''act''', '''ap@@''', '''te''']
snake_case_ : Any = tokenizer.tokenize(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
snake_case_ : Tuple = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
snake_case_ : str = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , __magic_name__ )
def lowerCamelCase (self ) -> int:
'''simple docstring'''
snake_case_ : int = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
assert tok('''sam''' ).input_ids == [1384]
snake_case_ : int = '''I am a small frog.'''
snake_case_ : Dict = tok([src_text] , padding=__magic_name__ , truncation=__magic_name__ )['''input_ids''']
snake_case_ : str = tok.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ , clean_up_tokenization_spaces=__magic_name__ )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def lowerCamelCase (self ) -> int:
'''simple docstring'''
snake_case_ : str = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
snake_case_ : Tuple = '''I am a small frog .'''
snake_case_ : Optional[Any] = '''.'''
snake_case_ : int = tok(__magic_name__ )['''input_ids''']
snake_case_ : Any = tok(__magic_name__ )['''input_ids''']
assert encoded[-1] == encoded_dot[0]
| 60 |
import tensorflow as tf
from ...tf_utils import shape_list
class __lowerCAmelCase ( tf.keras.layers.Layer ):
def __init__(self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=1 , __magic_name__=False , **__magic_name__ ) -> Dict:
'''simple docstring'''
super().__init__(**__magic_name__ )
snake_case_ : List[Any] = vocab_size
snake_case_ : Dict = d_embed
snake_case_ : Union[str, Any] = d_proj
snake_case_ : str = cutoffs + [vocab_size]
snake_case_ : int = [0] + self.cutoffs
snake_case_ : Optional[int] = div_val
snake_case_ : int = self.cutoffs[0]
snake_case_ : Any = len(self.cutoffs ) - 1
snake_case_ : Union[str, Any] = self.shortlist_size + self.n_clusters
snake_case_ : str = keep_order
snake_case_ : int = []
snake_case_ : Union[str, Any] = []
def lowerCamelCase (self , __magic_name__ ) -> Union[str, Any]:
'''simple docstring'''
if self.n_clusters > 0:
snake_case_ : Tuple = self.add_weight(
shape=(self.n_clusters, self.d_embed) , initializer='''zeros''' , trainable=__magic_name__ , name='''cluster_weight''' )
snake_case_ : Optional[Any] = self.add_weight(
shape=(self.n_clusters,) , initializer='''zeros''' , trainable=__magic_name__ , name='''cluster_bias''' )
if self.div_val == 1:
for i in range(len(self.cutoffs ) ):
if self.d_proj != self.d_embed:
snake_case_ : List[str] = self.add_weight(
shape=(self.d_embed, self.d_proj) , initializer='''zeros''' , trainable=__magic_name__ , name=F'''out_projs_._{i}''' , )
self.out_projs.append(__magic_name__ )
else:
self.out_projs.append(__magic_name__ )
snake_case_ : Optional[Any] = self.add_weight(
shape=(self.vocab_size, self.d_embed) , initializer='''zeros''' , trainable=__magic_name__ , name=F'''out_layers_._{i}_._weight''' , )
snake_case_ : List[str] = self.add_weight(
shape=(self.vocab_size,) , initializer='''zeros''' , trainable=__magic_name__ , name=F'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
else:
for i in range(len(self.cutoffs ) ):
snake_case_ , snake_case_ : Optional[int] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
snake_case_ : Optional[Any] = self.d_embed // (self.div_val**i)
snake_case_ : int = self.add_weight(
shape=(d_emb_i, self.d_proj) , initializer='''zeros''' , trainable=__magic_name__ , name=F'''out_projs_._{i}''' )
self.out_projs.append(__magic_name__ )
snake_case_ : int = self.add_weight(
shape=(r_idx - l_idx, d_emb_i) , initializer='''zeros''' , trainable=__magic_name__ , name=F'''out_layers_._{i}_._weight''' , )
snake_case_ : Any = self.add_weight(
shape=(r_idx - l_idx,) , initializer='''zeros''' , trainable=__magic_name__ , name=F'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
super().build(__magic_name__ )
@staticmethod
def lowerCamelCase (__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None ) -> str:
'''simple docstring'''
snake_case_ : Union[str, Any] = x
if proj is not None:
snake_case_ : List[str] = tf.einsum('''ibd,ed->ibe''' , __magic_name__ , __magic_name__ )
return tf.einsum('''ibd,nd->ibn''' , __magic_name__ , __magic_name__ ) + b
@staticmethod
def lowerCamelCase (__magic_name__ , __magic_name__ ) -> Any:
'''simple docstring'''
snake_case_ : Union[str, Any] = shape_list(__magic_name__ )
snake_case_ : Tuple = tf.range(lp_size[0] , dtype=target.dtype )
snake_case_ : Dict = tf.stack([r, target] , 1 )
return tf.gather_nd(__magic_name__ , __magic_name__ )
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__=True , __magic_name__=False ) -> str:
'''simple docstring'''
snake_case_ : Optional[Any] = 0
if self.n_clusters == 0:
snake_case_ : Any = self._logit(__magic_name__ , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
if target is not None:
snake_case_ : Union[str, Any] = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=__magic_name__ , logits=__magic_name__ )
snake_case_ : Optional[Any] = tf.nn.log_softmax(__magic_name__ , axis=-1 )
else:
snake_case_ : Optional[int] = shape_list(__magic_name__ )
snake_case_ : int = []
snake_case_ : List[Any] = tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
snake_case_ , snake_case_ : Optional[int] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
snake_case_ : str = (target >= l_idx) & (target < r_idx)
snake_case_ : Dict = tf.where(__magic_name__ )
snake_case_ : List[str] = tf.boolean_mask(__magic_name__ , __magic_name__ ) - l_idx
if self.div_val == 1:
snake_case_ : Any = self.out_layers[0][0][l_idx:r_idx]
snake_case_ : Dict = self.out_layers[0][1][l_idx:r_idx]
else:
snake_case_ : Union[str, Any] = self.out_layers[i][0]
snake_case_ : int = self.out_layers[i][1]
if i == 0:
snake_case_ : List[Any] = tf.concat([cur_W, self.cluster_weight] , 0 )
snake_case_ : Tuple = tf.concat([cur_b, self.cluster_bias] , 0 )
snake_case_ : Optional[int] = self._logit(__magic_name__ , __magic_name__ , __magic_name__ , self.out_projs[0] )
snake_case_ : Any = tf.nn.log_softmax(__magic_name__ )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
snake_case_ : Optional[Any] = tf.boolean_mask(__magic_name__ , __magic_name__ )
snake_case_ : Tuple = self._gather_logprob(__magic_name__ , __magic_name__ )
else:
snake_case_ : Optional[int] = self._logit(__magic_name__ , __magic_name__ , __magic_name__ , self.out_projs[i] )
snake_case_ : Union[str, Any] = tf.nn.log_softmax(__magic_name__ )
snake_case_ : Optional[Any] = self.cutoffs[0] + i - 1 # No probability for the head cluster
snake_case_ : Optional[int] = head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(__magic_name__ )
if target is not None:
snake_case_ : Any = tf.boolean_mask(__magic_name__ , __magic_name__ )
snake_case_ : Optional[Any] = tf.boolean_mask(__magic_name__ , __magic_name__ )
snake_case_ : str = self._gather_logprob(__magic_name__ , __magic_name__ )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(__magic_name__ , -cur_logprob , shape_list(__magic_name__ ) )
snake_case_ : str = tf.concat(__magic_name__ , axis=-1 )
if target is not None:
if return_mean:
snake_case_ : int = tf.reduce_mean(__magic_name__ )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(__magic_name__ )
# Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference).
self.add_metric(__magic_name__ , name=self.name , aggregation='''mean''' if return_mean else '''''' )
return out
| 60 | 1 |
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> str:
"""simple docstring"""
snake_case_ : int = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''encoder.deit.blocks.{i}.norm1.weight''', f'''encoder.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''encoder.deit.blocks.{i}.norm1.bias''', f'''encoder.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''encoder.deit.blocks.{i}.attn.proj.weight''', f'''encoder.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''encoder.deit.blocks.{i}.attn.proj.bias''', f'''encoder.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append(
(f'''encoder.deit.blocks.{i}.norm2.weight''', f'''encoder.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''encoder.deit.blocks.{i}.norm2.bias''', f'''encoder.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(f'''encoder.deit.blocks.{i}.mlp.fc1.weight''', f'''encoder.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append(
(f'''encoder.deit.blocks.{i}.mlp.fc1.bias''', f'''encoder.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append(
(f'''encoder.deit.blocks.{i}.mlp.fc2.weight''', f'''encoder.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''encoder.deit.blocks.{i}.mlp.fc2.bias''', f'''encoder.encoder.layer.{i}.output.dense.bias''') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
('''encoder.deit.cls_token''', '''encoder.embeddings.cls_token'''),
('''encoder.deit.pos_embed''', '''encoder.embeddings.position_embeddings'''),
('''encoder.deit.patch_embed.proj.weight''', '''encoder.embeddings.patch_embeddings.projection.weight'''),
('''encoder.deit.patch_embed.proj.bias''', '''encoder.embeddings.patch_embeddings.projection.bias'''),
('''encoder.deit.norm.weight''', '''encoder.layernorm.weight'''),
('''encoder.deit.norm.bias''', '''encoder.layernorm.bias'''),
] )
return rename_keys
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> int:
"""simple docstring"""
for i in range(encoder_config.num_hidden_layers ):
# queries, keys and values (only weights, no biases)
snake_case_ : Tuple = state_dict.pop(f'''encoder.deit.blocks.{i}.attn.qkv.weight''' )
snake_case_ : List[Any] = in_proj_weight[
: encoder_config.hidden_size, :
]
snake_case_ : List[str] = in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
snake_case_ : Union[str, Any] = in_proj_weight[
-encoder_config.hidden_size :, :
]
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Dict:
"""simple docstring"""
snake_case_ : Tuple = dct.pop(_UpperCamelCase )
snake_case_ : Any = val
def lowerCamelCase_ ( _UpperCamelCase ) -> int:
"""simple docstring"""
if "handwritten" in checkpoint_url:
snake_case_ : Tuple = '''https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg''' # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
snake_case_ : Tuple = '''https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg'''
snake_case_ : Optional[int] = Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw ).convert('''RGB''' )
return im
@torch.no_grad()
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> Tuple:
"""simple docstring"""
snake_case_ : List[Any] = ViTConfig(image_size=384 , qkv_bias=_UpperCamelCase )
snake_case_ : str = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
snake_case_ : Optional[int] = 768
elif "large" in checkpoint_url:
# use ViT-large encoder
snake_case_ : str = 1_024
snake_case_ : Optional[Any] = 4_096
snake_case_ : Union[str, Any] = 24
snake_case_ : Tuple = 16
snake_case_ : Any = 1_024
else:
raise ValueError('''Should either find \'base\' or \'large\' in checkpoint URL''' )
    # the large-printed + stage1 checkpoints use sinusoidal position embeddings, with no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
snake_case_ : List[Any] = False
snake_case_ : Union[str, Any] = '''relu'''
snake_case_ : Dict = 1_024
snake_case_ : List[str] = True
snake_case_ : List[Any] = False
snake_case_ : List[Any] = False
# load HuggingFace model
snake_case_ : Any = ViTModel(_UpperCamelCase , add_pooling_layer=_UpperCamelCase )
snake_case_ : Any = TrOCRForCausalLM(_UpperCamelCase )
snake_case_ : int = VisionEncoderDecoderModel(encoder=_UpperCamelCase , decoder=_UpperCamelCase )
model.eval()
# load state_dict of original model, rename some keys
snake_case_ : Any = torch.hub.load_state_dict_from_url(_UpperCamelCase , map_location='''cpu''' , check_hash=_UpperCamelCase )['''model''']
snake_case_ : Optional[Any] = create_rename_keys(_UpperCamelCase , _UpperCamelCase )
for src, dest in rename_keys:
rename_key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
read_in_q_k_v(_UpperCamelCase , _UpperCamelCase )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
snake_case_ : Dict = state_dict.pop(_UpperCamelCase )
if key.startswith('''decoder''' ) and "output_projection" not in key:
snake_case_ : List[str] = val
else:
snake_case_ : Optional[Any] = val
# load state dict
model.load_state_dict(_UpperCamelCase )
# Check outputs on an image
snake_case_ : int = ViTImageProcessor(size=encoder_config.image_size )
snake_case_ : Optional[Any] = RobertaTokenizer.from_pretrained('''roberta-large''' )
snake_case_ : Dict = TrOCRProcessor(_UpperCamelCase , _UpperCamelCase )
snake_case_ : Optional[int] = processor(images=prepare_img(_UpperCamelCase ) , return_tensors='''pt''' ).pixel_values
# verify logits
snake_case_ : Optional[Any] = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
snake_case_ : int = model(pixel_values=_UpperCamelCase , decoder_input_ids=_UpperCamelCase )
snake_case_ : Tuple = outputs.logits
snake_case_ : Union[str, Any] = torch.Size([1, 1, 50_265] )
if "trocr-base-handwritten" in checkpoint_url:
snake_case_ : Any = torch.tensor(
[-1.4_502, -4.6_683, -0.5_347, -2.9_291, 9.1_435, -3.0_571, 8.9_764, 1.7_560, 8.7_358, -1.5_311] )
elif "trocr-large-handwritten" in checkpoint_url:
snake_case_ : Tuple = torch.tensor(
[-2.6_437, -1.3_129, -2.2_596, -5.3_455, 6.3_539, 1.7_604, 5.4_991, 1.4_702, 5.6_113, 2.0_170] )
elif "trocr-base-printed" in checkpoint_url:
snake_case_ : str = torch.tensor(
[-5.6_816, -5.8_388, 1.1_398, -6.9_034, 6.8_505, -2.4_393, 1.2_284, -1.0_232, -1.9_661, -3.9_210] )
elif "trocr-large-printed" in checkpoint_url:
snake_case_ : List[str] = torch.tensor(
[-6.0_162, -7.0_959, 4.4_155, -5.1_063, 7.0_468, -3.1_631, 2.6_466, -0.3_081, -0.8_106, -1.7_535] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :10] , _UpperCamelCase , atol=1E-3 ), "First elements of logits not as expected"
Path(_UpperCamelCase ).mkdir(exist_ok=_UpperCamelCase )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(_UpperCamelCase )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
lowerCAmelCase_ = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 60 |
import requests
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> None:
"""simple docstring"""
snake_case_ : Tuple = {'''Content-Type''': '''application/json'''}
snake_case_ : Any = requests.post(_UpperCamelCase , json={'''text''': message_body} , headers=_UpperCamelCase )
if response.status_code != 200:
snake_case_ : List[Any] = (
'''Request to slack returned an error '''
f'''{response.status_code}, the response is:\n{response.text}'''
)
raise ValueError(_UpperCamelCase )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('''<YOUR MESSAGE BODY>''', '''<SLACK CHANNEL URL>''')
| 60 | 1 |
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
lowerCAmelCase_ = datasets.utils.logging.get_logger(__name__)
class __lowerCAmelCase ( folder_based_builder.FolderBasedBuilderConfig ):
lowerCamelCase_ : bool = None
lowerCamelCase_ : bool = None
class __lowerCAmelCase ( folder_based_builder.FolderBasedBuilder ):
lowerCamelCase_ : Dict = datasets.Audio()
lowerCamelCase_ : Tuple = '''audio'''
lowerCamelCase_ : Tuple = AudioFolderConfig
lowerCamelCase_ : List[str] # definition at the bottom of the script
lowerCamelCase_ : Any = AudioClassification(audio_column='''audio''', label_column='''label''' )
lowerCAmelCase_ = [
'''.aiff''',
'''.au''',
'''.avr''',
'''.caf''',
'''.flac''',
'''.htk''',
'''.svx''',
'''.mat4''',
'''.mat5''',
'''.mpc2k''',
'''.ogg''',
'''.paf''',
'''.pvf''',
'''.raw''',
'''.rf64''',
'''.sd2''',
'''.sds''',
'''.ircam''',
'''.voc''',
'''.w64''',
'''.wav''',
'''.nist''',
'''.wavex''',
'''.wve''',
'''.xi''',
'''.mp3''',
'''.opus''',
]
lowerCAmelCase_ = AUDIO_EXTENSIONS
| 60 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase_ = {
'''configuration_speech_to_text''': ['''SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Speech2TextConfig'''],
'''processing_speech_to_text''': ['''Speech2TextProcessor'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['''Speech2TextTokenizer''']
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['''Speech2TextFeatureExtractor''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSpeech2TextForConditionalGeneration''',
'''TFSpeech2TextModel''',
'''TFSpeech2TextPreTrainedModel''',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Speech2TextForConditionalGeneration''',
'''Speech2TextModel''',
'''Speech2TextPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 60 | 1 |
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : str = ['''image_processor''', '''tokenizer''']
lowerCamelCase_ : Dict = '''FlavaImageProcessor'''
lowerCamelCase_ : List[str] = ('''BertTokenizer''', '''BertTokenizerFast''')
def __init__(self , __magic_name__=None , __magic_name__=None , **__magic_name__ ) -> List[Any]:
'''simple docstring'''
snake_case_ : Any = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , __magic_name__ , )
snake_case_ : Optional[Any] = kwargs.pop('''feature_extractor''' )
snake_case_ : Optional[int] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(__magic_name__ , __magic_name__ )
snake_case_ : Tuple = self.image_processor
def __call__(self , __magic_name__ = None , __magic_name__ = None , __magic_name__ = True , __magic_name__ = False , __magic_name__ = False , __magic_name__ = None , __magic_name__ = 0 , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = False , __magic_name__ = False , __magic_name__ = False , __magic_name__ = False , __magic_name__ = True , __magic_name__ = None , **__magic_name__ , ) -> Dict:
'''simple docstring'''
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
snake_case_ : Optional[int] = self.tokenizer(
text=__magic_name__ , add_special_tokens=__magic_name__ , padding=__magic_name__ , truncation=__magic_name__ , max_length=__magic_name__ , stride=__magic_name__ , pad_to_multiple_of=__magic_name__ , return_token_type_ids=__magic_name__ , return_attention_mask=__magic_name__ , return_overflowing_tokens=__magic_name__ , return_special_tokens_mask=__magic_name__ , return_offsets_mapping=__magic_name__ , return_length=__magic_name__ , verbose=__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ , )
if images is not None:
snake_case_ : Any = self.image_processor(
__magic_name__ , return_image_mask=__magic_name__ , return_codebook_pixels=__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ , )
if text is not None and images is not None:
encoding.update(__magic_name__ )
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__magic_name__ ) , tensor_type=__magic_name__ )
def lowerCamelCase (self , *__magic_name__ , **__magic_name__ ) -> Optional[Any]:
'''simple docstring'''
return self.tokenizer.batch_decode(*__magic_name__ , **__magic_name__ )
def lowerCamelCase (self , *__magic_name__ , **__magic_name__ ) -> List[Any]:
'''simple docstring'''
return self.tokenizer.decode(*__magic_name__ , **__magic_name__ )
@property
def lowerCamelCase (self ) -> Any:
'''simple docstring'''
snake_case_ : Tuple = self.tokenizer.model_input_names
snake_case_ : List[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def lowerCamelCase (self ) -> str:
'''simple docstring'''
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __magic_name__ , )
return self.image_processor_class
@property
def lowerCamelCase (self ) -> List[str]:
'''simple docstring'''
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __magic_name__ , )
return self.image_processor
| 60 |
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''google/owlvit-base-patch32''': '''https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json''',
'''google/owlvit-base-patch16''': '''https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json''',
'''google/owlvit-large-patch14''': '''https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json''',
}
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : Tuple = '''owlvit_text_model'''
def __init__(self , __magic_name__=4_9408 , __magic_name__=512 , __magic_name__=2048 , __magic_name__=12 , __magic_name__=8 , __magic_name__=16 , __magic_name__="quick_gelu" , __magic_name__=1e-5 , __magic_name__=0.0 , __magic_name__=0.02 , __magic_name__=1.0 , __magic_name__=0 , __magic_name__=4_9406 , __magic_name__=4_9407 , **__magic_name__ , ) -> str:
'''simple docstring'''
super().__init__(pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
snake_case_ : int = vocab_size
snake_case_ : str = hidden_size
snake_case_ : List[Any] = intermediate_size
snake_case_ : str = num_hidden_layers
snake_case_ : List[Any] = num_attention_heads
snake_case_ : Optional[Any] = max_position_embeddings
snake_case_ : str = hidden_act
snake_case_ : Union[str, Any] = layer_norm_eps
snake_case_ : Dict = attention_dropout
snake_case_ : Union[str, Any] = initializer_range
snake_case_ : int = initializer_factor
@classmethod
def lowerCamelCase (cls , __magic_name__ , **__magic_name__ ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(__magic_name__ )
snake_case_ , snake_case_ : str = cls.get_config_dict(__magic_name__ , **__magic_name__ )
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get('''model_type''' ) == "owlvit":
snake_case_ : str = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__magic_name__ , **__magic_name__ )
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : int = '''owlvit_vision_model'''
def __init__(self , __magic_name__=768 , __magic_name__=3072 , __magic_name__=12 , __magic_name__=12 , __magic_name__=3 , __magic_name__=768 , __magic_name__=32 , __magic_name__="quick_gelu" , __magic_name__=1e-5 , __magic_name__=0.0 , __magic_name__=0.02 , __magic_name__=1.0 , **__magic_name__ , ) -> int:
'''simple docstring'''
super().__init__(**__magic_name__ )
snake_case_ : Optional[Any] = hidden_size
snake_case_ : Union[str, Any] = intermediate_size
snake_case_ : Union[str, Any] = num_hidden_layers
snake_case_ : Tuple = num_attention_heads
snake_case_ : List[Any] = num_channels
snake_case_ : Union[str, Any] = image_size
snake_case_ : Dict = patch_size
snake_case_ : List[Any] = hidden_act
snake_case_ : Tuple = layer_norm_eps
snake_case_ : Dict = attention_dropout
snake_case_ : List[str] = initializer_range
snake_case_ : List[Any] = initializer_factor
@classmethod
def lowerCamelCase (cls , __magic_name__ , **__magic_name__ ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(__magic_name__ )
snake_case_ , snake_case_ : int = cls.get_config_dict(__magic_name__ , **__magic_name__ )
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get('''model_type''' ) == "owlvit":
snake_case_ : str = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__magic_name__ , **__magic_name__ )
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : int = '''owlvit'''
lowerCamelCase_ : Optional[int] = True
def __init__(self , __magic_name__=None , __magic_name__=None , __magic_name__=512 , __magic_name__=2.6_592 , __magic_name__=True , **__magic_name__ , ) -> int:
'''simple docstring'''
super().__init__(**__magic_name__ )
if text_config is None:
snake_case_ : Tuple = {}
logger.info('''text_config is None. Initializing the OwlViTTextConfig with default values.''' )
if vision_config is None:
snake_case_ : str = {}
logger.info('''vision_config is None. initializing the OwlViTVisionConfig with default values.''' )
snake_case_ : str = OwlViTTextConfig(**__magic_name__ )
snake_case_ : Union[str, Any] = OwlViTVisionConfig(**__magic_name__ )
snake_case_ : Any = projection_dim
snake_case_ : Union[str, Any] = logit_scale_init_value
snake_case_ : str = return_dict
snake_case_ : Any = 1.0
@classmethod
def lowerCamelCase (cls , __magic_name__ , **__magic_name__ ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(__magic_name__ )
snake_case_ , snake_case_ : Optional[Any] = cls.get_config_dict(__magic_name__ , **__magic_name__ )
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__magic_name__ , **__magic_name__ )
@classmethod
def lowerCamelCase (cls , __magic_name__ , __magic_name__ , **__magic_name__ ) -> str:
'''simple docstring'''
snake_case_ : Optional[int] = {}
snake_case_ : Union[str, Any] = text_config
snake_case_ : Optional[Any] = vision_config
return cls.from_dict(__magic_name__ , **__magic_name__ )
def lowerCamelCase (self ) -> str:
'''simple docstring'''
snake_case_ : Dict = copy.deepcopy(self.__dict__ )
snake_case_ : List[Any] = self.text_config.to_dict()
snake_case_ : List[Any] = self.vision_config.to_dict()
snake_case_ : Tuple = self.__class__.model_type
return output
class __lowerCAmelCase ( _a ):
@property
def lowerCamelCase (self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
] )
@property
def lowerCamelCase (self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''logits_per_image''', {0: '''batch'''}),
('''logits_per_text''', {0: '''batch'''}),
('''text_embeds''', {0: '''batch'''}),
('''image_embeds''', {0: '''batch'''}),
] )
@property
def lowerCamelCase (self ) -> float:
'''simple docstring'''
return 1e-4
def lowerCamelCase (self , __magic_name__ , __magic_name__ = -1 , __magic_name__ = -1 , __magic_name__ = None , ) -> Mapping[str, Any]:
'''simple docstring'''
snake_case_ : Dict = super().generate_dummy_inputs(
processor.tokenizer , batch_size=__magic_name__ , seq_length=__magic_name__ , framework=__magic_name__ )
snake_case_ : List[str] = super().generate_dummy_inputs(
processor.image_processor , batch_size=__magic_name__ , framework=__magic_name__ )
return {**text_input_dict, **image_input_dict}
@property
def lowerCamelCase (self ) -> int:
'''simple docstring'''
return 14
| 60 | 1 |
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
lowerCAmelCase_ = HfApi()
lowerCAmelCase_ = {}
# fmt: off
lowerCAmelCase_ = torch.tensor([
-0.7_515, -1.6_883, 0.2_420, 0.0_300, 0.6_347, 1.3_433, -1.1_743, -3.7_467,
1.2_342, -2.2_485, 0.4_636, 0.8_076, -0.7_991, 0.3_969, 0.8_498, 0.9_189,
-1.8_887, -3.3_522, 0.7_639, 0.2_040, 0.6_271, -2.7_148, -1.6_316, 3.0_839,
0.3_186, 0.2_721, -0.9_759, -1.2_461, 2.6_257, 1.3_557
])
lowerCAmelCase_ = torch.tensor([
-2.3_639, -2.5_344, 0.0_054, -0.6_674, 1.5_990, 1.0_158, 0.3_124, -2.1_436,
1.8_795, -2.5_429, -0.1_566, -0.3_973, 1.2_490, 2.6_447, 1.2_283, -0.5_208,
-2.8_154, -3.5_119, 2.3_838, 1.2_033, 1.7_201, -2.1_256, -1.4_576, 2.7_948,
2.4_204, -0.9_752, -1.2_546, 0.8_027, 3.2_758, 3.1_365
])
lowerCAmelCase_ = torch.tensor([
-0.6_531, -0.6_891, -0.3_172, -0.5_375, -0.9_140, -0.5_367, -0.1_175, -0.7_869,
-0.3_808, -0.4_513, -0.2_098, -0.0_083, 0.3_183, 0.5_140, 0.2_247, -0.1_304,
-0.1_302, -0.2_802, -0.2_084, -0.2_025, -0.4_967, -0.4_873, -0.0_861, 0.6_925,
0.0_250, 0.1_290, -0.1_543, 0.6_316, 1.0_460, 1.4_943
])
lowerCAmelCase_ = torch.tensor([
0.0_911, 0.1_107, 0.0_182, 0.0_435, -0.0_805, -0.0_608, 0.0_381, 0.2_172,
-0.0_280, 0.1_327, -0.0_299, -0.0_255, -0.0_050, -0.1_170, -0.1_046, 0.0_309,
0.1_367, 0.1_728, -0.0_533, -0.0_748, -0.0_534, 0.1_624, 0.0_384, -0.1_805,
-0.0_707, 0.0_642, 0.0_220, -0.0_134, -0.1_333, -0.1_505
])
lowerCAmelCase_ = torch.tensor([
0.1_321, 0.1_337, 0.0_440, 0.0_622, -0.0_591, -0.0_370, 0.0_503, 0.2_133,
-0.0_177, 0.1_415, -0.0_116, -0.0_112, 0.0_044, -0.0_980, -0.0_789, 0.0_395,
0.1_502, 0.1_785, -0.0_488, -0.0_514, -0.0_404, 0.1_539, 0.0_454, -0.1_559,
-0.0_665, 0.0_659, 0.0_383, -0.0_005, -0.1_266, -0.1_386
])
lowerCAmelCase_ = torch.tensor([
0.1_154, 0.1_218, 0.0_307, 0.0_526, -0.0_711, -0.0_541, 0.0_366, 0.2_078,
-0.0_267, 0.1_317, -0.0_226, -0.0_193, -0.0_014, -0.1_055, -0.0_902, 0.0_330,
0.1_391, 0.1_709, -0.0_562, -0.0_693, -0.0_560, 0.1_482, 0.0_381, -0.1_683,
-0.0_681, 0.0_661, 0.0_331, -0.0_046, -0.1_268, -0.1_431
])
lowerCAmelCase_ = torch.tensor([
0.1_192, 0.1_240, 0.0_414, 0.0_606, -0.0_557, -0.0_412, 0.0_430, 0.2_042,
-0.0_200, 0.1_385, -0.0_115, -0.0_132, 0.0_017, -0.0_965, -0.0_802, 0.0_398,
0.1_433, 0.1_747, -0.0_458, -0.0_533, -0.0_407, 0.1_545, 0.0_419, -0.1_574,
-0.0_645, 0.0_626, 0.0_341, -0.0_010, -0.1_199, -0.1_390
])
lowerCAmelCase_ = torch.tensor([
0.1_075, 0.1_074, 0.0_205, 0.0_431, -0.0_774, -0.0_607, 0.0_298, 0.2_042,
-0.0_320, 0.1_267, -0.0_281, -0.0_250, -0.0_064, -0.1_091, -0.0_946, 0.0_290,
0.1_328, 0.1_650, -0.0_580, -0.0_738, -0.0_586, 0.1_440, 0.0_337, -0.1_746,
-0.0_712, 0.0_605, 0.0_250, -0.0_099, -0.1_316, -0.1_473
])
lowerCAmelCase_ = torch.tensor([
-1.4_572, -2.0_481, -0.0_414, -0.6_005, 1.4_136, 0.5_848, 0.4_028, -2.7_330,
1.2_212, -2.1_228, 0.2_155, 0.4_039, 0.7_662, 2.0_535, 0.7_477, -0.3_243,
-2.1_758, -2.7_648, 1.6_947, 0.7_026, 1.2_338, -1.6_078, -0.8_682, 2.2_810,
1.8_574, -0.5_718, -0.5_586, -0.0_186, 2.3_415, 2.1_251])
lowerCAmelCase_ = torch.tensor([
-1.3_690, -1.9_720, -0.4_090, -0.6_966, 1.4_660, 0.9_938, -0.1_385, -2.7_324,
0.7_736, -1.8_917, 0.2_923, 0.4_293, 0.1_693, 1.4_112, 1.1_887, -0.3_181,
-2.2_160, -2.6_381, 1.3_170, 0.8_163, 0.9_240, -1.6_544, -0.6_099, 2.5_259,
1.6_430, -0.9_090, -0.9_392, -0.0_126, 2.4_268, 2.3_266
])
lowerCAmelCase_ = torch.tensor([
-1.3_525, -1.9_628, -0.3_956, -0.6_860, 1.4_664, 1.0_014, -0.1_259, -2.7_212,
0.7_772, -1.8_811, 0.2_996, 0.4_388, 0.1_704, 1.4_029, 1.1_701, -0.3_027,
-2.2_053, -2.6_287, 1.3_350, 0.8_131, 0.9_274, -1.6_292, -0.6_098, 2.5_131,
1.6_505, -0.8_958, -0.9_298, -0.0_151, 2.4_257, 2.3_355
])
lowerCAmelCase_ = torch.tensor([
-2.0_585, -2.7_897, -0.2_850, -0.8_940, 1.9_052, 0.5_702, 0.6_345, -3.8_959,
1.5_932, -3.2_319, 0.1_974, 0.0_287, 1.7_566, 2.6_543, 0.8_387, -0.5_351,
-3.2_736, -4.3_375, 2.9_029, 1.6_390, 1.4_640, -2.1_701, -1.9_013, 2.9_341,
3.4_981, -0.6_255, -1.1_644, -0.1_591, 3.7_097, 3.2_066
])
lowerCAmelCase_ = torch.tensor([
-2.3_139, -2.5_594, -0.0_197, -0.6_785, 1.7_001, 1.1_606, 0.3_075, -2.1_740,
1.8_071, -2.5_630, -0.0_926, -0.3_811, 1.2_116, 2.6_246, 1.2_731, -0.5_398,
-2.8_153, -3.6_140, 2.3_893, 1.3_262, 1.6_258, -2.1_856, -1.3_267, 2.8_395,
2.3_779, -1.0_623, -1.2_468, 0.8_959, 3.3_367, 3.2_243
])
lowerCAmelCase_ = torch.tensor([
-2.0_628, -2.7_667, -0.2_089, -0.8_263, 2.0_539, 0.5_992, 0.6_495, -3.8_336,
1.6_025, -3.2_817, 0.1_721, -0.0_633, 1.7_516, 2.7_039, 0.8_100, -0.5_908,
-3.2_113, -4.4_343, 2.9_257, 1.3_632, 1.5_562, -2.1_489, -1.9_894, 3.0_560,
3.3_396, -0.7_328, -1.0_417, 0.0_383, 3.7_093, 3.2_343
])
lowerCAmelCase_ = torch.tensor([
-1.4_574, -2.0_569, -0.0_473, -0.6_117, 1.4_018, 0.5_769, 0.4_129, -2.7_344,
1.2_241, -2.1_397, 0.2_000, 0.3_937, 0.7_616, 2.0_453, 0.7_324, -0.3_391,
-2.1_746, -2.7_744, 1.6_963, 0.6_921, 1.2_187, -1.6_172, -0.8_877, 2.2_439,
1.8_471, -0.5_839, -0.5_605, -0.0_464, 2.3_250, 2.1_219
])
# fmt: on
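# The loop below checks each converted diffusers UNet against the reference tensors above: it lists
# Hub models tagged "diffusers", keeps the Google-authored ones plus CompVis/ldm-celebahq-256, runs a
# seeded random noise tensor through the local checkpoint, and asserts the output slice matches (atol=1e-3).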
lowerCAmelCase_ = api.list_models(filter='''diffusers''')
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
lowerCAmelCase_ = '''/home/patrick/google_checkpoints/''' + mod.modelId.split('''/''')[-1]
print(F'''Started running {mod.modelId}!!!''')
if mod.modelId.startswith('''CompVis'''):
lowerCAmelCase_ = UNetaDModel.from_pretrained(local_checkpoint, subfolder='''unet''')
else:
lowerCAmelCase_ = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
lowerCAmelCase_ = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
lowerCAmelCase_ = torch.tensor([1_0] * noise.shape[0])
with torch.no_grad():
lowerCAmelCase_ = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :3_0], results['''_'''.join('''_'''.join(mod.modelId.split('''/''')).split('''-'''))], atol=1e-3
)
print(F'''{mod.modelId} has passed successfully!!!''')
| 60 |
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
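# CLI tests for `accelerate launch`: the user's default accelerate config is renamed out of the way
# before the tests and restored afterwards, so each launch runs against the bundled configs in
# tests/test_configs (plus a multi-GPU variant when more than one CUDA device is available).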
class __lowerCAmelCase ( unittest.TestCase ):
lowerCamelCase_ : Tuple = inspect.getfile(accelerate.test_utils )
lowerCamelCase_ : Optional[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_cli.py'''] )
lowerCamelCase_ : Union[str, Any] = ['''accelerate''', '''launch''']
lowerCamelCase_ : Tuple = Path.home() / '''.cache/huggingface/accelerate'''
lowerCamelCase_ : Tuple = '''default_config.yaml'''
lowerCamelCase_ : str = config_folder / config_file
lowerCamelCase_ : List[Any] = config_folder / '''_default_config.yaml'''
lowerCamelCase_ : Dict = Path('''tests/test_configs''' )
@classmethod
def lowerCamelCase (cls ) -> Dict:
'''simple docstring'''
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
def lowerCamelCase (cls ) -> Any:
'''simple docstring'''
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
def lowerCamelCase (self ) -> Tuple:
'''simple docstring'''
snake_case_ : Dict = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
for config in sorted(self.test_config_path.glob('''**/*.yaml''' ) ):
with self.subTest(config_file=__magic_name__ ):
execute_subprocess_async(
self.base_cmd + ['''--config_file''', str(__magic_name__ ), self.test_file_path] , env=os.environ.copy() )
def lowerCamelCase (self ) -> List[Any]:
'''simple docstring'''
execute_subprocess_async(['''accelerate''', '''test'''] , env=os.environ.copy() )
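# Tests for `accelerate tpu-config`: each case runs the CLI in --debug mode and asserts that the
# expected `gcloud compute tpus tpu-vm ssh` invocation (zone, command list, worker selector) shows up
# in the captured stdout.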
class __lowerCAmelCase ( unittest.TestCase ):
lowerCamelCase_ : List[str] = '''test-tpu'''
lowerCamelCase_ : Dict = '''us-central1-a'''
lowerCamelCase_ : Any = '''ls'''
lowerCamelCase_ : Dict = ['''accelerate''', '''tpu-config''']
lowerCamelCase_ : Tuple = '''cd /usr/share'''
lowerCamelCase_ : List[Any] = '''tests/test_samples/test_command_file.sh'''
lowerCamelCase_ : List[Any] = '''Running gcloud compute tpus tpu-vm ssh'''
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
snake_case_ : int = run_command(
self.cmd
+ ['''--command''', self.command, '''--tpu_zone''', self.tpu_zone, '''--tpu_name''', self.tpu_name, '''--debug'''] , return_stdout=__magic_name__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , __magic_name__ , )
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
snake_case_ : Optional[int] = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/0_12_0.yaml''',
'''--command''',
self.command,
'''--tpu_zone''',
self.tpu_zone,
'''--tpu_name''',
self.tpu_name,
'''--debug''',
] , return_stdout=__magic_name__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , __magic_name__ , )
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : List[str] = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--debug'''] , return_stdout=__magic_name__ )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , __magic_name__ , )
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : List[Any] = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command''', self.command, '''--debug'''] , return_stdout=__magic_name__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , __magic_name__ , )
def lowerCamelCase (self ) -> int:
'''simple docstring'''
snake_case_ : Any = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/latest.yaml''',
'''--command''',
self.command,
'''--command''',
'''echo "Hello World"''',
'''--debug''',
] , return_stdout=__magic_name__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all''' , __magic_name__ , )
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
snake_case_ : str = run_command(
self.cmd
+ ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command_file''', self.command_file, '''--debug'''] , return_stdout=__magic_name__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , __magic_name__ , )
def lowerCamelCase (self ) -> int:
'''simple docstring'''
snake_case_ : Tuple = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/0_12_0.yaml''',
'''--command_file''',
self.command_file,
'''--tpu_zone''',
self.tpu_zone,
'''--tpu_name''',
self.tpu_name,
'''--debug''',
] , return_stdout=__magic_name__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , __magic_name__ , )
def lowerCamelCase (self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Any = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--install_accelerate''', '''--debug'''] , return_stdout=__magic_name__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all''' , __magic_name__ , )
def lowerCamelCase (self ) -> str:
'''simple docstring'''
snake_case_ : Optional[Any] = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/latest.yaml''',
'''--install_accelerate''',
'''--accelerate_version''',
'''12.0.0''',
'''--debug''',
] , return_stdout=__magic_name__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all''' , __magic_name__ , )
| 60 | 1 |
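# Jaccard similarity: |A ∩ B| / |A ∪ B|. Sets are handled directly, lists/tuples keep their element
# order, and `alternative_union` swaps the denominator for len(A) + len(B).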
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=False ) -> Union[str, Any]:
"""simple docstring"""
if isinstance(_UpperCamelCase , _UpperCamelCase ) and isinstance(_UpperCamelCase , _UpperCamelCase ):
snake_case_ : List[str] = len(set_a.intersection(_UpperCamelCase ) )
if alternative_union:
snake_case_ : Any = len(_UpperCamelCase ) + len(_UpperCamelCase )
else:
snake_case_ : Dict = len(set_a.union(_UpperCamelCase ) )
return intersection / union
if isinstance(_UpperCamelCase , (list, tuple) ) and isinstance(_UpperCamelCase , (list, tuple) ):
snake_case_ : List[Any] = [element for element in set_a if element in set_b]
if alternative_union:
snake_case_ : Union[str, Any] = len(_UpperCamelCase ) + len(_UpperCamelCase )
return len(_UpperCamelCase ) / union
else:
snake_case_ : Any = set_a + [element for element in set_b if element not in set_a]
return len(_UpperCamelCase ) / len(_UpperCamelCase )
return len(_UpperCamelCase ) / len(_UpperCamelCase )
return None
if __name__ == "__main__":
lowerCAmelCase_ = {'''a''', '''b''', '''c''', '''d''', '''e'''}
lowerCAmelCase_ = {'''c''', '''d''', '''e''', '''f''', '''h''', '''i'''}
print(jaccard_similarity(set_a, set_b))
| 60 |
import warnings
from ..trainer import Trainer
from ..utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
class __lowerCAmelCase ( _a ):
def __init__(self , __magic_name__=None , **__magic_name__ ) -> Dict:
'''simple docstring'''
warnings.warn(
'''`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` '''
'''instead.''' , __magic_name__ , )
super().__init__(args=__magic_name__ , **__magic_name__ )
| 60 | 1 |
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
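# Keras layer that wraps keras-nlp's BytePairTokenizer with a GPT-2 vocab/merges so tokenization runs
# inside the TF graph; when pad_token_id is set, outputs are padded to max_length via pad_model_inputs
# and an attention mask is returned alongside the input ids.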
class __lowerCAmelCase ( tf.keras.layers.Layer ):
def __init__(self , __magic_name__ , __magic_name__ , __magic_name__ = None , __magic_name__ = None ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
snake_case_ : Any = pad_token_id
snake_case_ : Any = max_length
snake_case_ : Optional[Any] = vocab
snake_case_ : Dict = merges
snake_case_ : Dict = BytePairTokenizer(__magic_name__ , __magic_name__ , sequence_length=__magic_name__ )
@classmethod
def lowerCamelCase (cls , __magic_name__ , *__magic_name__ , **__magic_name__ ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : List[Any] = [''' '''.join(__magic_name__ ) for m in tokenizer.bpe_ranks.keys()]
snake_case_ : List[Any] = tokenizer.get_vocab()
return cls(__magic_name__ , __magic_name__ , *__magic_name__ , **__magic_name__ )
@classmethod
def lowerCamelCase (cls , __magic_name__ , *__magic_name__ , **__magic_name__ ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : List[Any] = GPTaTokenizer.from_pretrained(__magic_name__ , *__magic_name__ , **__magic_name__ )
return cls.from_tokenizer(__magic_name__ , *__magic_name__ , **__magic_name__ )
@classmethod
def lowerCamelCase (cls , __magic_name__ ) -> str:
'''simple docstring'''
return cls(**__magic_name__ )
def lowerCamelCase (self ) -> List[Any]:
'''simple docstring'''
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def lowerCamelCase (self , __magic_name__ , __magic_name__ = None ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.tf_tokenizer(__magic_name__ )
snake_case_ : Optional[Any] = tf.ones_like(__magic_name__ )
if self.pad_token_id is not None:
# pad the tokens up to max length
snake_case_ : Optional[Any] = max_length if max_length is not None else self.max_length
if max_length is not None:
snake_case_ , snake_case_ : Any = pad_model_inputs(
__magic_name__ , max_seq_length=__magic_name__ , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
| 60 |
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
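# Tests for datasets' fsspec integration: protocol registration, the compression filesystems
# (gzip/xz/zstd/bz2/lz4), chained zip/gzip URLs, and the Hugging Face HfFileSystem.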
def lowerCamelCase_ ( _UpperCamelCase ) -> Any:
"""simple docstring"""
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def lowerCamelCase_ ( ) -> Union[str, Any]:
"""simple docstring"""
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def lowerCamelCase_ ( ) -> Tuple:
"""simple docstring"""
snake_case_ : str = '''mock-s3-bucket'''
snake_case_ : str = f'''s3://{mock_bucket}'''
snake_case_ : Any = extract_path_from_uri(_UpperCamelCase )
assert dataset_path.startswith('''s3://''' ) is False
snake_case_ : Optional[Any] = '''./local/path'''
snake_case_ : List[str] = extract_path_from_uri(_UpperCamelCase )
assert dataset_path == new_dataset_path
def lowerCamelCase_ ( _UpperCamelCase ) -> str:
"""simple docstring"""
snake_case_ : Union[str, Any] = is_remote_filesystem(_UpperCamelCase )
assert is_remote is True
snake_case_ : Union[str, Any] = fsspec.filesystem('''file''' )
snake_case_ : int = is_remote_filesystem(_UpperCamelCase )
assert is_remote is False
@pytest.mark.parametrize('''compression_fs_class''' , _UpperCamelCase )
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Tuple:
"""simple docstring"""
snake_case_ : Optional[Any] = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_file, '''bz2''': bza_file, '''lz4''': lza_file}
snake_case_ : Optional[Any] = input_paths[compression_fs_class.protocol]
if input_path is None:
snake_case_ : List[Any] = f'''for \'{compression_fs_class.protocol}\' compression protocol, '''
if compression_fs_class.protocol == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(_UpperCamelCase )
snake_case_ : Dict = fsspec.filesystem(compression_fs_class.protocol , fo=_UpperCamelCase )
assert isinstance(_UpperCamelCase , _UpperCamelCase )
snake_case_ : int = os.path.basename(_UpperCamelCase )
snake_case_ : Any = expected_filename[: expected_filename.rindex('''.''' )]
assert fs.glob('''*''' ) == [expected_filename]
with fs.open(_UpperCamelCase , '''r''' , encoding='''utf-8''' ) as f, open(_UpperCamelCase , encoding='''utf-8''' ) as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize('''protocol''' , ['''zip''', '''gzip'''] )
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Optional[int]:
"""simple docstring"""
snake_case_ : Union[str, Any] = {'''zip''': zip_jsonl_path, '''gzip''': jsonl_gz_path}
snake_case_ : Any = compressed_file_paths[protocol]
snake_case_ : Any = '''dataset.jsonl'''
snake_case_ : Dict = f'''{protocol}://{member_file_path}::{compressed_file_path}'''
snake_case_ , *snake_case_ : Optional[Any] = fsspec.get_fs_token_paths(_UpperCamelCase )
assert fs.isfile(_UpperCamelCase )
assert not fs.isfile('''non_existing_''' + member_file_path )
@pytest.mark.integration
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Dict:
"""simple docstring"""
snake_case_ : Optional[int] = hf_api.dataset_info(_UpperCamelCase , token=_UpperCamelCase )
snake_case_ : List[str] = HfFileSystem(repo_info=_UpperCamelCase , token=_UpperCamelCase )
assert sorted(hffs.glob('''*''' ) ) == [".gitattributes", "data"]
assert hffs.isdir('''data''' )
assert hffs.isfile('''.gitattributes''' ) and hffs.isfile('''data/text_data.txt''' )
with open(_UpperCamelCase ) as f:
assert hffs.open('''data/text_data.txt''' , '''r''' ).read() == f.read()
def lowerCamelCase_ ( ) -> Any:
"""simple docstring"""
snake_case_ : Tuple = '''bz2'''
# Import module
import datasets.filesystems
# Overwrite protocol and reload
register_implementation(_UpperCamelCase , _UpperCamelCase , clobber=_UpperCamelCase )
with pytest.warns(_UpperCamelCase ) as warning_info:
importlib.reload(datasets.filesystems )
assert len(_UpperCamelCase ) == 1
assert (
str(warning_info[0].message )
== f'''A filesystem protocol was already set for {protocol} and will be overwritten.'''
)
| 60 | 1 |
from datetime import datetime
import matplotlib.pyplot as plt
import torch
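# Small helpers: freeze a module's parameters, pick a torch device (with a warning that MPS is
# unreliable), display an image with matplotlib, and return a HH:MM:SS timestamp string.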
def lowerCamelCase_ ( _UpperCamelCase ) -> List[str]:
"""simple docstring"""
for param in module.parameters():
snake_case_ : Dict = False
def lowerCamelCase_ ( ) -> Dict:
"""simple docstring"""
snake_case_ : Union[str, Any] = '''cuda''' if torch.cuda.is_available() else '''cpu'''
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
snake_case_ : Tuple = '''mps'''
if device == "mps":
print(
'''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
''' with generations.''' )
return device
def lowerCamelCase_ ( _UpperCamelCase ) -> str:
"""simple docstring"""
snake_case_ : List[str] = plt.imshow(_UpperCamelCase )
fig.axes.get_xaxis().set_visible(_UpperCamelCase )
fig.axes.get_yaxis().set_visible(_UpperCamelCase )
plt.show()
def lowerCamelCase_ ( ) -> Union[str, Any]:
"""simple docstring"""
snake_case_ : List[str] = datetime.now()
snake_case_ : Optional[Any] = current_time.strftime('''%H:%M:%S''' )
return timestamp
| 60 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
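# Composite encoder-decoder config: the nested encoder/decoder dicts are rebuilt via AutoConfig.for_model,
# and the classmethod below forces the decoder into is_decoder / add_cross_attention mode before
# combining the two configs.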
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : Optional[Any] = '''encoder-decoder'''
lowerCamelCase_ : Optional[Any] = True
def __init__(self , **__magic_name__ ) -> Optional[int]:
'''simple docstring'''
super().__init__(**__magic_name__ )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
snake_case_ : Any = kwargs.pop('''encoder''' )
snake_case_ : Tuple = encoder_config.pop('''model_type''' )
snake_case_ : Union[str, Any] = kwargs.pop('''decoder''' )
snake_case_ : Union[str, Any] = decoder_config.pop('''model_type''' )
from ..auto.configuration_auto import AutoConfig
snake_case_ : Optional[int] = AutoConfig.for_model(__magic_name__ , **__magic_name__ )
snake_case_ : List[str] = AutoConfig.for_model(__magic_name__ , **__magic_name__ )
snake_case_ : Any = True
@classmethod
def lowerCamelCase (cls , __magic_name__ , __magic_name__ , **__magic_name__ ) -> PretrainedConfig:
'''simple docstring'''
logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
snake_case_ : Tuple = True
snake_case_ : Optional[Any] = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **__magic_name__ )
def lowerCamelCase (self ) -> int:
'''simple docstring'''
snake_case_ : str = copy.deepcopy(self.__dict__ )
snake_case_ : Any = self.encoder.to_dict()
snake_case_ : Dict = self.decoder.to_dict()
snake_case_ : Union[str, Any] = self.__class__.model_type
return output
| 60 | 1 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger()
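# timm -> Transformers ResNet conversion: Tracker records the leaf modules hit during a forward pass,
# ModuleTransfer copies state dicts between matching source/destination layers, and the converted model
# is verified with torch.allclose before (optionally) being pushed to the Hub.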
@dataclass
class __lowerCAmelCase :
lowerCamelCase_ : nn.Module
lowerCamelCase_ : List[nn.Module] = field(default_factory=_a )
lowerCamelCase_ : list = field(default_factory=_a )
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ ) -> Dict:
'''simple docstring'''
snake_case_ : str = len(list(m.modules() ) ) == 1 or isinstance(__magic_name__ , nn.Convad ) or isinstance(__magic_name__ , nn.BatchNormad )
if has_not_submodules:
self.traced.append(__magic_name__ )
def __call__(self , __magic_name__ ) -> Optional[Any]:
'''simple docstring'''
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(__magic_name__ )
[x.remove() for x in self.handles]
return self
@property
def lowerCamelCase (self ) -> List[str]:
'''simple docstring'''
return list(filter(lambda __magic_name__ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class __lowerCAmelCase :
lowerCamelCase_ : nn.Module
lowerCamelCase_ : nn.Module
lowerCamelCase_ : int = 0
lowerCamelCase_ : List = field(default_factory=_a )
lowerCamelCase_ : List = field(default_factory=_a )
def __call__(self , __magic_name__ ) -> int:
'''simple docstring'''
snake_case_ : int = Tracker(self.dest )(__magic_name__ ).parametrized
snake_case_ : Optional[Any] = Tracker(self.src )(__magic_name__ ).parametrized
snake_case_ : Optional[Any] = list(filter(lambda __magic_name__ : type(__magic_name__ ) not in self.src_skip , __magic_name__ ) )
snake_case_ : Dict = list(filter(lambda __magic_name__ : type(__magic_name__ ) not in self.dest_skip , __magic_name__ ) )
if len(__magic_name__ ) != len(__magic_name__ ):
raise Exception(
F'''Numbers of operations are different. Source module has {len(__magic_name__ )} operations while'''
F''' destination module has {len(__magic_name__ )}.''' )
for dest_m, src_m in zip(__magic_name__ , __magic_name__ ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(F'''Transferred from={src_m} to={dest_m}''' )
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = True ) -> int:
"""simple docstring"""
print(f'''Converting {name}...''' )
with torch.no_grad():
snake_case_ : int = timm.create_model(_UpperCamelCase , pretrained=_UpperCamelCase ).eval()
snake_case_ : Any = ResNetForImageClassification(_UpperCamelCase ).eval()
snake_case_ : List[str] = ModuleTransfer(src=_UpperCamelCase , dest=_UpperCamelCase )
snake_case_ : List[str] = torch.randn((1, 3, 224, 224) )
module_transfer(_UpperCamelCase )
assert torch.allclose(from_model(_UpperCamelCase ) , our_model(_UpperCamelCase ).logits ), "The model logits don't match the original one."
snake_case_ : Any = f'''resnet{"-".join(name.split("resnet" ) )}'''
print(_UpperCamelCase )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add model''' , use_temp_dir=_UpperCamelCase , )
# we can use the convnext one
snake_case_ : Any = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add image processor''' , use_temp_dir=_UpperCamelCase , )
print(f'''Pushed {checkpoint_name}''' )
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = True ) -> str:
"""simple docstring"""
snake_case_ : str = '''imagenet-1k-id2label.json'''
snake_case_ : str = 1_000
snake_case_ : List[str] = (1, num_labels)
snake_case_ : int = '''huggingface/label-files'''
snake_case_ : Tuple = num_labels
snake_case_ : Optional[int] = json.load(open(hf_hub_download(_UpperCamelCase , _UpperCamelCase , repo_type='''dataset''' ) , '''r''' ) )
snake_case_ : Union[str, Any] = {int(_UpperCamelCase ): v for k, v in idalabel.items()}
snake_case_ : Union[str, Any] = idalabel
snake_case_ : str = {v: k for k, v in idalabel.items()}
snake_case_ : str = partial(_UpperCamelCase , num_labels=_UpperCamelCase , idalabel=_UpperCamelCase , labelaid=_UpperCamelCase )
snake_case_ : int = {
'''resnet18''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type='''basic''' ),
'''resnet26''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type='''bottleneck''' ),
'''resnet34''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type='''basic''' ),
'''resnet50''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type='''bottleneck''' ),
'''resnet101''': ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type='''bottleneck''' ),
'''resnet152''': ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type='''bottleneck''' ),
}
if model_name:
convert_weight_and_push(_UpperCamelCase , names_to_config[model_name] , _UpperCamelCase , _UpperCamelCase )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
return config, expected_shape
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert; it must be one of the supported resnet* architectures,'''
''' currently: resnet18,26,34,50,101,152. If `None`, all of them will be converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
lowerCAmelCase_ = parser.parse_args()
lowerCAmelCase_ = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 60 |
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
lowerCAmelCase_ = logging.get_logger(__name__)
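# RAG tokenizer wrapper: holds a question-encoder tokenizer and a generator tokenizer, saves/loads them
# from dedicated subfolders, and proxies __call__ / decode to whichever tokenizer is currently active.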
class __lowerCAmelCase :
def __init__(self , __magic_name__ , __magic_name__ ) -> List[Any]:
'''simple docstring'''
snake_case_ : Optional[int] = question_encoder
snake_case_ : Optional[int] = generator
snake_case_ : Optional[Any] = self.question_encoder
def lowerCamelCase (self , __magic_name__ ) -> Dict:
'''simple docstring'''
if os.path.isfile(__magic_name__ ):
raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''' )
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
snake_case_ : str = os.path.join(__magic_name__ , '''question_encoder_tokenizer''' )
snake_case_ : List[Any] = os.path.join(__magic_name__ , '''generator_tokenizer''' )
self.question_encoder.save_pretrained(__magic_name__ )
self.generator.save_pretrained(__magic_name__ )
@classmethod
def lowerCamelCase (cls , __magic_name__ , **__magic_name__ ) -> Any:
'''simple docstring'''
from ..auto.tokenization_auto import AutoTokenizer
snake_case_ : List[str] = kwargs.pop('''config''' , __magic_name__ )
if config is None:
snake_case_ : int = RagConfig.from_pretrained(__magic_name__ )
snake_case_ : Dict = AutoTokenizer.from_pretrained(
__magic_name__ , config=config.question_encoder , subfolder='''question_encoder_tokenizer''' )
snake_case_ : Dict = AutoTokenizer.from_pretrained(
__magic_name__ , config=config.generator , subfolder='''generator_tokenizer''' )
return cls(question_encoder=__magic_name__ , generator=__magic_name__ )
def __call__(self , *__magic_name__ , **__magic_name__ ) -> Tuple:
'''simple docstring'''
return self.current_tokenizer(*__magic_name__ , **__magic_name__ )
def lowerCamelCase (self , *__magic_name__ , **__magic_name__ ) -> Dict:
'''simple docstring'''
return self.generator.batch_decode(*__magic_name__ , **__magic_name__ )
def lowerCamelCase (self , *__magic_name__ , **__magic_name__ ) -> int:
'''simple docstring'''
return self.generator.decode(*__magic_name__ , **__magic_name__ )
def lowerCamelCase (self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Any = self.question_encoder
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
snake_case_ : Dict = self.generator
def lowerCamelCase (self , __magic_name__ , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = "longest" , __magic_name__ = None , __magic_name__ = True , **__magic_name__ , ) -> BatchEncoding:
'''simple docstring'''
warnings.warn(
'''`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '''
'''regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '''
'''context manager to prepare your targets. See the documentation of your specific tokenizer for more '''
'''details''' , __magic_name__ , )
if max_length is None:
snake_case_ : Dict = self.current_tokenizer.model_max_length
snake_case_ : List[str] = self(
__magic_name__ , add_special_tokens=__magic_name__ , return_tensors=__magic_name__ , max_length=__magic_name__ , padding=__magic_name__ , truncation=__magic_name__ , **__magic_name__ , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
snake_case_ : Optional[int] = self.current_tokenizer.model_max_length
snake_case_ : Union[str, Any] = self(
text_target=__magic_name__ , add_special_tokens=__magic_name__ , return_tensors=__magic_name__ , padding=__magic_name__ , max_length=__magic_name__ , truncation=__magic_name__ , **__magic_name__ , )
snake_case_ : str = labels['''input_ids''']
return model_inputs
| 60 | 1 |
import sys
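# Matrix chain multiplication by dynamic programming: matrix[a][b] stores the minimum scalar
# multiplication cost for the sub-chain a..b, and the companion table records the split point used to
# print the optimal parenthesisation.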
def lowerCamelCase_ ( _UpperCamelCase ) -> Tuple:
"""simple docstring"""
snake_case_ : Union[str, Any] = len(_UpperCamelCase )
snake_case_ : List[Any] = [[0 for x in range(_UpperCamelCase )] for x in range(_UpperCamelCase )]
snake_case_ : List[Any] = [[0 for x in range(_UpperCamelCase )] for x in range(_UpperCamelCase )]
for chain_length in range(2 , _UpperCamelCase ):
for a in range(1 , n - chain_length + 1 ):
snake_case_ : Dict = a + chain_length - 1
snake_case_ : Tuple = sys.maxsize
for c in range(_UpperCamelCase , _UpperCamelCase ):
snake_case_ : Dict = (
matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
)
if cost < matrix[a][b]:
snake_case_ : List[str] = cost
snake_case_ : int = c
return matrix, sol
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Any:
"""simple docstring"""
if i == j:
print('''A''' + str(_UpperCamelCase ) , end=''' ''' )
else:
print('''(''' , end=''' ''' )
print_optiomal_solution(_UpperCamelCase , _UpperCamelCase , optimal_solution[i][j] )
print_optiomal_solution(_UpperCamelCase , optimal_solution[i][j] + 1 , _UpperCamelCase )
print(''')''' , end=''' ''' )
def lowerCamelCase_ ( ) -> Union[str, Any]:
"""simple docstring"""
snake_case_ : Tuple = [30, 35, 15, 5, 10, 20, 25]
snake_case_ : str = len(_UpperCamelCase )
# Size of matrix created from above array will be
# 30*35 35*15 15*5 5*10 10*20 20*25
snake_case_ , snake_case_ : Optional[Any] = matrix_chain_order(_UpperCamelCase )
print('''No. of Operations required: ''' + str(matrix[1][n - 1] ) )
print_optiomal_solution(_UpperCamelCase , 1 , n - 1 )
if __name__ == "__main__":
main()
| 60 |
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
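# ViT-MSN model tests: a small synthetic config and random pixel values drive shape checks for the base
# model and the image-classification head, followed by a slow integration test against the
# facebook/vit-msn-small checkpoint.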
class __lowerCAmelCase :
def __init__(self , __magic_name__ , __magic_name__=13 , __magic_name__=30 , __magic_name__=2 , __magic_name__=3 , __magic_name__=True , __magic_name__=True , __magic_name__=32 , __magic_name__=5 , __magic_name__=4 , __magic_name__=37 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=10 , __magic_name__=0.02 , __magic_name__=None , ) -> List[Any]:
'''simple docstring'''
snake_case_ : List[str] = parent
snake_case_ : Optional[Any] = batch_size
snake_case_ : List[Any] = image_size
snake_case_ : Optional[int] = patch_size
snake_case_ : Optional[Any] = num_channels
snake_case_ : Optional[Any] = is_training
snake_case_ : List[Any] = use_labels
snake_case_ : Optional[int] = hidden_size
snake_case_ : Optional[Any] = num_hidden_layers
snake_case_ : Union[str, Any] = num_attention_heads
snake_case_ : Optional[Any] = intermediate_size
snake_case_ : Any = hidden_act
snake_case_ : List[str] = hidden_dropout_prob
snake_case_ : Dict = attention_probs_dropout_prob
snake_case_ : List[str] = type_sequence_label_size
snake_case_ : Union[str, Any] = initializer_range
snake_case_ : List[Any] = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
snake_case_ : Any = (image_size // patch_size) ** 2
snake_case_ : int = num_patches + 1
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ : List[Any] = None
if self.use_labels:
snake_case_ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ : int = self.get_config()
return config, pixel_values, labels
def lowerCamelCase (self ) -> Tuple:
'''simple docstring'''
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ ) -> List[str]:
'''simple docstring'''
snake_case_ : int = ViTMSNModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
snake_case_ : List[str] = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ ) -> List[str]:
'''simple docstring'''
snake_case_ : int = self.type_sequence_label_size
snake_case_ : Tuple = ViTMSNForImageClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
snake_case_ : Any = model(__magic_name__ , labels=__magic_name__ )
print('''Pixel and labels shape: {pixel_values.shape}, {labels.shape}''' )
print('''Labels: {labels}''' )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
snake_case_ : Optional[int] = 1
snake_case_ : List[str] = ViTMSNForImageClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
snake_case_ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case_ : Any = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
snake_case_ : Any = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ : Optional[int] = config_and_inputs
snake_case_ : Union[str, Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( _a, _a, unittest.TestCase ):
lowerCamelCase_ : List[Any] = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
lowerCamelCase_ : Optional[int] = (
{'''feature-extraction''': ViTMSNModel, '''image-classification''': ViTMSNForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase_ : int = False
lowerCamelCase_ : Optional[int] = False
lowerCamelCase_ : int = False
lowerCamelCase_ : Optional[int] = False
def lowerCamelCase (self ) -> str:
'''simple docstring'''
snake_case_ : List[Any] = ViTMSNModelTester(self )
snake_case_ : Optional[Any] = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ , hidden_size=37 )
def lowerCamelCase (self ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMSN does not use inputs_embeds''' )
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
pass
def lowerCamelCase (self ) -> int:
'''simple docstring'''
snake_case_ , snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : Any = model_class(__magic_name__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case_ : Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__magic_name__ , nn.Linear ) )
def lowerCamelCase (self ) -> int:
'''simple docstring'''
snake_case_ , snake_case_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : Tuple = model_class(__magic_name__ )
snake_case_ : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ : Optional[int] = [*signature.parameters.keys()]
snake_case_ : List[str] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __magic_name__ )
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
snake_case_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def lowerCamelCase (self ) -> List[str]:
'''simple docstring'''
snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__magic_name__ )
@slow
def lowerCamelCase (self ) -> Any:
'''simple docstring'''
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ : str = ViTMSNModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def lowerCamelCase_ ( ) -> Optional[Any]:
"""simple docstring"""
snake_case_ : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
@cached_property
def lowerCamelCase (self ) -> Union[str, Any]:
'''simple docstring'''
return ViTImageProcessor.from_pretrained('''facebook/vit-msn-small''' ) if is_vision_available() else None
@slow
def lowerCamelCase (self ) -> Any:
'''simple docstring'''
torch.manual_seed(2 )
snake_case_ : List[str] = ViTMSNForImageClassification.from_pretrained('''facebook/vit-msn-small''' ).to(__magic_name__ )
snake_case_ : str = self.default_image_processor
snake_case_ : str = prepare_img()
snake_case_ : int = image_processor(images=__magic_name__ , return_tensors='''pt''' ).to(__magic_name__ )
# forward pass
with torch.no_grad():
snake_case_ : Optional[int] = model(**__magic_name__ )
# verify the logits
snake_case_ : Optional[int] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __magic_name__ )
snake_case_ : List[Any] = torch.tensor([-0.0_803, -0.4_454, -0.2_375] ).to(__magic_name__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __magic_name__ , atol=1e-4 ) )
| 60 | 1 |
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 60 |
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''google/efficientnet-b7''': '''https://huggingface.co/google/efficientnet-b7/resolve/main/config.json''',
}
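# EfficientNet configuration: per-stage kernel sizes, channel counts, strides, repeats and expand ratios
# plus the usual classifier/regularisation hyper-parameters; the ONNX config below declares the
# pixel_values input axes.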
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : List[Any] = '''efficientnet'''
def __init__(self , __magic_name__ = 3 , __magic_name__ = 600 , __magic_name__ = 2.0 , __magic_name__ = 3.1 , __magic_name__ = 8 , __magic_name__ = [3, 3, 5, 3, 5, 5, 3] , __magic_name__ = [32, 16, 24, 40, 80, 112, 192] , __magic_name__ = [16, 24, 40, 80, 112, 192, 320] , __magic_name__ = [] , __magic_name__ = [1, 2, 2, 2, 1, 2, 1] , __magic_name__ = [1, 2, 2, 3, 3, 4, 1] , __magic_name__ = [1, 6, 6, 6, 6, 6, 6] , __magic_name__ = 0.25 , __magic_name__ = "swish" , __magic_name__ = 2560 , __magic_name__ = "mean" , __magic_name__ = 0.02 , __magic_name__ = 0.001 , __magic_name__ = 0.99 , __magic_name__ = 0.5 , __magic_name__ = 0.2 , **__magic_name__ , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**__magic_name__ )
snake_case_ : List[str] = num_channels
snake_case_ : Tuple = image_size
snake_case_ : Union[str, Any] = width_coefficient
snake_case_ : Tuple = depth_coefficient
snake_case_ : Optional[Any] = depth_divisor
snake_case_ : Optional[int] = kernel_sizes
snake_case_ : str = in_channels
snake_case_ : Optional[Any] = out_channels
snake_case_ : int = depthwise_padding
snake_case_ : Optional[Any] = strides
snake_case_ : Any = num_block_repeats
snake_case_ : Optional[Any] = expand_ratios
snake_case_ : Union[str, Any] = squeeze_expansion_ratio
snake_case_ : Union[str, Any] = hidden_act
snake_case_ : Union[str, Any] = hidden_dim
snake_case_ : Any = pooling_type
snake_case_ : List[str] = initializer_range
snake_case_ : str = batch_norm_eps
snake_case_ : Optional[int] = batch_norm_momentum
snake_case_ : Optional[Any] = dropout_rate
snake_case_ : List[str] = drop_connect_rate
snake_case_ : Union[str, Any] = sum(__magic_name__ ) * 4
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : Union[str, Any] = version.parse('''1.11''' )
@property
def lowerCamelCase (self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def lowerCamelCase (self ) -> float:
'''simple docstring'''
return 1e-5
| 60 | 1 |
from collections.abc import Iterable
from typing import Any
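# Plain binary search tree: Node keeps value/parent/left/right pointers; the tree supports insert,
# search, min/max lookup, removal (leaf, single-child and two-child cases) and pre-/in-order traversal,
# with a small demo under __main__.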
class __lowerCAmelCase :
def __init__(self , __magic_name__ = None ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Optional[int] = value
snake_case_ : Node | None = None # Added in order to delete a node easier
snake_case_ : Node | None = None
snake_case_ : Node | None = None
def __repr__(self ) -> str:
'''simple docstring'''
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({F'''{self.value}''': (self.left, self.right)} , indent=1 )
class __lowerCAmelCase :
def __init__(self , __magic_name__ = None ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Any = root
def __str__(self ) -> str:
'''simple docstring'''
return str(self.root )
def lowerCamelCase (self , __magic_name__ , __magic_name__ ) -> None:
'''simple docstring'''
if new_children is not None: # reset its kids
snake_case_ : List[Any] = node.parent
if node.parent is not None: # reset its parent
if self.is_right(__magic_name__ ): # If it is the right children
snake_case_ : List[Any] = new_children
else:
snake_case_ : List[Any] = new_children
else:
snake_case_ : int = new_children
def lowerCamelCase (self , __magic_name__ ) -> bool:
'''simple docstring'''
if node.parent and node.parent.right:
return node == node.parent.right
return False
def lowerCamelCase (self ) -> bool:
'''simple docstring'''
return self.root is None
def lowerCamelCase (self , __magic_name__ ) -> None:
'''simple docstring'''
snake_case_ : Any = Node(__magic_name__ ) # create a new Node
if self.empty(): # if Tree is empty
snake_case_ : int = new_node # set its root
else: # Tree is not empty
snake_case_ : Optional[int] = self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
snake_case_ : str = new_node # We insert the new node in a leaf
break
else:
snake_case_ : int = parent_node.left
else:
if parent_node.right is None:
snake_case_ : Union[str, Any] = new_node
break
else:
snake_case_ : Optional[Any] = parent_node.right
snake_case_ : Optional[Any] = parent_node
def lowerCamelCase (self , *__magic_name__ ) -> None:
'''simple docstring'''
for value in values:
self.__insert(__magic_name__ )
def lowerCamelCase (self , __magic_name__ ) -> Node | None:
'''simple docstring'''
if self.empty():
raise IndexError('''Warning: Tree is empty! Please insert values before searching.''' )
else:
snake_case_ : int = self.root
# use lazy evaluation here to avoid NoneType Attribute error
while node is not None and node.value is not value:
snake_case_ : Tuple = node.left if value < node.value else node.right
return node
def lowerCamelCase (self , __magic_name__ = None ) -> Node | None:
'''simple docstring'''
if node is None:
if self.root is None:
return None
snake_case_ : Optional[int] = self.root
if not self.empty():
while node.right is not None:
snake_case_ : Dict = node.right
return node
def lowerCamelCase (self , __magic_name__ = None ) -> Node | None:
'''simple docstring'''
if node is None:
snake_case_ : Dict = self.root
if self.root is None:
return None
if not self.empty():
snake_case_ : Union[str, Any] = self.root
while node.left is not None:
snake_case_ : Tuple = node.left
return node
def lowerCamelCase (self , __magic_name__ ) -> None:
'''simple docstring'''
snake_case_ : int = self.search(__magic_name__ ) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(__magic_name__ , __magic_name__ )
elif node.left is None: # Has only right children
self.__reassign_nodes(__magic_name__ , node.right )
elif node.right is None: # Has only left children
self.__reassign_nodes(__magic_name__ , node.left )
else:
snake_case_ : Optional[Any] = self.get_max(
node.left ) # Gets the max value of the left branch
self.remove(tmp_node.value ) # type: ignore
snake_case_ : List[str] = (
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
def lowerCamelCase (self , __magic_name__ ) -> Iterable:
'''simple docstring'''
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left )
yield from self.preorder_traverse(node.right )
def lowerCamelCase (self , __magic_name__=None ) -> Any:
'''simple docstring'''
if traversal_function is None:
return self.preorder_traverse(self.root )
else:
return traversal_function(self.root )
def lowerCamelCase (self , __magic_name__ , __magic_name__ ) -> None:
'''simple docstring'''
if node:
self.inorder(__magic_name__ , node.left )
arr.append(node.value )
self.inorder(__magic_name__ , node.right )
def lowerCamelCase (self , __magic_name__ , __magic_name__ ) -> int:
'''simple docstring'''
snake_case_ : list[int] = []
self.inorder(__magic_name__ , __magic_name__ ) # append all values to list using inorder traversal
return arr[k - 1]
def lowerCamelCase_ ( _UpperCamelCase ) -> list[Node]:
"""simple docstring"""
snake_case_ : str = []
if curr_node is not None:
snake_case_ : Tuple = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
return node_list
def lowerCamelCase_ ( ) -> None:
"""simple docstring"""
snake_case_ : Optional[int] = (8, 3, 6, 1, 10, 14, 13, 4, 7)
snake_case_ : Optional[Any] = BinarySearchTree()
for i in testlist:
t.insert(_UpperCamelCase )
# Prints all the elements of the list in order traversal
print(_UpperCamelCase )
if t.search(6 ) is not None:
print('''The value 6 exists''' )
else:
print('''The value 6 doesn\'t exist''' )
if t.search(-1 ) is not None:
print('''The value -1 exists''' )
else:
print('''The value -1 doesn\'t exist''' )
if not t.empty():
print('''Max Value: ''' , t.get_max().value ) # type: ignore
print('''Min Value: ''' , t.get_min().value ) # type: ignore
for i in testlist:
t.remove(_UpperCamelCase )
print(_UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 60 |
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
lowerCAmelCase_ = logging.getLogger(__name__)
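# Counts token occurrences over a binarized dataset and dumps a vocab-sized list of counts, used to
# smooth the MLM masking probabilities (see the argparse help below).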
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser(
description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'''
)
parser.add_argument(
'''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.'''
)
parser.add_argument(
'''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.'''
)
parser.add_argument('''--vocab_size''', default=3_0_5_2_2, type=int)
lowerCAmelCase_ = parser.parse_args()
logger.info(F'''Loading data from {args.data_file}''')
with open(args.data_file, '''rb''') as fp:
lowerCAmelCase_ = pickle.load(fp)
logger.info('''Counting occurrences for MLM.''')
lowerCAmelCase_ = Counter()
for tk_ids in data:
counter.update(tk_ids)
lowerCAmelCase_ = [0] * args.vocab_size
for k, v in counter.items():
lowerCAmelCase_ = v
logger.info(F'''Dump to {args.token_counts_dump}''')
with open(args.token_counts_dump, '''wb''') as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 60 | 1 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
lowerCAmelCase_ = logging.getLogger(__name__)
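# HANS dataset utilities: InputExample/InputFeatures dataclasses plus PyTorch and TensorFlow dataset
# wrappers that read heuristics_{train,evaluation}_set.txt, tokenize to a fixed max length, and cache
# the features to disk under a file lock.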
@dataclass(frozen=_a )
class __lowerCAmelCase :
lowerCamelCase_ : str
lowerCamelCase_ : str
lowerCamelCase_ : Optional[str] = None
lowerCamelCase_ : Optional[str] = None
lowerCamelCase_ : Optional[str] = None
@dataclass(frozen=_a )
class __lowerCAmelCase :
lowerCamelCase_ : List[int]
lowerCamelCase_ : Optional[List[int]] = None
lowerCamelCase_ : Optional[List[int]] = None
lowerCamelCase_ : Optional[Union[int, float]] = None
lowerCamelCase_ : Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : List[InputFeatures]
def __init__(self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = None , __magic_name__=False , __magic_name__ = False , ) -> int:
'''simple docstring'''
snake_case_ : List[str] = hans_processors[task]()
snake_case_ : str = os.path.join(
__magic_name__ , '''cached_{}_{}_{}_{}'''.format(
'''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(__magic_name__ ) , __magic_name__ , ) , )
snake_case_ : List[Any] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
snake_case_ , snake_case_ : Any = label_list[2], label_list[1]
snake_case_ : Any = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
snake_case_ : List[Any] = cached_features_file + '''.lock'''
with FileLock(__magic_name__ ):
if os.path.exists(__magic_name__ ) and not overwrite_cache:
logger.info(F'''Loading features from cached file {cached_features_file}''' )
snake_case_ : int = torch.load(__magic_name__ )
else:
logger.info(F'''Creating features from dataset file at {data_dir}''' )
snake_case_ : List[str] = (
processor.get_dev_examples(__magic_name__ ) if evaluate else processor.get_train_examples(__magic_name__ )
)
logger.info('''Training examples: %s''' , len(__magic_name__ ) )
snake_case_ : List[str] = hans_convert_examples_to_features(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
logger.info('''Saving features into cached file %s''' , __magic_name__ )
torch.save(self.features , __magic_name__ )
def __len__(self ) -> List[str]:
'''simple docstring'''
return len(self.features )
def __getitem__(self , __magic_name__ ) -> InputFeatures:
'''simple docstring'''
return self.features[i]
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
return self.label_list
if is_tf_available():
import tensorflow as tf
class __lowerCAmelCase :
lowerCamelCase_ : List[InputFeatures]
def __init__(self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = 128 , __magic_name__=False , __magic_name__ = False , ) -> List[str]:
'''simple docstring'''
snake_case_ : int = hans_processors[task]()
snake_case_ : int = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
snake_case_ , snake_case_ : Any = label_list[2], label_list[1]
snake_case_ : Any = label_list
snake_case_ : List[Any] = processor.get_dev_examples(__magic_name__ ) if evaluate else processor.get_train_examples(__magic_name__ )
snake_case_ : Dict = hans_convert_examples_to_features(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ):
if ex_index % 1_0000 == 0:
logger.info('''Writing example %d of %d''' % (ex_index, len(__magic_name__ )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
snake_case_ : str = tf.data.Dataset.from_generator(
__magic_name__ , (
{
'''example_id''': tf.intaa,
'''input_ids''': tf.intaa,
'''attention_mask''': tf.intaa,
'''token_type_ids''': tf.intaa,
},
tf.intaa,
) , (
{
'''example_id''': tf.TensorShape([] ),
'''input_ids''': tf.TensorShape([None, None] ),
'''attention_mask''': tf.TensorShape([None, None] ),
'''token_type_ids''': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def lowerCamelCase (self ) -> Union[str, Any]:
'''simple docstring'''
return self.dataset
def __len__(self ) -> int:
'''simple docstring'''
return len(self.features )
def __getitem__(self , __magic_name__ ) -> InputFeatures:
'''simple docstring'''
return self.features[i]
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
return self.label_list
class __lowerCAmelCase ( _a ):
def lowerCamelCase (self , __magic_name__ ) -> Dict:
'''simple docstring'''
return self._create_examples(self._read_tsv(os.path.join(__magic_name__ , '''heuristics_train_set.txt''' ) ) , '''train''' )
def lowerCamelCase (self , __magic_name__ ) -> List[str]:
'''simple docstring'''
return self._create_examples(self._read_tsv(os.path.join(__magic_name__ , '''heuristics_evaluation_set.txt''' ) ) , '''dev''' )
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
return ["contradiction", "entailment", "neutral"]
def lowerCamelCase (self , __magic_name__ , __magic_name__ ) -> Any:
'''simple docstring'''
snake_case_ : List[Any] = []
for i, line in enumerate(__magic_name__ ):
if i == 0:
continue
snake_case_ : List[Any] = '''%s-%s''' % (set_type, line[0])
snake_case_ : int = line[5]
snake_case_ : Tuple = line[6]
snake_case_ : Any = line[7][2:] if line[7].startswith('''ex''' ) else line[7]
snake_case_ : List[str] = line[0]
examples.append(InputExample(guid=__magic_name__ , text_a=__magic_name__ , text_b=__magic_name__ , label=__magic_name__ , pairID=__magic_name__ ) )
return examples
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> Any:
"""simple docstring"""
snake_case_ : Optional[Any] = {label: i for i, label in enumerate(_UpperCamelCase )}
snake_case_ : Any = []
for ex_index, example in tqdm.tqdm(enumerate(_UpperCamelCase ) , desc='''convert examples to features''' ):
if ex_index % 10_000 == 0:
logger.info('''Writing example %d''' % (ex_index) )
snake_case_ : int = tokenizer(
example.text_a , example.text_b , add_special_tokens=_UpperCamelCase , max_length=_UpperCamelCase , padding='''max_length''' , truncation=_UpperCamelCase , return_overflowing_tokens=_UpperCamelCase , )
snake_case_ : List[Any] = label_map[example.label] if example.label in label_map else 0
snake_case_ : Any = int(example.pairID )
features.append(InputFeatures(**_UpperCamelCase , label=_UpperCamelCase , pairID=_UpperCamelCase ) )
for i, example in enumerate(examples[:5] ):
logger.info('''*** Example ***''' )
logger.info(f'''guid: {example}''' )
logger.info(f'''features: {features[i]}''' )
return features
lowerCAmelCase_ = {
'''hans''': 3,
}
lowerCAmelCase_ = {
'''hans''': HansProcessor,
}
| 60 |
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
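# SentencePiece-style Unigram tokenizer built on the tokenizers library: NMT/NFKC/lowercase
# normalization, Metaspace + digit + punctuation pre-tokenization, an EOS-appending post-processor,
# and train / train_from_iterator helpers that re-attach the <unk> id after training.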
class __lowerCAmelCase ( _a ):
def __init__(self , __magic_name__ = "▁" , __magic_name__ = True , __magic_name__ = "<unk>" , __magic_name__ = "</s>" , __magic_name__ = "<pad>" , ) -> Dict:
'''simple docstring'''
snake_case_ : List[Any] = {
'''pad''': {'''id''': 0, '''token''': pad_token},
'''eos''': {'''id''': 1, '''token''': eos_token},
'''unk''': {'''id''': 2, '''token''': unk_token},
}
snake_case_ : List[str] = [None] * len(self.special_tokens )
for token_dict in self.special_tokens.values():
snake_case_ : int = token_dict['''token''']
snake_case_ : Optional[int] = Tokenizer(Unigram() )
snake_case_ : int = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(''' {2,}''' ) , ''' ''' ),
normalizers.Lowercase(),
] )
snake_case_ : Optional[int] = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=__magic_name__ , add_prefix_space=__magic_name__ ),
pre_tokenizers.Digits(individual_digits=__magic_name__ ),
pre_tokenizers.Punctuation(),
] )
snake_case_ : Tuple = decoders.Metaspace(replacement=__magic_name__ , add_prefix_space=__magic_name__ )
snake_case_ : Optional[Any] = TemplateProcessing(
single=F'''$A {self.special_tokens["eos"]["token"]}''' , special_tokens=[(self.special_tokens['''eos''']['''token'''], self.special_tokens['''eos''']['''id'''])] , )
snake_case_ : Optional[Any] = {
'''model''': '''SentencePieceUnigram''',
'''replacement''': replacement,
'''add_prefix_space''': add_prefix_space,
}
super().__init__(__magic_name__ , __magic_name__ )
def lowerCamelCase (self , __magic_name__ , __magic_name__ = 8000 , __magic_name__ = True , ) -> List[str]:
'''simple docstring'''
snake_case_ : Tuple = trainers.UnigramTrainer(
vocab_size=__magic_name__ , special_tokens=self.special_tokens_list , show_progress=__magic_name__ , )
if isinstance(__magic_name__ , __magic_name__ ):
snake_case_ : Dict = [files]
self._tokenizer.train(__magic_name__ , trainer=__magic_name__ )
self.add_unk_id()
def lowerCamelCase (self , __magic_name__ , __magic_name__ = 8000 , __magic_name__ = True , ) -> int:
'''simple docstring'''
snake_case_ : Any = trainers.UnigramTrainer(
vocab_size=__magic_name__ , special_tokens=self.special_tokens_list , show_progress=__magic_name__ , )
self._tokenizer.train_from_iterator(__magic_name__ , trainer=__magic_name__ )
self.add_unk_id()
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Tuple = json.loads(self._tokenizer.to_str() )
snake_case_ : Union[str, Any] = self.special_tokens['''unk''']['''id''']
snake_case_ : Tuple = Tokenizer.from_str(json.dumps(__magic_name__ ) )
| 60 | 1 |
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class __lowerCAmelCase ( unittest.TestCase ):
lowerCamelCase_ : List[str] = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
lowerCamelCase_ : Union[str, Any] = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : int = AudioClassificationPipeline(model=__magic_name__ , feature_extractor=__magic_name__ )
# test with a raw waveform
snake_case_ : Any = np.zeros((3_4000,) )
snake_case_ : Dict = np.zeros((1_4000,) )
return audio_classifier, [audioa, audio]
def lowerCamelCase (self , __magic_name__ , __magic_name__ ) -> List[Any]:
'''simple docstring'''
snake_case_ , snake_case_ : str = examples
snake_case_ : Optional[Any] = audio_classifier(__magic_name__ )
# by default a model is initialized with num_labels=2
self.assertEqual(
__magic_name__ , [
{'''score''': ANY(__magic_name__ ), '''label''': ANY(__magic_name__ )},
{'''score''': ANY(__magic_name__ ), '''label''': ANY(__magic_name__ )},
] , )
snake_case_ : Any = audio_classifier(__magic_name__ , top_k=1 )
self.assertEqual(
__magic_name__ , [
{'''score''': ANY(__magic_name__ ), '''label''': ANY(__magic_name__ )},
] , )
self.run_torchaudio(__magic_name__ )
@require_torchaudio
def lowerCamelCase (self , __magic_name__ ) -> Any:
'''simple docstring'''
import datasets
# test with a local file
snake_case_ : Union[str, Any] = datasets.load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
snake_case_ : Any = dataset[0]['''audio''']['''array''']
snake_case_ : Union[str, Any] = audio_classifier(__magic_name__ )
self.assertEqual(
__magic_name__ , [
{'''score''': ANY(__magic_name__ ), '''label''': ANY(__magic_name__ )},
{'''score''': ANY(__magic_name__ ), '''label''': ANY(__magic_name__ )},
] , )
@require_torch
def lowerCamelCase (self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Any = '''anton-l/wav2vec2-random-tiny-classifier'''
snake_case_ : Optional[Any] = pipeline('''audio-classification''' , model=__magic_name__ )
snake_case_ : Dict = np.ones((8000,) )
snake_case_ : Any = audio_classifier(__magic_name__ , top_k=4 )
snake_case_ : Optional[int] = [
{'''score''': 0.0_842, '''label''': '''no'''},
{'''score''': 0.0_838, '''label''': '''up'''},
{'''score''': 0.0_837, '''label''': '''go'''},
{'''score''': 0.0_834, '''label''': '''right'''},
]
snake_case_ : int = [
{'''score''': 0.0_845, '''label''': '''stop'''},
{'''score''': 0.0_844, '''label''': '''on'''},
{'''score''': 0.0_841, '''label''': '''right'''},
{'''score''': 0.0_834, '''label''': '''left'''},
]
self.assertIn(nested_simplify(__magic_name__ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
snake_case_ : List[str] = {'''array''': np.ones((8000,) ), '''sampling_rate''': audio_classifier.feature_extractor.sampling_rate}
snake_case_ : Tuple = audio_classifier(__magic_name__ , top_k=4 )
self.assertIn(nested_simplify(__magic_name__ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
@require_torch
@slow
def lowerCamelCase (self ) -> Union[str, Any]:
'''simple docstring'''
import datasets
snake_case_ : Tuple = '''superb/wav2vec2-base-superb-ks'''
snake_case_ : Union[str, Any] = pipeline('''audio-classification''' , model=__magic_name__ )
snake_case_ : Optional[Any] = datasets.load_dataset('''anton-l/superb_dummy''' , '''ks''' , split='''test''' )
snake_case_ : str = np.array(dataset[3]['''speech'''] , dtype=np.floataa )
snake_case_ : str = audio_classifier(__magic_name__ , top_k=4 )
self.assertEqual(
nested_simplify(__magic_name__ , decimals=3 ) , [
{'''score''': 0.981, '''label''': '''go'''},
{'''score''': 0.007, '''label''': '''up'''},
{'''score''': 0.006, '''label''': '''_unknown_'''},
{'''score''': 0.001, '''label''': '''down'''},
] , )
@require_tf
@unittest.skip('''Audio classification is not implemented for TF''' )
def lowerCamelCase (self ) -> Any:
'''simple docstring'''
pass
| 60 |
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V, such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of the same set.
def lowerCamelCase_ ( _UpperCamelCase ) -> Optional[int]:
"""simple docstring"""
snake_case_ : List[Any] = [False] * len(_UpperCamelCase )
snake_case_ : int = [-1] * len(_UpperCamelCase )
def dfs(_UpperCamelCase , _UpperCamelCase ):
snake_case_ : Dict = True
snake_case_ : Dict = c
for u in graph[v]:
if not visited[u]:
dfs(_UpperCamelCase , 1 - c )
for i in range(len(_UpperCamelCase ) ):
if not visited[i]:
dfs(_UpperCamelCase , 0 )
for i in range(len(_UpperCamelCase ) ):
for j in graph[i]:
if color[i] == color[j]:
return False
return True
# Adjacency list of graph
lowerCAmelCase_ = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
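# As an additional sanity check, a triangle such as {0: [1, 2], 1: [0, 2], 2: [0, 1]} is an
# odd cycle and cannot be two-colored, so the same check is expected to report False for it.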
| 60 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
lowerCAmelCase_ = {
'''configuration_ernie''': ['''ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ErnieConfig''', '''ErnieOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ErnieForCausalLM''',
'''ErnieForMaskedLM''',
'''ErnieForMultipleChoice''',
'''ErnieForNextSentencePrediction''',
'''ErnieForPreTraining''',
'''ErnieForQuestionAnswering''',
'''ErnieForSequenceClassification''',
'''ErnieForTokenClassification''',
'''ErnieModel''',
'''ErniePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 60 |
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class __lowerCAmelCase ( unittest.TestCase ):
def __init__(self , __magic_name__ , __magic_name__=7 , __magic_name__=3 , __magic_name__=18 , __magic_name__=30 , __magic_name__=400 , __magic_name__=True , __magic_name__=None , __magic_name__=True , __magic_name__=None , __magic_name__=True , __magic_name__=[0.5, 0.5, 0.5] , __magic_name__=[0.5, 0.5, 0.5] , __magic_name__=False , ) -> int:
'''simple docstring'''
snake_case_ : int = size if size is not None else {'''height''': 20, '''width''': 20}
snake_case_ : int = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
snake_case_ : str = parent
snake_case_ : Optional[int] = batch_size
snake_case_ : Dict = num_channels
snake_case_ : List[Any] = image_size
snake_case_ : Union[str, Any] = min_resolution
snake_case_ : Tuple = max_resolution
snake_case_ : str = do_resize
snake_case_ : Tuple = size
snake_case_ : int = do_center_crop
snake_case_ : Tuple = crop_size
snake_case_ : int = do_normalize
snake_case_ : Optional[Any] = image_mean
snake_case_ : List[str] = image_std
snake_case_ : str = do_reduce_labels
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def lowerCamelCase_ ( ) -> List[Any]:
"""simple docstring"""
snake_case_ : Any = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
snake_case_ : Union[str, Any] = Image.open(dataset[0]['''file'''] )
snake_case_ : str = Image.open(dataset[1]['''file'''] )
return image, map
def lowerCamelCase_ ( ) -> List[Any]:
"""simple docstring"""
snake_case_ : str = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
snake_case_ : Optional[Any] = Image.open(ds[0]['''file'''] )
snake_case_ : Optional[Any] = Image.open(ds[1]['''file'''] )
snake_case_ : List[str] = Image.open(ds[2]['''file'''] )
snake_case_ : str = Image.open(ds[3]['''file'''] )
return [imagea, imagea], [mapa, mapa]
@require_torch
@require_vision
class __lowerCAmelCase ( _a, unittest.TestCase ):
lowerCamelCase_ : List[Any] = BeitImageProcessor if is_vision_available() else None
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : int = BeitImageProcessingTester(self )
@property
def lowerCamelCase (self ) -> str:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase (self ) -> int:
'''simple docstring'''
snake_case_ : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__magic_name__ , '''do_resize''' ) )
self.assertTrue(hasattr(__magic_name__ , '''size''' ) )
self.assertTrue(hasattr(__magic_name__ , '''do_center_crop''' ) )
self.assertTrue(hasattr(__magic_name__ , '''center_crop''' ) )
self.assertTrue(hasattr(__magic_name__ , '''do_normalize''' ) )
self.assertTrue(hasattr(__magic_name__ , '''image_mean''' ) )
self.assertTrue(hasattr(__magic_name__ , '''image_std''' ) )
def lowerCamelCase (self ) -> Any:
'''simple docstring'''
snake_case_ : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 20, '''width''': 20} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
self.assertEqual(image_processor.do_reduce_labels , __magic_name__ )
snake_case_ : Union[str, Any] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=__magic_name__ )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
self.assertEqual(image_processor.do_reduce_labels , __magic_name__ )
def lowerCamelCase (self ) -> Any:
'''simple docstring'''
pass
def lowerCamelCase (self ) -> str:
'''simple docstring'''
snake_case_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , Image.Image )
# Test not batched input
snake_case_ : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
snake_case_ : Any = image_processing(__magic_name__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
snake_case_ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case_ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , numpify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , np.ndarray )
# Test not batched input
snake_case_ : Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
snake_case_ : Optional[int] = image_processing(__magic_name__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , torchify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , torch.Tensor )
# Test not batched input
snake_case_ : Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
snake_case_ : List[str] = image_processing(__magic_name__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , torchify=__magic_name__ )
snake_case_ : Union[str, Any] = []
for image in image_inputs:
self.assertIsInstance(__magic_name__ , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
snake_case_ : List[str] = image_processing(image_inputs[0] , maps[0] , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
# Test batched
snake_case_ : Any = image_processing(__magic_name__ , __magic_name__ , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
# Test not batched input (PIL images)
snake_case_ , snake_case_ : Optional[int] = prepare_semantic_single_inputs()
snake_case_ : int = image_processing(__magic_name__ , __magic_name__ , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
# Test batched input (PIL images)
snake_case_ , snake_case_ : Dict = prepare_semantic_batch_inputs()
snake_case_ : Optional[int] = image_processing(__magic_name__ , __magic_name__ , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
2,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
snake_case_ , snake_case_ : Tuple = prepare_semantic_single_inputs()
snake_case_ : Optional[int] = image_processing(__magic_name__ , __magic_name__ , return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 150 )
snake_case_ : List[Any] = True
snake_case_ : int = image_processing(__magic_name__ , __magic_name__ , return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
| 60 | 1 |
def lowerCamelCase_ ( _UpperCamelCase = 1_000 ) -> int:
"""simple docstring"""
snake_case_ : Tuple = 3
snake_case_ : List[Any] = 0
while a < n:
if a % 3 == 0 or a % 5 == 0:
result += a
elif a % 15 == 0:
result -= a
a += 1
return result
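# Worked example: for n = 10 the multiples of 3 or 5 below 10 are 3, 5, 6 and 9, so the
# expected result is 23.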
if __name__ == "__main__":
print(F'''{solution() = }''')
| 60 |
from sklearn.metrics import mean_squared_error
import datasets
lowerCAmelCase_ = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
lowerCAmelCase_ = '''\
Mean Squared Error (MSE) is the average of the squared differences between the predicted
and actual values.
'''
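# In formula terms, the metric computes mean((predictions - references) ** 2); with
# squared=False it instead reports the square root of that value (the RMSE).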
lowerCAmelCase_ = '''
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
"raw_values" : Returns a full set of errors in case of multioutput input.
"uniform_average" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric("mse")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{\'mse\': 0.6123724356957945}
If you\'re using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric("mse", "multilist")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mse\': array([0.41666667, 1. ])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
def lowerCamelCase (self ) -> Optional[int]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
'''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'''
] , )
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value('''float''' ) ),
"references": datasets.Sequence(datasets.Value('''float''' ) ),
}
else:
return {
"predictions": datasets.Value('''float''' ),
"references": datasets.Value('''float''' ),
}
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__=None , __magic_name__="uniform_average" , __magic_name__=True ) -> Any:
'''simple docstring'''
snake_case_ : List[Any] = mean_squared_error(
__magic_name__ , __magic_name__ , sample_weight=__magic_name__ , multioutput=__magic_name__ , squared=__magic_name__ )
return {"mse": mse}
| 60 | 1 |
from __future__ import annotations
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> None:
"""simple docstring"""
snake_case_ : Dict = len(_UpperCamelCase )
# If row is equal to the size of the board it means there is a queen in each row of
# the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append(['''. ''' * i + '''Q ''' + '''. ''' * (n - 1 - i) for i in possible_board] )
return
# We iterate over each column in the row to find all possible results in each row
for col in range(_UpperCamelCase ):
# We apply what we learned previously. First we check that the current board
# (possible_board) does not already contain the same column value, because if it does
# it means there is a vertical collision. Then we apply the two formulas we
# learned before:
#
# 45º: y - x = b or 45: row - col = b
# 135º: y + x = b or row + col = b.
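#
# For instance, queens at (row=1, col=3) and (row=3, col=1) both give row + col = 4 and so
# share a 135º diagonal, while (0, 0) and (2, 2) both give row - col = 0 (a 45º diagonal).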
#
# And we verify that the results of these two formulas do not already exist in their
# respective variables (diagonal_right_collisions, diagonal_left_collisions).
#
# If any of these are True it means there is a collision, so we continue to the
# next value in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
# If there is no collision we call the depth_first_search function again with the updated inputs
depth_first_search(
[*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , _UpperCamelCase , _UpperCamelCase , )
def lowerCamelCase_ ( _UpperCamelCase ) -> None:
"""simple docstring"""
snake_case_ : list[list[str]] = []
depth_first_search([] , [] , [] , _UpperCamelCase , _UpperCamelCase )
# Print all the boards
for board in boards:
for column in board:
print(_UpperCamelCase )
print('''''' )
print(len(_UpperCamelCase ) , '''solutions were found.''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
| 60 |
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class __lowerCAmelCase :
lowerCamelCase_ : Any = None
def lowerCamelCase (self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
snake_case_ : List[Any] = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , __magic_name__ )
def lowerCamelCase (self ) -> int:
'''simple docstring'''
snake_case_ : Dict = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case_ : Optional[int] = os.path.join(__magic_name__ , '''feat_extract.json''' )
feat_extract_first.to_json_file(__magic_name__ )
snake_case_ : str = self.feature_extraction_class.from_json_file(__magic_name__ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Optional[int] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case_ : str = feat_extract_first.save_pretrained(__magic_name__ )[0]
check_json_file_has_correct_format(__magic_name__ )
snake_case_ : Dict = self.feature_extraction_class.from_pretrained(__magic_name__ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def lowerCamelCase (self ) -> str:
'''simple docstring'''
snake_case_ : Tuple = self.feature_extraction_class()
self.assertIsNotNone(__magic_name__ )
| 60 | 1 |
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def lowerCamelCase_ ( ) -> Union[str, Any]:
"""simple docstring"""
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
with pytest.raises(_UpperCamelCase ):
requests.request('''GET''' , '''https://huggingface.co''' )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request('''GET''' , '''https://huggingface.co''' , timeout=1.0 )
@pytest.mark.integration
def lowerCamelCase_ ( ) -> Optional[int]:
"""simple docstring"""
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request('''GET''' , '''https://huggingface.co''' )
def lowerCamelCase_ ( ) -> List[Any]:
"""simple docstring"""
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
with pytest.raises(_UpperCamelCase ):
http_head('''https://huggingface.co''' )
| 60 |
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
class __lowerCAmelCase :
lowerCamelCase_ : str
lowerCamelCase_ : str = None
@staticmethod
def lowerCamelCase () -> Any:
'''simple docstring'''
raise NotImplementedError
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ) -> Dict:
'''simple docstring'''
raise NotImplementedError
def lowerCamelCase (self , __magic_name__ ) -> int:
'''simple docstring'''
raise NotImplementedError
def lowerCamelCase (self ) -> Union[str, Any]:
'''simple docstring'''
if not self.is_available():
raise RuntimeError(
F'''You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.''' )
@classmethod
def lowerCamelCase (cls ) -> List[Any]:
'''simple docstring'''
return F'''`pip install {cls.pip_package or cls.name}`'''
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : Optional[int] = '''optuna'''
@staticmethod
def lowerCamelCase () -> Union[str, Any]:
'''simple docstring'''
return is_optuna_available()
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ) -> Union[str, Any]:
'''simple docstring'''
return run_hp_search_optuna(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ )
def lowerCamelCase (self , __magic_name__ ) -> Union[str, Any]:
'''simple docstring'''
return default_hp_space_optuna(__magic_name__ )
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : Any = '''ray'''
lowerCamelCase_ : List[str] = '''\'ray[tune]\''''
@staticmethod
def lowerCamelCase () -> List[Any]:
'''simple docstring'''
return is_ray_available()
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ) -> Optional[Any]:
'''simple docstring'''
return run_hp_search_ray(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ )
def lowerCamelCase (self , __magic_name__ ) -> Union[str, Any]:
'''simple docstring'''
return default_hp_space_ray(__magic_name__ )
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : Tuple = '''sigopt'''
@staticmethod
def lowerCamelCase () -> Optional[int]:
'''simple docstring'''
return is_sigopt_available()
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ) -> List[str]:
'''simple docstring'''
return run_hp_search_sigopt(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ )
def lowerCamelCase (self , __magic_name__ ) -> int:
'''simple docstring'''
return default_hp_space_sigopt(__magic_name__ )
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : Tuple = '''wandb'''
@staticmethod
def lowerCamelCase () -> Dict:
'''simple docstring'''
return is_wandb_available()
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ) -> Optional[Any]:
'''simple docstring'''
return run_hp_search_wandb(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ )
def lowerCamelCase (self , __magic_name__ ) -> Optional[Any]:
'''simple docstring'''
return default_hp_space_wandb(__magic_name__ )
lowerCAmelCase_ = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def lowerCamelCase_ ( ) -> str:
"""simple docstring"""
snake_case_ : Optional[int] = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
if len(_UpperCamelCase ) > 0:
snake_case_ : Dict = available_backends[0].name
if len(_UpperCamelCase ) > 1:
logger.info(
f'''{len(_UpperCamelCase )} hyperparameter search backends available. Using {name} as the default.''' )
return name
raise RuntimeError(
'''No hyperparameter search backend available.\n'''
+ '''\n'''.join(
f''' - To install {backend.name} run {backend.pip_install()}'''
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
| 60 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase_ = {'''configuration_vit''': ['''VIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTConfig''', '''ViTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['''ViTFeatureExtractor''']
lowerCAmelCase_ = ['''ViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''VIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTForImageClassification''',
'''ViTForMaskedImageModeling''',
'''ViTModel''',
'''ViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''TFViTForImageClassification''',
'''TFViTModel''',
'''TFViTPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''FlaxViTForImageClassification''',
'''FlaxViTModel''',
'''FlaxViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 60 |
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> list:
"""simple docstring"""
snake_case_ : Tuple = len(_UpperCamelCase )
snake_case_ : Union[str, Any] = [[0] * n for i in range(_UpperCamelCase )]
for i in range(_UpperCamelCase ):
snake_case_ : Any = y_points[i]
for i in range(2 , _UpperCamelCase ):
for j in range(_UpperCamelCase , _UpperCamelCase ):
snake_case_ : Optional[int] = (
(xa - x_points[j - i + 1]) * q[j][i - 1]
- (xa - x_points[j]) * q[j - 1][i - 1]
) / (x_points[j] - x_points[j - i + 1])
return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 60 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase_ = {
'''configuration_bigbird_pegasus''': [
'''BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BigBirdPegasusConfig''',
'''BigBirdPegasusOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BigBirdPegasusForCausalLM''',
'''BigBirdPegasusForConditionalGeneration''',
'''BigBirdPegasusForQuestionAnswering''',
'''BigBirdPegasusForSequenceClassification''',
'''BigBirdPegasusModel''',
'''BigBirdPegasusPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 60 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase_ = {
'''configuration_xmod''': [
'''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XmodConfig''',
'''XmodOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XmodForCausalLM''',
'''XmodForMaskedLM''',
'''XmodForMultipleChoice''',
'''XmodForQuestionAnswering''',
'''XmodForSequenceClassification''',
'''XmodForTokenClassification''',
'''XmodModel''',
'''XmodPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 60 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
lowerCAmelCase_ = '''Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine'''
def lowerCamelCase_ ( ) -> int:
"""simple docstring"""
snake_case_ : Optional[Any] = _ask_options(
'''In which compute environment are you running?''' , ['''This machine''', '''AWS (Amazon SageMaker)'''] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
snake_case_ : Union[str, Any] = get_sagemaker_input()
else:
snake_case_ : Union[str, Any] = get_cluster_input()
return config
def lowerCamelCase_ ( _UpperCamelCase=None ) -> Optional[int]:
"""simple docstring"""
if subparsers is not None:
snake_case_ : Tuple = subparsers.add_parser('''config''' , description=_UpperCamelCase )
else:
snake_case_ : Tuple = argparse.ArgumentParser('''Accelerate config command''' , description=_UpperCamelCase )
parser.add_argument(
'''--config_file''' , default=_UpperCamelCase , help=(
'''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
'''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
'''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
'''with \'huggingface\'.'''
) , )
if subparsers is not None:
parser.set_defaults(func=_UpperCamelCase )
return parser
def lowerCamelCase_ ( _UpperCamelCase ) -> str:
"""simple docstring"""
snake_case_ : Any = get_user_input()
if args.config_file is not None:
snake_case_ : Tuple = args.config_file
else:
if not os.path.isdir(_UpperCamelCase ):
os.makedirs(_UpperCamelCase )
snake_case_ : Tuple = default_yaml_config_file
if config_file.endswith('''.json''' ):
config.to_json_file(_UpperCamelCase )
else:
config.to_yaml_file(_UpperCamelCase )
print(f'''accelerate configuration saved at {config_file}''' )
def lowerCamelCase_ ( ) -> List[Any]:
"""simple docstring"""
snake_case_ : Union[str, Any] = config_command_parser()
snake_case_ : Any = parser.parse_args()
config_command(_UpperCamelCase )
if __name__ == "__main__":
main()
| 60 |
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def lowerCamelCase_ ( _UpperCamelCase ) -> Union[str, Any]:
"""simple docstring"""
return getitem, k
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> Any:
"""simple docstring"""
return setitem, k, v
def lowerCamelCase_ ( _UpperCamelCase ) -> Tuple:
"""simple docstring"""
return delitem, k
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , *_UpperCamelCase ) -> str:
"""simple docstring"""
try:
return fun(_UpperCamelCase , *_UpperCamelCase ), None
except Exception as e:
return None, e
lowerCAmelCase_ = (
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
)
lowerCAmelCase_ = [
_set('''key_a''', '''val_a'''),
_set('''key_a''', '''val_b'''),
]
lowerCAmelCase_ = [
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
_del('''key_a'''),
_del('''key_b'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
]
lowerCAmelCase_ = [
_get('''key_a'''),
_del('''key_a'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
_del('''key_a'''),
_get('''key_a'''),
]
lowerCAmelCase_ = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
lowerCAmelCase_ = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set('''key_a''', '''val_b'''),
]
@pytest.mark.parametrize(
'''operations''' , (
pytest.param(_add_items , id='''add items''' ),
pytest.param(_overwrite_items , id='''overwrite items''' ),
pytest.param(_delete_items , id='''delete items''' ),
pytest.param(_access_absent_items , id='''access absent items''' ),
pytest.param(_add_with_resize_up , id='''add with resize up''' ),
pytest.param(_add_with_resize_down , id='''add with resize down''' ),
) , )
def lowerCamelCase_ ( _UpperCamelCase ) -> Any:
"""simple docstring"""
snake_case_ : Any = HashMap(initial_block_size=4 )
snake_case_ : Union[str, Any] = {}
for _, (fun, *args) in enumerate(_UpperCamelCase ):
snake_case_ , snake_case_ : str = _run_operation(_UpperCamelCase , _UpperCamelCase , *_UpperCamelCase )
snake_case_ , snake_case_ : List[Any] = _run_operation(_UpperCamelCase , _UpperCamelCase , *_UpperCamelCase )
assert my_res == py_res
assert str(_UpperCamelCase ) == str(_UpperCamelCase )
assert set(_UpperCamelCase ) == set(_UpperCamelCase )
assert len(_UpperCamelCase ) == len(_UpperCamelCase )
assert set(my.items() ) == set(py.items() )
def lowerCamelCase_ ( ) -> Any:
"""simple docstring"""
def is_public(_UpperCamelCase ) -> bool:
return not name.startswith('''_''' )
snake_case_ : str = {name for name in dir({} ) if is_public(_UpperCamelCase )}
snake_case_ : str = {name for name in dir(HashMap() ) if is_public(_UpperCamelCase )}
assert dict_public_names > hash_public_names
| 60 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase_ = {
'''configuration_resnet''': ['''RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ResNetConfig''', '''ResNetOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ResNetForImageClassification''',
'''ResNetModel''',
'''ResNetPreTrainedModel''',
'''ResNetBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFResNetForImageClassification''',
'''TFResNetModel''',
'''TFResNetPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''FlaxResNetForImageClassification''',
'''FlaxResNetModel''',
'''FlaxResNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 60 |
from __future__ import annotations
def lowerCamelCase_ ( _UpperCamelCase ) -> list:
"""simple docstring"""
if len(_UpperCamelCase ) == 0:
return []
snake_case_ , snake_case_ : Dict = min(_UpperCamelCase ), max(_UpperCamelCase )
snake_case_ : List[str] = int(max_value - min_value ) + 1
snake_case_ : list[list] = [[] for _ in range(_UpperCamelCase )]
for i in my_list:
buckets[int(i - min_value )].append(_UpperCamelCase )
return [v for bucket in buckets for v in sorted(_UpperCamelCase )]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -1_0, 1_5, 2, -2]) == [-1_0, -2, 0, 1, 2, 1_5]
| 60 | 1 |
import random
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> tuple:
"""simple docstring"""
snake_case_ , snake_case_ , snake_case_ : Optional[Any] = [], [], []
for element in data:
if element < pivot:
less.append(_UpperCamelCase )
elif element > pivot:
greater.append(_UpperCamelCase )
else:
equal.append(_UpperCamelCase )
return less, equal, greater
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> List[str]:
"""simple docstring"""
if index >= len(_UpperCamelCase ) or index < 0:
return None
snake_case_ : Union[str, Any] = items[random.randint(0 , len(_UpperCamelCase ) - 1 )]
snake_case_ : Optional[Any] = 0
snake_case_ , snake_case_ , snake_case_ : List[str] = _partition(_UpperCamelCase , _UpperCamelCase )
snake_case_ : Optional[Any] = len(_UpperCamelCase )
snake_case_ : Optional[int] = len(_UpperCamelCase )
# index is the pivot
if m <= index < m + count:
return pivot
# must be in smaller
elif m > index:
return quick_select(_UpperCamelCase , _UpperCamelCase )
# must be in larger
else:
return quick_select(_UpperCamelCase , index - (m + count) )
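# For example, selecting index 2 from [2, 4, 5, 8, 9] is expected to return 5, the element
# that would sit at position 2 of the sorted list.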
| 60 |
import tensorflow as tf
from ...tf_utils import shape_list
class __lowerCAmelCase ( tf.keras.layers.Layer ):
def __init__(self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=1 , __magic_name__=False , **__magic_name__ ) -> Dict:
'''simple docstring'''
super().__init__(**__magic_name__ )
snake_case_ : List[Any] = vocab_size
snake_case_ : Dict = d_embed
snake_case_ : Union[str, Any] = d_proj
snake_case_ : str = cutoffs + [vocab_size]
snake_case_ : int = [0] + self.cutoffs
snake_case_ : Optional[int] = div_val
snake_case_ : int = self.cutoffs[0]
snake_case_ : Any = len(self.cutoffs ) - 1
snake_case_ : Union[str, Any] = self.shortlist_size + self.n_clusters
snake_case_ : str = keep_order
snake_case_ : int = []
snake_case_ : Union[str, Any] = []
def lowerCamelCase (self , __magic_name__ ) -> Union[str, Any]:
'''simple docstring'''
if self.n_clusters > 0:
snake_case_ : Tuple = self.add_weight(
shape=(self.n_clusters, self.d_embed) , initializer='''zeros''' , trainable=__magic_name__ , name='''cluster_weight''' )
snake_case_ : Optional[Any] = self.add_weight(
shape=(self.n_clusters,) , initializer='''zeros''' , trainable=__magic_name__ , name='''cluster_bias''' )
if self.div_val == 1:
for i in range(len(self.cutoffs ) ):
if self.d_proj != self.d_embed:
snake_case_ : List[str] = self.add_weight(
shape=(self.d_embed, self.d_proj) , initializer='''zeros''' , trainable=__magic_name__ , name=F'''out_projs_._{i}''' , )
self.out_projs.append(__magic_name__ )
else:
self.out_projs.append(__magic_name__ )
snake_case_ : Optional[Any] = self.add_weight(
shape=(self.vocab_size, self.d_embed) , initializer='''zeros''' , trainable=__magic_name__ , name=F'''out_layers_._{i}_._weight''' , )
snake_case_ : List[str] = self.add_weight(
shape=(self.vocab_size,) , initializer='''zeros''' , trainable=__magic_name__ , name=F'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
else:
for i in range(len(self.cutoffs ) ):
snake_case_ , snake_case_ : Optional[int] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
snake_case_ : Optional[Any] = self.d_embed // (self.div_val**i)
snake_case_ : int = self.add_weight(
shape=(d_emb_i, self.d_proj) , initializer='''zeros''' , trainable=__magic_name__ , name=F'''out_projs_._{i}''' )
self.out_projs.append(__magic_name__ )
snake_case_ : int = self.add_weight(
shape=(r_idx - l_idx, d_emb_i) , initializer='''zeros''' , trainable=__magic_name__ , name=F'''out_layers_._{i}_._weight''' , )
snake_case_ : Any = self.add_weight(
shape=(r_idx - l_idx,) , initializer='''zeros''' , trainable=__magic_name__ , name=F'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
super().build(__magic_name__ )
@staticmethod
def lowerCamelCase (__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None ) -> str:
'''simple docstring'''
snake_case_ : Union[str, Any] = x
if proj is not None:
snake_case_ : List[str] = tf.einsum('''ibd,ed->ibe''' , __magic_name__ , __magic_name__ )
return tf.einsum('''ibd,nd->ibn''' , __magic_name__ , __magic_name__ ) + b
@staticmethod
def lowerCamelCase (__magic_name__ , __magic_name__ ) -> Any:
'''simple docstring'''
snake_case_ : Union[str, Any] = shape_list(__magic_name__ )
snake_case_ : Tuple = tf.range(lp_size[0] , dtype=target.dtype )
snake_case_ : Dict = tf.stack([r, target] , 1 )
return tf.gather_nd(__magic_name__ , __magic_name__ )
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__=True , __magic_name__=False ) -> str:
'''simple docstring'''
snake_case_ : Optional[Any] = 0
if self.n_clusters == 0:
snake_case_ : Any = self._logit(__magic_name__ , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
if target is not None:
snake_case_ : Union[str, Any] = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=__magic_name__ , logits=__magic_name__ )
snake_case_ : Optional[Any] = tf.nn.log_softmax(__magic_name__ , axis=-1 )
else:
snake_case_ : Optional[int] = shape_list(__magic_name__ )
snake_case_ : int = []
snake_case_ : List[Any] = tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
snake_case_ , snake_case_ : Optional[int] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
snake_case_ : str = (target >= l_idx) & (target < r_idx)
snake_case_ : Dict = tf.where(__magic_name__ )
snake_case_ : List[str] = tf.boolean_mask(__magic_name__ , __magic_name__ ) - l_idx
if self.div_val == 1:
snake_case_ : Any = self.out_layers[0][0][l_idx:r_idx]
snake_case_ : Dict = self.out_layers[0][1][l_idx:r_idx]
else:
snake_case_ : Union[str, Any] = self.out_layers[i][0]
snake_case_ : int = self.out_layers[i][1]
if i == 0:
snake_case_ : List[Any] = tf.concat([cur_W, self.cluster_weight] , 0 )
snake_case_ : Tuple = tf.concat([cur_b, self.cluster_bias] , 0 )
snake_case_ : Optional[int] = self._logit(__magic_name__ , __magic_name__ , __magic_name__ , self.out_projs[0] )
snake_case_ : Any = tf.nn.log_softmax(__magic_name__ )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
snake_case_ : Optional[Any] = tf.boolean_mask(__magic_name__ , __magic_name__ )
snake_case_ : Tuple = self._gather_logprob(__magic_name__ , __magic_name__ )
else:
snake_case_ : Optional[int] = self._logit(__magic_name__ , __magic_name__ , __magic_name__ , self.out_projs[i] )
snake_case_ : Union[str, Any] = tf.nn.log_softmax(__magic_name__ )
snake_case_ : Optional[Any] = self.cutoffs[0] + i - 1 # No probability for the head cluster
snake_case_ : Optional[int] = head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(__magic_name__ )
if target is not None:
snake_case_ : Any = tf.boolean_mask(__magic_name__ , __magic_name__ )
snake_case_ : Optional[Any] = tf.boolean_mask(__magic_name__ , __magic_name__ )
snake_case_ : str = self._gather_logprob(__magic_name__ , __magic_name__ )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(__magic_name__ , -cur_logprob , shape_list(__magic_name__ ) )
snake_case_ : str = tf.concat(__magic_name__ , axis=-1 )
if target is not None:
if return_mean:
snake_case_ : int = tf.reduce_mean(__magic_name__ )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(__magic_name__ )
# Log the loss as a metric (we could log arbitrary metrics,
# including different metrics for training and inference).
self.add_metric(__magic_name__ , name=self.name , aggregation='''mean''' if return_mean else '''''' )
return out
| 60 | 1 |
def lowerCamelCase_ ( _UpperCamelCase ) -> int:
"""simple docstring"""
return 1 if digit in (0, 1) else (digit * factorial(digit - 1 ))
def lowerCamelCase_ ( _UpperCamelCase ) -> bool:
"""simple docstring"""
snake_case_ : Union[str, Any] = 0
snake_case_ : Dict = number
while duplicate > 0:
snake_case_ , snake_case_ : Any = divmod(_UpperCamelCase , 10 )
fact_sum += factorial(_UpperCamelCase )
return fact_sum == number
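# Worked example: 145 is a Krishnamurthy number because 1! + 4! + 5! = 1 + 24 + 120 = 145.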
if __name__ == "__main__":
print('''Program to check whether a number is a Krishnamurthy Number or not.''')
lowerCAmelCase_ = int(input('''Enter number: ''').strip())
print(
F'''{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number.'''
)
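# Added worked example (not part of the original snippet): 145 is a Krishnamurthy
# (strong) number because 1! + 4! + 5! = 1 + 24 + 120 = 145, while 123 is not,
# since 1! + 2! + 3! = 1 + 2 + 6 = 9 != 123.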
| 60 |
import requests
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> None:
"""simple docstring"""
snake_case_ : Tuple = {'''Content-Type''': '''application/json'''}
snake_case_ : Any = requests.post(_UpperCamelCase , json={'''text''': message_body} , headers=_UpperCamelCase )
if response.status_code != 200:
snake_case_ : List[Any] = (
'''Request to slack returned an error '''
f'''{response.status_code}, the response is:\n{response.text}'''
)
raise ValueError(_UpperCamelCase )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('''<YOUR MESSAGE BODY>''', '''<SLACK CHANNEL URL>''')
| 60 | 1 |
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def lowerCamelCase_ ( _UpperCamelCase ) -> Union[str, Any]:
"""simple docstring"""
return getitem, k
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> Any:
"""simple docstring"""
return setitem, k, v
def lowerCamelCase_ ( _UpperCamelCase ) -> Tuple:
"""simple docstring"""
return delitem, k
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , *_UpperCamelCase ) -> str:
"""simple docstring"""
try:
return fun(_UpperCamelCase , *_UpperCamelCase ), None
except Exception as e:
return None, e
lowerCAmelCase_ = (
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
)
lowerCAmelCase_ = [
_set('''key_a''', '''val_a'''),
_set('''key_a''', '''val_b'''),
]
lowerCAmelCase_ = [
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
_del('''key_a'''),
_del('''key_b'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
]
lowerCAmelCase_ = [
_get('''key_a'''),
_del('''key_a'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
_del('''key_a'''),
_get('''key_a'''),
]
lowerCAmelCase_ = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
lowerCAmelCase_ = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set('''key_a''', '''val_b'''),
]
@pytest.mark.parametrize(
'''operations''' , (
pytest.param(_add_items , id='''add items''' ),
pytest.param(_overwrite_items , id='''overwrite items''' ),
pytest.param(_delete_items , id='''delete items''' ),
pytest.param(_access_absent_items , id='''access absent items''' ),
pytest.param(_add_with_resize_up , id='''add with resize up''' ),
pytest.param(_add_with_resize_down , id='''add with resize down''' ),
) , )
def lowerCamelCase_ ( _UpperCamelCase ) -> Any:
"""simple docstring"""
snake_case_ : Any = HashMap(initial_block_size=4 )
snake_case_ : Union[str, Any] = {}
for _, (fun, *args) in enumerate(_UpperCamelCase ):
snake_case_ , snake_case_ : str = _run_operation(_UpperCamelCase , _UpperCamelCase , *_UpperCamelCase )
snake_case_ , snake_case_ : List[Any] = _run_operation(_UpperCamelCase , _UpperCamelCase , *_UpperCamelCase )
assert my_res == py_res
assert str(_UpperCamelCase ) == str(_UpperCamelCase )
assert set(_UpperCamelCase ) == set(_UpperCamelCase )
assert len(_UpperCamelCase ) == len(_UpperCamelCase )
assert set(my.items() ) == set(py.items() )
def lowerCamelCase_ ( ) -> Any:
"""simple docstring"""
def is_public(_UpperCamelCase ) -> bool:
return not name.startswith('''_''' )
snake_case_ : str = {name for name in dir({} ) if is_public(_UpperCamelCase )}
snake_case_ : str = {name for name in dir(HashMap() ) if is_public(_UpperCamelCase )}
assert dict_public_names > hash_public_names
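# Added note (not part of the original test): for Python sets, `>` is the strict-superset
# operator, so the last assert checks that every public name exposed by HashMap also
# exists on the built-in dict, and that dict exposes additional names on top of them.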
| 60 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase_ = {
'''configuration_speech_to_text''': ['''SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Speech2TextConfig'''],
'''processing_speech_to_text''': ['''Speech2TextProcessor'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['''Speech2TextTokenizer''']
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['''Speech2TextFeatureExtractor''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSpeech2TextForConditionalGeneration''',
'''TFSpeech2TextModel''',
'''TFSpeech2TextPreTrainedModel''',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'''SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Speech2TextForConditionalGeneration''',
'''Speech2TextModel''',
'''Speech2TextPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 60 | 1 |
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = ['''model.decoder.embed_positions.weights''']
def lowerCamelCase_ ( _UpperCamelCase ) -> int:
"""simple docstring"""
if "emb" in name:
snake_case_ : List[Any] = name.replace('''emb''' , '''model.decoder.embed_tokens''' )
if "transformer" in name:
snake_case_ : Tuple = name.replace('''transformer''' , '''model.decoder''' )
if "cross_attention" in name:
snake_case_ : str = name.replace('''cross_attention''' , '''encoder_attn''' )
if "linear1" in name:
snake_case_ : int = name.replace('''linear1''' , '''fc1''' )
if "linear2" in name:
snake_case_ : Dict = name.replace('''linear2''' , '''fc2''' )
if "norm1" in name:
snake_case_ : Optional[Any] = name.replace('''norm1''' , '''self_attn_layer_norm''' )
if "norm_cross" in name:
snake_case_ : Any = name.replace('''norm_cross''' , '''encoder_attn_layer_norm''' )
if "norm2" in name:
snake_case_ : List[Any] = name.replace('''norm2''' , '''final_layer_norm''' )
if "out_norm" in name:
snake_case_ : int = name.replace('''out_norm''' , '''model.decoder.layer_norm''' )
if "linears" in name:
snake_case_ : Dict = name.replace('''linears''' , '''lm_heads''' )
if "condition_provider.conditioners.description.output_proj" in name:
snake_case_ : int = name.replace('''condition_provider.conditioners.description.output_proj''' , '''enc_to_dec_proj''' )
return name
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> Tuple[Dict, Dict]:
"""simple docstring"""
snake_case_ : List[Any] = list(state_dict.keys() )
snake_case_ : Any = {}
for key in keys:
snake_case_ : int = state_dict.pop(_UpperCamelCase )
snake_case_ : int = rename_keys(_UpperCamelCase )
if "in_proj_weight" in key:
# split fused qkv proj
snake_case_ : List[Any] = val[:hidden_size, :]
snake_case_ : List[str] = val[hidden_size : 2 * hidden_size, :]
snake_case_ : Dict = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
snake_case_ : Optional[int] = val
else:
snake_case_ : Any = val
return state_dict, enc_dec_proj_state_dict
def lowerCamelCase_ ( _UpperCamelCase ) -> MusicgenDecoderConfig:
"""simple docstring"""
if checkpoint == "small":
# default config values
snake_case_ : Dict = 1_024
snake_case_ : Optional[int] = 24
snake_case_ : List[Any] = 16
elif checkpoint == "medium":
snake_case_ : Tuple = 1_536
snake_case_ : Any = 48
snake_case_ : List[Any] = 24
elif checkpoint == "large":
snake_case_ : List[Any] = 2_048
snake_case_ : Tuple = 48
snake_case_ : str = 32
else:
raise ValueError(f'''Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.''' )
snake_case_ : int = MusicgenDecoderConfig(
hidden_size=_UpperCamelCase , ffn_dim=hidden_size * 4 , num_hidden_layers=_UpperCamelCase , num_attention_heads=_UpperCamelCase , )
return config
@torch.no_grad()
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase="cpu" ) -> int:
"""simple docstring"""
snake_case_ : Dict = MusicGen.get_pretrained(_UpperCamelCase , device=_UpperCamelCase )
snake_case_ : Dict = decoder_config_from_checkpoint(_UpperCamelCase )
snake_case_ : Any = fairseq_model.lm.state_dict()
snake_case_ , snake_case_ : int = rename_state_dict(
_UpperCamelCase , hidden_size=decoder_config.hidden_size )
snake_case_ : Any = TaEncoderModel.from_pretrained('''t5-base''' )
snake_case_ : int = EncodecModel.from_pretrained('''facebook/encodec_32khz''' )
snake_case_ : List[Any] = MusicgenForCausalLM(_UpperCamelCase ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
snake_case_ , snake_case_ : str = decoder.load_state_dict(_UpperCamelCase , strict=_UpperCamelCase )
for key in missing_keys.copy():
if key.startswith(('''text_encoder''', '''audio_encoder''') ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(_UpperCamelCase )
if len(_UpperCamelCase ) > 0:
raise ValueError(f'''Missing key(s) in state_dict: {missing_keys}''' )
if len(_UpperCamelCase ) > 0:
raise ValueError(f'''Unexpected key(s) in state_dict: {unexpected_keys}''' )
# init the composite model
snake_case_ : Any = MusicgenForConditionalGeneration(text_encoder=_UpperCamelCase , audio_encoder=_UpperCamelCase , decoder=_UpperCamelCase )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(_UpperCamelCase )
# check we can do a forward pass
snake_case_ : int = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
snake_case_ : Dict = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
snake_case_ : str = model(input_ids=_UpperCamelCase , decoder_input_ids=_UpperCamelCase ).logits
if logits.shape != (8, 1, 2_048):
raise ValueError('''Incorrect shape for logits''' )
# now construct the processor
snake_case_ : Optional[int] = AutoTokenizer.from_pretrained('''t5-base''' )
snake_case_ : int = AutoFeatureExtractor.from_pretrained('''facebook/encodec_32khz''' , padding_side='''left''' )
snake_case_ : List[Any] = MusicgenProcessor(feature_extractor=_UpperCamelCase , tokenizer=_UpperCamelCase )
# set the appropriate bos/pad token ids
snake_case_ : int = 2_048
snake_case_ : str = 2_048
# set other default generation config params
snake_case_ : Union[str, Any] = int(30 * audio_encoder.config.frame_rate )
snake_case_ : int = True
snake_case_ : Optional[int] = 3.0
if pytorch_dump_folder is not None:
Path(_UpperCamelCase ).mkdir(exist_ok=_UpperCamelCase )
logger.info(f'''Saving model {checkpoint} to {pytorch_dump_folder}''' )
model.save_pretrained(_UpperCamelCase )
processor.save_pretrained(_UpperCamelCase )
if repo_id:
logger.info(f'''Pushing model {checkpoint} to {repo_id}''' )
model.push_to_hub(_UpperCamelCase )
processor.push_to_hub(_UpperCamelCase )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint''',
default='''small''',
type=str,
help='''Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.''',
)
parser.add_argument(
'''--pytorch_dump_folder''',
required=True,
default=None,
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
parser.add_argument(
'''--device''', default='''cpu''', type=str, help='''Torch device to run the conversion, either cpu or cuda.'''
)
lowerCAmelCase_ = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 60 |
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''google/owlvit-base-patch32''': '''https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json''',
'''google/owlvit-base-patch16''': '''https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json''',
'''google/owlvit-large-patch14''': '''https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json''',
}
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : Tuple = '''owlvit_text_model'''
def __init__(self , __magic_name__=4_9408 , __magic_name__=512 , __magic_name__=2048 , __magic_name__=12 , __magic_name__=8 , __magic_name__=16 , __magic_name__="quick_gelu" , __magic_name__=1e-5 , __magic_name__=0.0 , __magic_name__=0.02 , __magic_name__=1.0 , __magic_name__=0 , __magic_name__=4_9406 , __magic_name__=4_9407 , **__magic_name__ , ) -> str:
'''simple docstring'''
super().__init__(pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
snake_case_ : int = vocab_size
snake_case_ : str = hidden_size
snake_case_ : List[Any] = intermediate_size
snake_case_ : str = num_hidden_layers
snake_case_ : List[Any] = num_attention_heads
snake_case_ : Optional[Any] = max_position_embeddings
snake_case_ : str = hidden_act
snake_case_ : Union[str, Any] = layer_norm_eps
snake_case_ : Dict = attention_dropout
snake_case_ : Union[str, Any] = initializer_range
snake_case_ : int = initializer_factor
@classmethod
def lowerCamelCase (cls , __magic_name__ , **__magic_name__ ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(__magic_name__ )
snake_case_ , snake_case_ : str = cls.get_config_dict(__magic_name__ , **__magic_name__ )
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get('''model_type''' ) == "owlvit":
snake_case_ : str = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__magic_name__ , **__magic_name__ )
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : int = '''owlvit_vision_model'''
def __init__(self , __magic_name__=768 , __magic_name__=3072 , __magic_name__=12 , __magic_name__=12 , __magic_name__=3 , __magic_name__=768 , __magic_name__=32 , __magic_name__="quick_gelu" , __magic_name__=1e-5 , __magic_name__=0.0 , __magic_name__=0.02 , __magic_name__=1.0 , **__magic_name__ , ) -> int:
'''simple docstring'''
super().__init__(**__magic_name__ )
snake_case_ : Optional[Any] = hidden_size
snake_case_ : Union[str, Any] = intermediate_size
snake_case_ : Union[str, Any] = num_hidden_layers
snake_case_ : Tuple = num_attention_heads
snake_case_ : List[Any] = num_channels
snake_case_ : Union[str, Any] = image_size
snake_case_ : Dict = patch_size
snake_case_ : List[Any] = hidden_act
snake_case_ : Tuple = layer_norm_eps
snake_case_ : Dict = attention_dropout
snake_case_ : List[str] = initializer_range
snake_case_ : List[Any] = initializer_factor
@classmethod
def lowerCamelCase (cls , __magic_name__ , **__magic_name__ ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(__magic_name__ )
snake_case_ , snake_case_ : int = cls.get_config_dict(__magic_name__ , **__magic_name__ )
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get('''model_type''' ) == "owlvit":
snake_case_ : str = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__magic_name__ , **__magic_name__ )
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : int = '''owlvit'''
lowerCamelCase_ : Optional[int] = True
def __init__(self , __magic_name__=None , __magic_name__=None , __magic_name__=512 , __magic_name__=2.6_592 , __magic_name__=True , **__magic_name__ , ) -> int:
'''simple docstring'''
super().__init__(**__magic_name__ )
if text_config is None:
snake_case_ : Tuple = {}
logger.info('''text_config is None. Initializing the OwlViTTextConfig with default values.''' )
if vision_config is None:
snake_case_ : str = {}
logger.info('''vision_config is None. Initializing the OwlViTVisionConfig with default values.''' )
snake_case_ : str = OwlViTTextConfig(**__magic_name__ )
snake_case_ : Union[str, Any] = OwlViTVisionConfig(**__magic_name__ )
snake_case_ : Any = projection_dim
snake_case_ : Union[str, Any] = logit_scale_init_value
snake_case_ : str = return_dict
snake_case_ : Any = 1.0
@classmethod
def lowerCamelCase (cls , __magic_name__ , **__magic_name__ ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(__magic_name__ )
snake_case_ , snake_case_ : Optional[Any] = cls.get_config_dict(__magic_name__ , **__magic_name__ )
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__magic_name__ , **__magic_name__ )
@classmethod
def lowerCamelCase (cls , __magic_name__ , __magic_name__ , **__magic_name__ ) -> str:
'''simple docstring'''
snake_case_ : Optional[int] = {}
snake_case_ : Union[str, Any] = text_config
snake_case_ : Optional[Any] = vision_config
return cls.from_dict(__magic_name__ , **__magic_name__ )
def lowerCamelCase (self ) -> str:
'''simple docstring'''
snake_case_ : Dict = copy.deepcopy(self.__dict__ )
snake_case_ : List[Any] = self.text_config.to_dict()
snake_case_ : List[Any] = self.vision_config.to_dict()
snake_case_ : Tuple = self.__class__.model_type
return output
class __lowerCAmelCase ( _a ):
@property
def lowerCamelCase (self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
] )
@property
def lowerCamelCase (self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''logits_per_image''', {0: '''batch'''}),
('''logits_per_text''', {0: '''batch'''}),
('''text_embeds''', {0: '''batch'''}),
('''image_embeds''', {0: '''batch'''}),
] )
@property
def lowerCamelCase (self ) -> float:
'''simple docstring'''
return 1e-4
def lowerCamelCase (self , __magic_name__ , __magic_name__ = -1 , __magic_name__ = -1 , __magic_name__ = None , ) -> Mapping[str, Any]:
'''simple docstring'''
snake_case_ : Dict = super().generate_dummy_inputs(
processor.tokenizer , batch_size=__magic_name__ , seq_length=__magic_name__ , framework=__magic_name__ )
snake_case_ : List[str] = super().generate_dummy_inputs(
processor.image_processor , batch_size=__magic_name__ , framework=__magic_name__ )
return {**text_input_dict, **image_input_dict}
@property
def lowerCamelCase (self ) -> int:
'''simple docstring'''
return 14
| 60 | 1 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : Any = '''char'''
lowerCamelCase_ : List[Any] = '''bpe'''
lowerCamelCase_ : Tuple = '''wp'''
lowerCAmelCase_ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : int = ['''image_processor''', '''char_tokenizer''']
lowerCamelCase_ : List[Any] = '''ViTImageProcessor'''
lowerCamelCase_ : Optional[Any] = '''MgpstrTokenizer'''
def __init__(self , __magic_name__=None , __magic_name__=None , **__magic_name__ ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : List[Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , __magic_name__ , )
snake_case_ : Optional[Any] = kwargs.pop('''feature_extractor''' )
snake_case_ : str = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
snake_case_ : Optional[int] = tokenizer
snake_case_ : Any = AutoTokenizer.from_pretrained('''gpt2''' )
snake_case_ : Tuple = AutoTokenizer.from_pretrained('''bert-base-uncased''' )
super().__init__(__magic_name__ , __magic_name__ )
def __call__(self , __magic_name__=None , __magic_name__=None , __magic_name__=None , **__magic_name__ ) -> Union[str, Any]:
'''simple docstring'''
if images is None and text is None:
raise ValueError('''You need to specify either an `images` or `text` input to process.''' )
if images is not None:
snake_case_ : Union[str, Any] = self.image_processor(__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ )
if text is not None:
snake_case_ : Tuple = self.char_tokenizer(__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ )
if text is None:
return inputs
elif images is None:
return encodings
else:
snake_case_ : str = encodings['''input_ids''']
return inputs
def lowerCamelCase (self , __magic_name__ ) -> Dict:
'''simple docstring'''
snake_case_ , snake_case_ , snake_case_ : Any = sequences
snake_case_ : int = char_preds.size(0 )
snake_case_ , snake_case_ : Optional[Any] = self._decode_helper(__magic_name__ , '''char''' )
snake_case_ , snake_case_ : Any = self._decode_helper(__magic_name__ , '''bpe''' )
snake_case_ , snake_case_ : Union[str, Any] = self._decode_helper(__magic_name__ , '''wp''' )
snake_case_ : List[Any] = []
snake_case_ : Any = []
for i in range(__magic_name__ ):
snake_case_ : Any = [char_scores[i], bpe_scores[i], wp_scores[i]]
snake_case_ : str = [char_strs[i], bpe_strs[i], wp_strs[i]]
snake_case_ : Dict = scores.index(max(__magic_name__ ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
snake_case_ : Dict = {}
snake_case_ : int = final_strs
snake_case_ : List[Any] = final_scores
snake_case_ : Optional[Any] = char_strs
snake_case_ : Dict = bpe_strs
snake_case_ : Any = wp_strs
return out
def lowerCamelCase (self , __magic_name__ , __magic_name__ ) -> List[str]:
'''simple docstring'''
if format == DecodeType.CHARACTER:
snake_case_ : List[Any] = self.char_decode
snake_case_ : Any = 1
snake_case_ : Dict = '''[s]'''
elif format == DecodeType.BPE:
snake_case_ : List[Any] = self.bpe_decode
snake_case_ : int = 2
snake_case_ : int = '''#'''
elif format == DecodeType.WORDPIECE:
snake_case_ : Tuple = self.wp_decode
snake_case_ : List[Any] = 102
snake_case_ : Any = '''[SEP]'''
else:
raise ValueError(F'''Format {format} is not supported.''' )
snake_case_ , snake_case_ : Optional[Any] = [], []
snake_case_ : Optional[int] = pred_logits.size(0 )
snake_case_ : List[Any] = pred_logits.size(1 )
snake_case_ , snake_case_ : str = pred_logits.topk(1 , dim=-1 , largest=__magic_name__ , sorted=__magic_name__ )
snake_case_ : List[Any] = preds_index.view(-1 , __magic_name__ )[:, 1:]
snake_case_ : List[Any] = decoder(__magic_name__ )
snake_case_ , snake_case_ : int = torch.nn.functional.softmax(__magic_name__ , dim=2 ).max(dim=2 )
snake_case_ : int = preds_max_prob[:, 1:]
for index in range(__magic_name__ ):
snake_case_ : int = preds_str[index].find(__magic_name__ )
snake_case_ : Optional[int] = preds_str[index][:pred_eos]
snake_case_ : Optional[int] = preds_index[index].cpu().tolist()
snake_case_ : Optional[int] = pred_index.index(__magic_name__ ) if eos_token in pred_index else -1
snake_case_ : Dict = preds_max_prob[index][: pred_eos_index + 1]
snake_case_ : int = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(__magic_name__ )
conf_scores.append(__magic_name__ )
return dec_strs, conf_scores
def lowerCamelCase (self , __magic_name__ ) -> Dict:
'''simple docstring'''
snake_case_ : Union[str, Any] = [seq.replace(''' ''' , '''''' ) for seq in self.char_tokenizer.batch_decode(__magic_name__ )]
return decode_strs
def lowerCamelCase (self , __magic_name__ ) -> int:
'''simple docstring'''
return self.bpe_tokenizer.batch_decode(__magic_name__ )
def lowerCamelCase (self , __magic_name__ ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Optional[int] = [seq.replace(''' ''' , '''''' ) for seq in self.wp_tokenizer.batch_decode(__magic_name__ )]
return decode_strs
| 60 |
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class __lowerCAmelCase ( unittest.TestCase ):
lowerCamelCase_ : Tuple = inspect.getfile(accelerate.test_utils )
lowerCamelCase_ : Optional[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_cli.py'''] )
lowerCamelCase_ : Union[str, Any] = ['''accelerate''', '''launch''']
lowerCamelCase_ : Tuple = Path.home() / '''.cache/huggingface/accelerate'''
lowerCamelCase_ : Tuple = '''default_config.yaml'''
lowerCamelCase_ : str = config_folder / config_file
lowerCamelCase_ : List[Any] = config_folder / '''_default_config.yaml'''
lowerCamelCase_ : Dict = Path('''tests/test_configs''' )
@classmethod
def lowerCamelCase (cls ) -> Dict:
'''simple docstring'''
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
def lowerCamelCase (cls ) -> Any:
'''simple docstring'''
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
def lowerCamelCase (self ) -> Tuple:
'''simple docstring'''
snake_case_ : Dict = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
for config in sorted(self.test_config_path.glob('''**/*.yaml''' ) ):
with self.subTest(config_file=__magic_name__ ):
execute_subprocess_async(
self.base_cmd + ['''--config_file''', str(__magic_name__ ), self.test_file_path] , env=os.environ.copy() )
def lowerCamelCase (self ) -> List[Any]:
'''simple docstring'''
execute_subprocess_async(['''accelerate''', '''test'''] , env=os.environ.copy() )
class __lowerCAmelCase ( unittest.TestCase ):
lowerCamelCase_ : List[str] = '''test-tpu'''
lowerCamelCase_ : Dict = '''us-central1-a'''
lowerCamelCase_ : Any = '''ls'''
lowerCamelCase_ : Dict = ['''accelerate''', '''tpu-config''']
lowerCamelCase_ : Tuple = '''cd /usr/share'''
lowerCamelCase_ : List[Any] = '''tests/test_samples/test_command_file.sh'''
lowerCamelCase_ : List[Any] = '''Running gcloud compute tpus tpu-vm ssh'''
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
snake_case_ : int = run_command(
self.cmd
+ ['''--command''', self.command, '''--tpu_zone''', self.tpu_zone, '''--tpu_name''', self.tpu_name, '''--debug'''] , return_stdout=__magic_name__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , __magic_name__ , )
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
snake_case_ : Optional[int] = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/0_12_0.yaml''',
'''--command''',
self.command,
'''--tpu_zone''',
self.tpu_zone,
'''--tpu_name''',
self.tpu_name,
'''--debug''',
] , return_stdout=__magic_name__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , __magic_name__ , )
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : List[str] = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--debug'''] , return_stdout=__magic_name__ )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , __magic_name__ , )
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : List[Any] = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command''', self.command, '''--debug'''] , return_stdout=__magic_name__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , __magic_name__ , )
def lowerCamelCase (self ) -> int:
'''simple docstring'''
snake_case_ : Any = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/latest.yaml''',
'''--command''',
self.command,
'''--command''',
'''echo "Hello World"''',
'''--debug''',
] , return_stdout=__magic_name__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all''' , __magic_name__ , )
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
snake_case_ : str = run_command(
self.cmd
+ ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command_file''', self.command_file, '''--debug'''] , return_stdout=__magic_name__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , __magic_name__ , )
def lowerCamelCase (self ) -> int:
'''simple docstring'''
snake_case_ : Tuple = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/0_12_0.yaml''',
'''--command_file''',
self.command_file,
'''--tpu_zone''',
self.tpu_zone,
'''--tpu_name''',
self.tpu_name,
'''--debug''',
] , return_stdout=__magic_name__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , __magic_name__ , )
def lowerCamelCase (self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Any = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--install_accelerate''', '''--debug'''] , return_stdout=__magic_name__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all''' , __magic_name__ , )
def lowerCamelCase (self ) -> str:
'''simple docstring'''
snake_case_ : Optional[Any] = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/latest.yaml''',
'''--install_accelerate''',
'''--accelerate_version''',
'''12.0.0''',
'''--debug''',
] , return_stdout=__magic_name__ , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all''' , __magic_name__ , )
| 60 | 1 |
from __future__ import annotations
from collections.abc import Generator
import requests
from bsa import BeautifulSoup
lowerCAmelCase_ = '''https://www.indeed.co.in/jobs?q=mobile+app+development&l='''
def lowerCamelCase_ ( _UpperCamelCase = "mumbai" ) -> Generator[tuple[str, str], None, None]:
"""simple docstring"""
snake_case_ : Tuple = BeautifulSoup(requests.get(url + location ).content , '''html.parser''' )
# Each matched organicJob div contains the specifics listed for one job posting
for job in soup.find_all('''div''' , attrs={'''data-tn-component''': '''organicJob'''} ):
snake_case_ : Tuple = job.find('''a''' , attrs={'''data-tn-element''': '''jobTitle'''} ).text.strip()
snake_case_ : int = job.find('''span''' , {'''class''': '''company'''} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('''Bangalore'''), 1):
print(F'''Job {i:>2} is {job[0]} at {job[1]}''')
| 60 |
import warnings
from ..trainer import Trainer
from ..utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
class __lowerCAmelCase ( _a ):
def __init__(self , __magic_name__=None , **__magic_name__ ) -> Dict:
'''simple docstring'''
warnings.warn(
'''`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` '''
'''instead.''' , __magic_name__ , )
super().__init__(args=__magic_name__ , **__magic_name__ )
| 60 | 1 |
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class __lowerCAmelCase ( unittest.TestCase ):
def __init__(self , __magic_name__ , __magic_name__=7 , __magic_name__=3 , __magic_name__=18 , __magic_name__=30 , __magic_name__=400 , __magic_name__=True , __magic_name__=None , __magic_name__=True , __magic_name__=None , __magic_name__=True , __magic_name__=[0.5, 0.5, 0.5] , __magic_name__=[0.5, 0.5, 0.5] , __magic_name__=False , ) -> int:
'''simple docstring'''
snake_case_ : int = size if size is not None else {'''height''': 20, '''width''': 20}
snake_case_ : int = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
snake_case_ : str = parent
snake_case_ : Optional[int] = batch_size
snake_case_ : Dict = num_channels
snake_case_ : List[Any] = image_size
snake_case_ : Union[str, Any] = min_resolution
snake_case_ : Tuple = max_resolution
snake_case_ : str = do_resize
snake_case_ : Tuple = size
snake_case_ : int = do_center_crop
snake_case_ : Tuple = crop_size
snake_case_ : int = do_normalize
snake_case_ : Optional[Any] = image_mean
snake_case_ : List[str] = image_std
snake_case_ : str = do_reduce_labels
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def lowerCamelCase_ ( ) -> List[Any]:
"""simple docstring"""
snake_case_ : Any = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
snake_case_ : Union[str, Any] = Image.open(dataset[0]['''file'''] )
snake_case_ : str = Image.open(dataset[1]['''file'''] )
return image, map
def lowerCamelCase_ ( ) -> List[Any]:
"""simple docstring"""
snake_case_ : str = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
snake_case_ : Optional[Any] = Image.open(ds[0]['''file'''] )
snake_case_ : Optional[Any] = Image.open(ds[1]['''file'''] )
snake_case_ : List[str] = Image.open(ds[2]['''file'''] )
snake_case_ : str = Image.open(ds[3]['''file'''] )
return [imagea, imagea], [mapa, mapa]
@require_torch
@require_vision
class __lowerCAmelCase ( _a, unittest.TestCase ):
lowerCamelCase_ : List[Any] = BeitImageProcessor if is_vision_available() else None
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : int = BeitImageProcessingTester(self )
@property
def lowerCamelCase (self ) -> str:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase (self ) -> int:
'''simple docstring'''
snake_case_ : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__magic_name__ , '''do_resize''' ) )
self.assertTrue(hasattr(__magic_name__ , '''size''' ) )
self.assertTrue(hasattr(__magic_name__ , '''do_center_crop''' ) )
self.assertTrue(hasattr(__magic_name__ , '''center_crop''' ) )
self.assertTrue(hasattr(__magic_name__ , '''do_normalize''' ) )
self.assertTrue(hasattr(__magic_name__ , '''image_mean''' ) )
self.assertTrue(hasattr(__magic_name__ , '''image_std''' ) )
def lowerCamelCase (self ) -> Any:
'''simple docstring'''
snake_case_ : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 20, '''width''': 20} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
self.assertEqual(image_processor.do_reduce_labels , __magic_name__ )
snake_case_ : Union[str, Any] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=__magic_name__ )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
self.assertEqual(image_processor.do_reduce_labels , __magic_name__ )
def lowerCamelCase (self ) -> Any:
'''simple docstring'''
pass
def lowerCamelCase (self ) -> str:
'''simple docstring'''
snake_case_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , Image.Image )
# Test not batched input
snake_case_ : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
snake_case_ : Any = image_processing(__magic_name__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
snake_case_ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case_ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , numpify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , np.ndarray )
# Test not batched input
snake_case_ : Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
snake_case_ : Optional[int] = image_processing(__magic_name__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , torchify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__ , torch.Tensor )
# Test not batched input
snake_case_ : Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
snake_case_ : List[str] = image_processing(__magic_name__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__magic_name__ , torchify=__magic_name__ )
snake_case_ : Union[str, Any] = []
for image in image_inputs:
self.assertIsInstance(__magic_name__ , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
snake_case_ : List[str] = image_processing(image_inputs[0] , maps[0] , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
# Test batched
snake_case_ : Any = image_processing(__magic_name__ , __magic_name__ , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
# Test not batched input (PIL images)
snake_case_ , snake_case_ : Optional[int] = prepare_semantic_single_inputs()
snake_case_ : int = image_processing(__magic_name__ , __magic_name__ , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
# Test batched input (PIL images)
snake_case_ , snake_case_ : Dict = prepare_semantic_batch_inputs()
snake_case_ : Optional[int] = image_processing(__magic_name__ , __magic_name__ , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
2,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
snake_case_ , snake_case_ : Tuple = prepare_semantic_single_inputs()
snake_case_ : Optional[int] = image_processing(__magic_name__ , __magic_name__ , return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 150 )
snake_case_ : List[Any] = True
snake_case_ : int = image_processing(__magic_name__ , __magic_name__ , return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
| 60 |
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def lowerCamelCase_ ( _UpperCamelCase ) -> Any:
"""simple docstring"""
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def lowerCamelCase_ ( ) -> Union[str, Any]:
"""simple docstring"""
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def lowerCamelCase_ ( ) -> Tuple:
"""simple docstring"""
snake_case_ : str = '''mock-s3-bucket'''
snake_case_ : str = f'''s3://{mock_bucket}'''
snake_case_ : Any = extract_path_from_uri(_UpperCamelCase )
assert dataset_path.startswith('''s3://''' ) is False
snake_case_ : Optional[Any] = '''./local/path'''
snake_case_ : List[str] = extract_path_from_uri(_UpperCamelCase )
assert dataset_path == new_dataset_path
def lowerCamelCase_ ( _UpperCamelCase ) -> str:
"""simple docstring"""
snake_case_ : Union[str, Any] = is_remote_filesystem(_UpperCamelCase )
assert is_remote is True
snake_case_ : Union[str, Any] = fsspec.filesystem('''file''' )
snake_case_ : int = is_remote_filesystem(_UpperCamelCase )
assert is_remote is False
@pytest.mark.parametrize('''compression_fs_class''' , _UpperCamelCase )
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Tuple:
"""simple docstring"""
snake_case_ : Optional[Any] = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_file, '''bz2''': bza_file, '''lz4''': lza_file}
snake_case_ : Optional[Any] = input_paths[compression_fs_class.protocol]
if input_path is None:
snake_case_ : List[Any] = f'''for \'{compression_fs_class.protocol}\' compression protocol, '''
if compression_fs_class.protocol == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(_UpperCamelCase )
snake_case_ : Dict = fsspec.filesystem(compression_fs_class.protocol , fo=_UpperCamelCase )
assert isinstance(_UpperCamelCase , _UpperCamelCase )
snake_case_ : int = os.path.basename(_UpperCamelCase )
snake_case_ : Any = expected_filename[: expected_filename.rindex('''.''' )]
assert fs.glob('''*''' ) == [expected_filename]
with fs.open(_UpperCamelCase , '''r''' , encoding='''utf-8''' ) as f, open(_UpperCamelCase , encoding='''utf-8''' ) as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize('''protocol''' , ['''zip''', '''gzip'''] )
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Optional[int]:
"""simple docstring"""
snake_case_ : Union[str, Any] = {'''zip''': zip_jsonl_path, '''gzip''': jsonl_gz_path}
snake_case_ : Any = compressed_file_paths[protocol]
snake_case_ : Any = '''dataset.jsonl'''
snake_case_ : Dict = f'''{protocol}://{member_file_path}::{compressed_file_path}'''
snake_case_ , *snake_case_ : Optional[Any] = fsspec.get_fs_token_paths(_UpperCamelCase )
assert fs.isfile(_UpperCamelCase )
assert not fs.isfile('''non_existing_''' + member_file_path )
@pytest.mark.integration
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Dict:
"""simple docstring"""
snake_case_ : Optional[int] = hf_api.dataset_info(_UpperCamelCase , token=_UpperCamelCase )
snake_case_ : List[str] = HfFileSystem(repo_info=_UpperCamelCase , token=_UpperCamelCase )
assert sorted(hffs.glob('''*''' ) ) == [".gitattributes", "data"]
assert hffs.isdir('''data''' )
assert hffs.isfile('''.gitattributes''' ) and hffs.isfile('''data/text_data.txt''' )
with open(_UpperCamelCase ) as f:
assert hffs.open('''data/text_data.txt''' , '''r''' ).read() == f.read()
def lowerCamelCase_ ( ) -> Any:
"""simple docstring"""
snake_case_ : Tuple = '''bz2'''
# Import module
import datasets.filesystems
# Overwrite protocol and reload
register_implementation(_UpperCamelCase , _UpperCamelCase , clobber=_UpperCamelCase )
with pytest.warns(_UpperCamelCase ) as warning_info:
importlib.reload(datasets.filesystems )
assert len(_UpperCamelCase ) == 1
assert (
str(warning_info[0].message )
== f'''A filesystem protocol was already set for {protocol} and will be overwritten.'''
)
| 60 | 1 |
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
lowerCAmelCase_ = logging.get_logger(__name__)
@add_end_docstrings(
_a, r'''
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
''', )
class __lowerCAmelCase ( _a ):
def lowerCamelCase (self , __magic_name__ ) -> np.ndarray:
'''simple docstring'''
if self.framework == "tf":
snake_case_ : str = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
snake_case_ : Dict = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=__magic_name__ )
else:
raise ValueError('''Unsupported framework''' )
return masked_index
def lowerCamelCase (self , __magic_name__ ) -> np.ndarray:
'''simple docstring'''
snake_case_ : Tuple = self.get_masked_index(__magic_name__ )
snake_case_ : int = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , F'''No mask_token ({self.tokenizer.mask_token}) found on the input''' , )
def lowerCamelCase (self , __magic_name__ ) -> Tuple:
'''simple docstring'''
if isinstance(__magic_name__ , __magic_name__ ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(__magic_name__ )
def lowerCamelCase (self , __magic_name__ , __magic_name__=None , **__magic_name__ ) -> Dict[str, GenericTensor]:
'''simple docstring'''
if return_tensors is None:
snake_case_ : Tuple = self.framework
snake_case_ : int = self.tokenizer(__magic_name__ , return_tensors=__magic_name__ )
self.ensure_exactly_one_mask_token(__magic_name__ )
return model_inputs
def lowerCamelCase (self , __magic_name__ ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.model(**__magic_name__ )
snake_case_ : List[Any] = model_inputs['''input_ids''']
return model_outputs
def lowerCamelCase (self , __magic_name__ , __magic_name__=5 , __magic_name__=None ) -> Optional[int]:
'''simple docstring'''
if target_ids is not None and target_ids.shape[0] < top_k:
snake_case_ : Tuple = target_ids.shape[0]
snake_case_ : Tuple = model_outputs['''input_ids'''][0]
snake_case_ : Tuple = model_outputs['''logits''']
if self.framework == "tf":
snake_case_ : Optional[int] = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
snake_case_ : Optional[Any] = outputs.numpy()
snake_case_ : Any = outputs[0, masked_index, :]
snake_case_ : List[Any] = stable_softmax(__magic_name__ , axis=-1 )
if target_ids is not None:
snake_case_ : Union[str, Any] = tf.gather_nd(tf.squeeze(__magic_name__ , 0 ) , target_ids.reshape(-1 , 1 ) )
snake_case_ : Dict = tf.expand_dims(__magic_name__ , 0 )
snake_case_ : Dict = tf.math.top_k(__magic_name__ , k=__magic_name__ )
snake_case_ , snake_case_ : Dict = topk.values.numpy(), topk.indices.numpy()
else:
snake_case_ : Any = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=__magic_name__ ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
snake_case_ : int = outputs[0, masked_index, :]
snake_case_ : Union[str, Any] = logits.softmax(dim=-1 )
if target_ids is not None:
snake_case_ : Optional[Any] = probs[..., target_ids]
snake_case_ , snake_case_ : int = probs.topk(__magic_name__ )
snake_case_ : Dict = []
snake_case_ : Dict = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
snake_case_ : Optional[Any] = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
snake_case_ : int = input_ids.numpy().copy()
if target_ids is not None:
snake_case_ : str = target_ids[p].tolist()
snake_case_ : Union[str, Any] = p
# Filter padding out:
snake_case_ : int = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
snake_case_ : Tuple = self.tokenizer.decode(__magic_name__ , skip_special_tokens=__magic_name__ )
snake_case_ : int = {'''score''': v, '''token''': p, '''token_str''': self.tokenizer.decode([p] ), '''sequence''': sequence}
row.append(__magic_name__ )
result.append(__magic_name__ )
if single_mask:
return result[0]
return result
def lowerCamelCase (self , __magic_name__ , __magic_name__=None ) -> List[Any]:
'''simple docstring'''
if isinstance(__magic_name__ , __magic_name__ ):
snake_case_ : List[str] = [targets]
try:
snake_case_ : str = self.tokenizer.get_vocab()
except Exception:
snake_case_ : Tuple = {}
snake_case_ : str = []
for target in targets:
snake_case_ : int = vocab.get(__magic_name__ , __magic_name__ )
if id_ is None:
snake_case_ : int = self.tokenizer(
__magic_name__ , add_special_tokens=__magic_name__ , return_attention_mask=__magic_name__ , return_token_type_ids=__magic_name__ , max_length=1 , truncation=__magic_name__ , )['''input_ids''']
if len(__magic_name__ ) == 0:
logger.warning(
F'''The specified target token `{target}` does not exist in the model vocabulary. '''
'''We cannot replace it with anything meaningful, ignoring it''' )
continue
snake_case_ : List[Any] = input_ids[0]
# XXX: if users hit this code path, tokenizing every target becomes pretty slow,
# so make sure the warning below tells them how to fix the input
# and get faster performance.
logger.warning(
F'''The specified target token `{target}` does not exist in the model vocabulary. '''
F'''Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.''' )
target_ids.append(id_ )
snake_case_ : Any = list(set(__magic_name__ ) )
if len(__magic_name__ ) == 0:
raise ValueError('''At least one target must be provided when passed.''' )
snake_case_ : Optional[int] = np.array(__magic_name__ )
return target_ids
def lowerCamelCase (self , __magic_name__=None , __magic_name__=None ) -> str:
'''simple docstring'''
snake_case_ : str = {}
if targets is not None:
snake_case_ : str = self.get_target_ids(__magic_name__ , __magic_name__ )
snake_case_ : Dict = target_ids
if top_k is not None:
snake_case_ : Optional[int] = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , '''The tokenizer does not define a `mask_token`.''' )
return {}, {}, postprocess_params
def __call__(self , __magic_name__ , *__magic_name__ , **__magic_name__ ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Dict = super().__call__(__magic_name__ , **__magic_name__ )
if isinstance(__magic_name__ , __magic_name__ ) and len(__magic_name__ ) == 1:
return outputs[0]
return outputs
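The snippet above is the tail of a fill-mask pipeline: it softmaxes the logits at the masked position, optionally restricts candidates to user-supplied targets via the vocabulary lookup, takes the top-k tokens, and decodes each candidate back into a full sequence. A minimal usage sketch with the public transformers pipeline API; the checkpoint name and sentences are illustrative assumptions, not part of the snippet:

from transformers import pipeline

# Illustrative checkpoint; any masked-LM checkpoint that defines a mask token works.
fill_mask = pipeline("fill-mask", model="distilbert-base-uncased")

# Each result carries score, token, token_str and the decoded sequence,
# the same fields assembled in the postprocess loop above.
for result in fill_mask("The capital of France is [MASK].", top_k=5):
    print(result["token_str"], round(result["score"], 4))

# Passing targets routes through the vocabulary lookup implemented above.
restricted = fill_mask("The capital of France is [MASK].", targets=["paris", "london"])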
| 60 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : Optional[Any] = '''encoder-decoder'''
lowerCamelCase_ : Optional[Any] = True
def __init__(self , **__magic_name__ ) -> Optional[int]:
'''simple docstring'''
super().__init__(**__magic_name__ )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
snake_case_ : Any = kwargs.pop('''encoder''' )
snake_case_ : Tuple = encoder_config.pop('''model_type''' )
snake_case_ : Union[str, Any] = kwargs.pop('''decoder''' )
snake_case_ : Union[str, Any] = decoder_config.pop('''model_type''' )
from ..auto.configuration_auto import AutoConfig
snake_case_ : Optional[int] = AutoConfig.for_model(__magic_name__ , **__magic_name__ )
snake_case_ : List[str] = AutoConfig.for_model(__magic_name__ , **__magic_name__ )
snake_case_ : Any = True
@classmethod
def lowerCamelCase (cls , __magic_name__ , __magic_name__ , **__magic_name__ ) -> PretrainedConfig:
'''simple docstring'''
logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
snake_case_ : Tuple = True
snake_case_ : Optional[Any] = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **__magic_name__ )
def lowerCamelCase (self ) -> int:
'''simple docstring'''
snake_case_ : str = copy.deepcopy(self.__dict__ )
snake_case_ : Any = self.encoder.to_dict()
snake_case_ : Dict = self.decoder.to_dict()
snake_case_ : Union[str, Any] = self.__class__.model_type
return output
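This config composes an encoder and a decoder sub-config under a single "encoder-decoder" model type and re-nests them when serializing. A short sketch of the usual round trip, assuming stock transformers classes; the BERT configs are placeholders:

from transformers import BertConfig, EncoderDecoderConfig

# Compose from two standalone configs; as in the classmethod above, the decoder
# config gets is_decoder=True and add_cross_attention=True.
encoder_cfg = BertConfig()
decoder_cfg = BertConfig()
config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder_cfg, decoder_cfg)

# Serialization nests both sub-configs and records the composite model type.
as_dict = config.to_dict()
assert as_dict["model_type"] == "encoder-decoder"
assert as_dict["decoder"]["is_decoder"] is True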
| 60 | 1 |
def lowerCamelCase_ ( _UpperCamelCase ) -> bool:
"""simple docstring"""
return credit_card_number.startswith(('''34''', '''35''', '''37''', '''4''', '''5''', '''6''') )
def lowerCamelCase_ ( _UpperCamelCase ) -> bool:
"""simple docstring"""
snake_case_ : Optional[int] = credit_card_number
snake_case_ : List[Any] = 0
snake_case_ : int = len(_UpperCamelCase ) - 2
for i in range(_UpperCamelCase , -1 , -2 ):
# double the value of every second digit
snake_case_ : Union[str, Any] = int(cc_number[i] )
digit *= 2
        # If doubling a digit results in a two-digit number,
        # i.e. greater than 9 (e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6)
        # to get a single-digit number.
if digit > 9:
digit %= 10
digit += 1
snake_case_ : List[Any] = cc_number[:i] + str(_UpperCamelCase ) + cc_number[i + 1 :]
total += digit
# Sum up the remaining digits
for i in range(len(_UpperCamelCase ) - 1 , -1 , -2 ):
total += int(cc_number[i] )
return total % 10 == 0
def lowerCamelCase_ ( _UpperCamelCase ) -> bool:
"""simple docstring"""
snake_case_ : Optional[Any] = f'''{credit_card_number} is an invalid credit card number because'''
if not credit_card_number.isdigit():
print(f'''{error_message} it has nonnumerical characters.''' )
return False
if not 13 <= len(_UpperCamelCase ) <= 16:
print(f'''{error_message} of its length.''' )
return False
if not validate_initial_digits(_UpperCamelCase ):
print(f'''{error_message} of its first two digits.''' )
return False
if not luhn_validation(_UpperCamelCase ):
print(f'''{error_message} it fails the Luhn check.''' )
return False
print(f'''{credit_card_number} is a valid credit card number.''' )
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number('''4111111111111111''')
validate_credit_card_number('''32323''')
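For clarity, a short worked example of the Luhn step implemented above, assuming the helpers keep the names referenced by the calls (luhn_validation, validate_credit_card_number):

# 4111111111111111: every second digit from the right is doubled, so the leading 4
# becomes 8 and seven of the 1s become 2; with the eight untouched 1s the digit sum
# is 8 + 7*2 + 8 = 30, which is divisible by 10, so the Luhn check passes.
assert luhn_validation("4111111111111111") is True
# Changing the last digit shifts the sum to 31, so the check fails.
assert luhn_validation("4111111111111112") is False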
| 60 |
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
lowerCAmelCase_ = logging.get_logger(__name__)
class __lowerCAmelCase :
def __init__(self , __magic_name__ , __magic_name__ ) -> List[Any]:
'''simple docstring'''
snake_case_ : Optional[int] = question_encoder
snake_case_ : Optional[int] = generator
snake_case_ : Optional[Any] = self.question_encoder
def lowerCamelCase (self , __magic_name__ ) -> Dict:
'''simple docstring'''
if os.path.isfile(__magic_name__ ):
raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''' )
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
snake_case_ : str = os.path.join(__magic_name__ , '''question_encoder_tokenizer''' )
snake_case_ : List[Any] = os.path.join(__magic_name__ , '''generator_tokenizer''' )
self.question_encoder.save_pretrained(__magic_name__ )
self.generator.save_pretrained(__magic_name__ )
@classmethod
def lowerCamelCase (cls , __magic_name__ , **__magic_name__ ) -> Any:
'''simple docstring'''
from ..auto.tokenization_auto import AutoTokenizer
snake_case_ : List[str] = kwargs.pop('''config''' , __magic_name__ )
if config is None:
snake_case_ : int = RagConfig.from_pretrained(__magic_name__ )
snake_case_ : Dict = AutoTokenizer.from_pretrained(
__magic_name__ , config=config.question_encoder , subfolder='''question_encoder_tokenizer''' )
snake_case_ : Dict = AutoTokenizer.from_pretrained(
__magic_name__ , config=config.generator , subfolder='''generator_tokenizer''' )
return cls(question_encoder=__magic_name__ , generator=__magic_name__ )
def __call__(self , *__magic_name__ , **__magic_name__ ) -> Tuple:
'''simple docstring'''
return self.current_tokenizer(*__magic_name__ , **__magic_name__ )
def lowerCamelCase (self , *__magic_name__ , **__magic_name__ ) -> Dict:
'''simple docstring'''
return self.generator.batch_decode(*__magic_name__ , **__magic_name__ )
def lowerCamelCase (self , *__magic_name__ , **__magic_name__ ) -> int:
'''simple docstring'''
return self.generator.decode(*__magic_name__ , **__magic_name__ )
def lowerCamelCase (self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Any = self.question_encoder
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
snake_case_ : Dict = self.generator
def lowerCamelCase (self , __magic_name__ , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = "longest" , __magic_name__ = None , __magic_name__ = True , **__magic_name__ , ) -> BatchEncoding:
'''simple docstring'''
warnings.warn(
'''`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '''
'''regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '''
'''context manager to prepare your targets. See the documentation of your specific tokenizer for more '''
'''details''' , __magic_name__ , )
if max_length is None:
snake_case_ : Dict = self.current_tokenizer.model_max_length
snake_case_ : List[str] = self(
__magic_name__ , add_special_tokens=__magic_name__ , return_tensors=__magic_name__ , max_length=__magic_name__ , padding=__magic_name__ , truncation=__magic_name__ , **__magic_name__ , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
snake_case_ : Optional[int] = self.current_tokenizer.model_max_length
snake_case_ : Union[str, Any] = self(
text_target=__magic_name__ , add_special_tokens=__magic_name__ , return_tensors=__magic_name__ , padding=__magic_name__ , max_length=__magic_name__ , truncation=__magic_name__ , **__magic_name__ , )
snake_case_ : str = labels['''input_ids''']
return model_inputs
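The class above wraps two tokenizers (question encoder and generator) behind one interface and stores each in its own subfolder. A minimal round-trip sketch; the checkpoint name and save path are illustrative:

from transformers import RagTokenizer

# from_pretrained builds both sub-tokenizers from the RagConfig subfolders;
# save_pretrained writes question_encoder_tokenizer/ and generator_tokenizer/ back out.
tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-base")
tokenizer.save_pretrained("./rag_tokenizer")

# __call__ delegates to the currently selected sub-tokenizer (the question encoder by default).
inputs = tokenizer("Who wrote Hamlet?", return_tensors="pt")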
| 60 | 1 |
import math
def lowerCamelCase_ ( _UpperCamelCase ) -> bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(_UpperCamelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def lowerCamelCase_ ( _UpperCamelCase = 10_001 ) -> int:
"""simple docstring"""
try:
snake_case_ : Union[str, Any] = int(_UpperCamelCase )
except (TypeError, ValueError):
raise TypeError('''Parameter nth must be int or castable to int.''' ) from None
if nth <= 0:
raise ValueError('''Parameter nth must be greater than or equal to one.''' )
snake_case_ : list[int] = []
snake_case_ : Optional[int] = 2
while len(_UpperCamelCase ) < nth:
if is_prime(_UpperCamelCase ):
primes.append(_UpperCamelCase )
num += 1
else:
num += 1
return primes[len(_UpperCamelCase ) - 1]
if __name__ == "__main__":
print(F'''{solution() = }''')
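As a quick sanity check of the trial-division helpers above (assuming the names referenced by the calls, is_prime and solution):

# 2, 3, 5, 7, 11, 13 ... so the 6th prime is 13; the default solution() returns
# the 10001st prime, 104743.
assert is_prime(13) and not is_prime(15)
assert solution(6) == 13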
| 60 |
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __lowerCAmelCase :
def __init__(self , __magic_name__ , __magic_name__=13 , __magic_name__=30 , __magic_name__=2 , __magic_name__=3 , __magic_name__=True , __magic_name__=True , __magic_name__=32 , __magic_name__=5 , __magic_name__=4 , __magic_name__=37 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=10 , __magic_name__=0.02 , __magic_name__=None , ) -> List[Any]:
'''simple docstring'''
snake_case_ : List[str] = parent
snake_case_ : Optional[Any] = batch_size
snake_case_ : List[Any] = image_size
snake_case_ : Optional[int] = patch_size
snake_case_ : Optional[Any] = num_channels
snake_case_ : Optional[Any] = is_training
snake_case_ : List[Any] = use_labels
snake_case_ : Optional[int] = hidden_size
snake_case_ : Optional[Any] = num_hidden_layers
snake_case_ : Union[str, Any] = num_attention_heads
snake_case_ : Optional[Any] = intermediate_size
snake_case_ : Any = hidden_act
snake_case_ : List[str] = hidden_dropout_prob
snake_case_ : Dict = attention_probs_dropout_prob
snake_case_ : List[str] = type_sequence_label_size
snake_case_ : Union[str, Any] = initializer_range
snake_case_ : List[Any] = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
snake_case_ : Any = (image_size // patch_size) ** 2
snake_case_ : int = num_patches + 1
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ : List[Any] = None
if self.use_labels:
snake_case_ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ : int = self.get_config()
return config, pixel_values, labels
def lowerCamelCase (self ) -> Tuple:
'''simple docstring'''
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ ) -> List[str]:
'''simple docstring'''
snake_case_ : int = ViTMSNModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
snake_case_ : List[str] = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ ) -> List[str]:
'''simple docstring'''
snake_case_ : int = self.type_sequence_label_size
snake_case_ : Tuple = ViTMSNForImageClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
snake_case_ : Any = model(__magic_name__ , labels=__magic_name__ )
print('''Pixel and labels shape: {pixel_values.shape}, {labels.shape}''' )
print('''Labels: {labels}''' )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
snake_case_ : Optional[int] = 1
snake_case_ : List[str] = ViTMSNForImageClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
snake_case_ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case_ : Any = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
snake_case_ : Any = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ : Optional[int] = config_and_inputs
snake_case_ : Union[str, Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( _a, _a, unittest.TestCase ):
lowerCamelCase_ : List[Any] = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
lowerCamelCase_ : Optional[int] = (
{'''feature-extraction''': ViTMSNModel, '''image-classification''': ViTMSNForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase_ : int = False
lowerCamelCase_ : Optional[int] = False
lowerCamelCase_ : int = False
lowerCamelCase_ : Optional[int] = False
def lowerCamelCase (self ) -> str:
'''simple docstring'''
snake_case_ : List[Any] = ViTMSNModelTester(self )
snake_case_ : Optional[Any] = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ , hidden_size=37 )
def lowerCamelCase (self ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMSN does not use inputs_embeds''' )
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
pass
def lowerCamelCase (self ) -> int:
'''simple docstring'''
snake_case_ , snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : Any = model_class(__magic_name__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case_ : Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__magic_name__ , nn.Linear ) )
def lowerCamelCase (self ) -> int:
'''simple docstring'''
snake_case_ , snake_case_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : Tuple = model_class(__magic_name__ )
snake_case_ : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ : Optional[int] = [*signature.parameters.keys()]
snake_case_ : List[str] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __magic_name__ )
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
snake_case_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def lowerCamelCase (self ) -> List[str]:
'''simple docstring'''
snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__magic_name__ )
@slow
def lowerCamelCase (self ) -> Any:
'''simple docstring'''
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ : str = ViTMSNModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def lowerCamelCase_ ( ) -> Optional[Any]:
"""simple docstring"""
snake_case_ : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
@cached_property
def lowerCamelCase (self ) -> Union[str, Any]:
'''simple docstring'''
return ViTImageProcessor.from_pretrained('''facebook/vit-msn-small''' ) if is_vision_available() else None
@slow
def lowerCamelCase (self ) -> Any:
'''simple docstring'''
torch.manual_seed(2 )
snake_case_ : List[str] = ViTMSNForImageClassification.from_pretrained('''facebook/vit-msn-small''' ).to(__magic_name__ )
snake_case_ : str = self.default_image_processor
snake_case_ : str = prepare_img()
snake_case_ : int = image_processor(images=__magic_name__ , return_tensors='''pt''' ).to(__magic_name__ )
# forward pass
with torch.no_grad():
snake_case_ : Optional[int] = model(**__magic_name__ )
# verify the logits
snake_case_ : Optional[int] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __magic_name__ )
snake_case_ : List[Any] = torch.tensor([-0.0_803, -0.4_454, -0.2_375] ).to(__magic_name__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __magic_name__ , atol=1e-4 ) )
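A stripped-down inference sketch matching the integration test above; the checkpoint and image path are the ones used in the test, the rest is illustrative:

import torch
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNForImageClassification

# Same checkpoint as in the slow test; the logits are expected to have shape (1, 1000).
processor = ViTImageProcessor.from_pretrained("facebook/vit-msn-small")
model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
predicted_class = logits.argmax(-1).item()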
| 60 | 1 |