"""
Approximate sin(theta) and cos(theta) with truncated Maclaurin series.
"""
from math import factorial, pi


def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    """Approximate sin(theta) using the first `accuracy` Maclaurin terms."""
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_sin() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy")
    theta = float(theta)
    # Reduce theta into [-2*pi, 2*pi] so the truncated series converges quickly.
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy)
    )


def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    """Approximate cos(theta) using the first `accuracy` Maclaurin terms."""
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_cos() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy")
    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(maclaurin_sin(10))
    print(maclaurin_sin(-10))
    print(maclaurin_sin(10, 15))
    print(maclaurin_sin(-10, 15))
    print(maclaurin_cos(5))
    print(maclaurin_cos(-5))
    print(maclaurin_cos(10, 15))
    print(maclaurin_cos(-10, 15))
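# Added sanity check (not part of the original module): after range reduction
# into [-2*pi, 2*pi], thirty series terms should agree with math.sin/math.cos
# to well below 1e-9.
if __name__ == "__main__":
    from math import cos, sin

    assert abs(maclaurin_sin(10) - sin(10)) < 1e-9
    assert abs(maclaurin_cos(5) - cos(5)) < 1e-9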
import argparse
import os

import torch

from transformers import (
    XLNetConfig,
    XLNetForQuestionAnswering,
    XLNetForSequenceClassification,
    XLNetLMHeadModel,
    load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


GLUE_TASKS_NUM_LABELS = {
    "cola": 2,
    "mnli": 3,
    "mrpc": 2,
    "sst-2": 2,
    "sts-b": 1,
    "qqp": 2,
    "qnli": 2,
    "rte": 2,
    "wnli": 2,
}

logging.set_verbosity_info()


def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, bert_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    # Initialise the PyTorch model from the JSON config
    config = XLNetConfig.from_json_file(bert_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--xlnet_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained XLNet model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the folder to store the PyTorch model or dataset/vocab.",
    )
    parser.add_argument(
        "--finetuning_task",
        default=None,
        type=str,
        help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
    )
    args = parser.parse_args()
    print(args)

    convert_xlnet_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
    )
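# A hypothetical invocation of the converter above; the checkpoint and config
# paths are placeholders, not real files:
#
#   python convert_xlnet_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./xlnet_model.ckpt \
#       --xlnet_config_file ./xlnet_config.json \
#       --pytorch_dump_folder_path ./xlnet-pytorch \
#       --finetuning_task sst-2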
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class __A ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
@staticmethod
@abstractmethod
def SCREAMING_SNAKE_CASE ( __A ) -> str:
raise NotImplementedError()
@abstractmethod
def SCREAMING_SNAKE_CASE ( self ) -> str:
raise NotImplementedError() | 81 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
    # See all CANINE models at https://huggingface.co/models?filter=canine
}


class CanineConfig(PretrainedConfig):
    model_type = "canine"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0xE000,
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16384,
        local_transformer_stride=128,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
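# A minimal usage sketch, assuming the public `transformers` package layout
# rather than the relative imports used inside this module:
#
#   from transformers import CanineConfig, CanineModel
#
#   config = CanineConfig(num_hidden_layers=6)  # any default can be overridden
#   model = CanineModel(config)                 # randomly initialised weights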
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple

import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import Seq2SeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, T5ForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
    ROUGE_KEYS,
    LegacySeq2SeqDataset,
    Seq2SeqDataset,
    assert_all_frozen,
    calculate_bleu,
    calculate_rouge,
    check_output_dir,
    flatten_list,
    freeze_embeds,
    freeze_params,
    get_git_info,
    label_smoothed_nll_loss,
    lmap,
    pickle_save,
    save_git_info,
    save_json,
    use_task_specific_params,
)

# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train  # noqa

logger = logging.getLogger(__name__)


class SummarizationModule(BaseTransformer):
    mode = "summarization"
    loss_names = ["loss"]
    metric_names = ROUGE_KEYS
    default_val_metric = "rouge2"

    def __init__(self, hparams, **kwargs):
        if hparams.sortish_sampler and hparams.gpus > 1:
            hparams.replace_sampler_ddp = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training")
            if hparams.sortish_sampler:
                raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously")
        super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
        use_task_specific_params(self.model, "summarization")
        save_git_info(self.hparams.output_dir)
        self.metrics_save_path = Path(self.output_dir) / "metrics.json"
        self.hparams_save_path = Path(self.output_dir) / "hparams.pkl"
        pickle_save(self.hparams, self.hparams_save_path)
        self.step_count = 0
        self.metrics = defaultdict(list)
        self.model_type = self.config.model_type
        self.vocab_size = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size
        self.dataset_kwargs: dict = {
            "data_dir": self.hparams.data_dir,
            "max_source_length": self.hparams.max_source_length,
            "prefix": self.model.config.prefix or "",
        }
        n_observations_per_split = {
            "train": self.hparams.n_train,
            "val": self.hparams.n_val,
            "test": self.hparams.n_test,
        }
        self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
        self.target_lens = {
            "train": self.hparams.max_target_length,
            "val": self.hparams.val_max_target_length,
            "test": self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}"
        assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}"
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model)
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder())
            assert_all_frozen(self.model.get_encoder())
        self.hparams.git_sha = get_git_info()["repo_sha"]
        self.num_workers = hparams.num_workers
        self.decoder_start_token_id = None  # default to config
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer):
            self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            self.model.config.decoder_start_token_id = self.decoder_start_token_id
        self.dataset_class = (
            Seq2SeqDataset if hasattr(self.tokenizer, "prepare_seq2seq_batch") else LegacySeq2SeqDataset
        )
        self.already_saved_batch = False
        self.eval_beams = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            self.eval_max_length = self.hparams.eval_max_gen_length
        else:
            self.eval_max_length = self.model.config.max_length
        self.val_metric = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric

    def save_readable_batch(self, batch: Dict[str, torch.Tensor]) -> Dict[str, List[str]]:
        """A debugging utility: dumps one decoded batch to JSON."""
        readable_batch = {
            k: self.tokenizer.batch_decode(v.tolist()) if "mask" not in k else v.shape for k, v in batch.items()
        }
        save_json(readable_batch, Path(self.output_dir) / "text_batch.json")
        save_json({k: v.tolist() for k, v in batch.items()}, Path(self.output_dir) / "tok_batch.json")
        self.already_saved_batch = True
        return readable_batch

    def forward(self, input_ids, **kwargs):
        return self.model(input_ids, **kwargs)

    def ids_to_clean_text(self, generated_ids: List[int]):
        gen_text = self.tokenizer.batch_decode(
            generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
        )
        return lmap(str.strip, gen_text)

    def _step(self, batch: dict) -> Tuple:
        pad_token_id = self.tokenizer.pad_token_id
        src_ids, src_mask = batch["input_ids"], batch["attention_mask"]
        tgt_ids = batch["labels"]
        if isinstance(self.model, T5ForConditionalGeneration):
            decoder_input_ids = self.model._shift_right(tgt_ids)
        else:
            decoder_input_ids = shift_tokens_right(tgt_ids, pad_token_id)
        if not self.already_saved_batch:  # This would be slightly better if it only happened on rank zero
            batch["decoder_input_ids"] = decoder_input_ids
            self.save_readable_batch(batch)

        outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False)
        lm_logits = outputs["logits"]
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            ce_loss_fct = nn.CrossEntropyLoss(ignore_index=pad_token_id)
            assert lm_logits.shape[-1] == self.vocab_size
            loss = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1))
        else:
            lprobs = nn.functional.log_softmax(lm_logits, dim=-1)
            loss, nll_loss = label_smoothed_nll_loss(
                lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id
            )
        return (loss,)

    @property
    def pad(self) -> int:
        return self.tokenizer.pad_token_id

    def training_step(self, batch, batch_idx) -> Dict:
        loss_tensors = self._step(batch)
        logs = dict(zip(self.loss_names, loss_tensors))
        # tokens per batch
        logs["tpb"] = batch["input_ids"].ne(self.pad).sum() + batch["labels"].ne(self.pad).sum()
        logs["bs"] = batch["input_ids"].shape[0]
        logs["src_pad_tok"] = batch["input_ids"].eq(self.pad).sum()
        logs["src_pad_frac"] = batch["input_ids"].eq(self.pad).float().mean()
        # TODO(SS): make a wandb summary metric for this
        return {"loss": loss_tensors[0], "log": logs}

    def validation_step(self, batch, batch_idx) -> Dict:
        return self._generative_step(batch)

    def validation_epoch_end(self, outputs, prefix="val") -> Dict:
        self.step_count += 1
        losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
        loss = losses["loss"]
        generative_metrics = {
            k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ["gen_time", "gen_len"]
        }
        metric_val = (
            generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
        )
        metric_tensor: torch.FloatTensor = torch.tensor(metric_val).type_as(loss)
        generative_metrics.update({k: v.item() for k, v in losses.items()})
        losses.update(generative_metrics)
        all_metrics = {f"{prefix}_avg_{k}": x for k, x in losses.items()}
        all_metrics["step_count"] = self.step_count
        self.metrics[prefix].append(all_metrics)  # callback writes this to self.metrics_save_path
        preds = flatten_list([x["preds"] for x in outputs])
        return {
            "log": all_metrics,
            "preds": preds,
            f"{prefix}_loss": loss,
            f"{prefix}_{self.val_metric}": metric_tensor,
        }

    def calc_generative_metrics(self, preds, target) -> Dict:
        return calculate_rouge(preds, target)

    def _generative_step(self, batch: dict) -> dict:
        t0 = time.time()
        # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        generated_ids = self.model.generate(
            batch["input_ids"],
            attention_mask=batch["attention_mask"],
            use_cache=True,
            decoder_start_token_id=self.decoder_start_token_id,
            num_beams=self.eval_beams,
            max_length=self.eval_max_length,
        )
        gen_time = (time.time() - t0) / batch["input_ids"].shape[0]
        preds: List[str] = self.ids_to_clean_text(generated_ids)
        target: List[str] = self.ids_to_clean_text(batch["labels"])
        loss_tensors = self._step(batch)
        base_metrics = dict(zip(self.loss_names, loss_tensors))
        rouge: Dict = self.calc_generative_metrics(preds, target)
        summ_len = np.mean(lmap(len, generated_ids))
        base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **rouge)
        return base_metrics

    def test_step(self, batch, batch_idx):
        return self._generative_step(batch)

    def test_epoch_end(self, outputs):
        return self.validation_epoch_end(outputs, prefix="test")

    def get_dataset(self, type_path) -> Seq2SeqDataset:
        n_obs = self.n_obs[type_path]
        max_target_length = self.target_lens[type_path]
        dataset = self.dataset_class(
            self.tokenizer,
            type_path=type_path,
            n_obs=n_obs,
            max_target_length=max_target_length,
            **self.dataset_kwargs,
        )
        return dataset

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        dataset = self.get_dataset(type_path)

        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            sampler = dataset.make_sortish_sampler(batch_size, distributed=self.hparams.gpus > 1)
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=False,
                num_workers=self.num_workers,
                sampler=sampler,
            )
        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            batch_sampler = dataset.make_dynamic_sampler(
                self.hparams.max_tokens_per_batch, distributed=self.hparams.gpus > 1
            )
            return DataLoader(
                dataset,
                batch_sampler=batch_sampler,
                collate_fn=dataset.collate_fn,
                num_workers=self.num_workers,
            )
        else:
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=shuffle,
                num_workers=self.num_workers,
                sampler=None,
            )

    def train_dataloader(self) -> DataLoader:
        dataloader = self.get_dataloader("train", batch_size=self.hparams.train_batch_size, shuffle=True)
        return dataloader

    def val_dataloader(self) -> DataLoader:
        return self.get_dataloader("val", batch_size=self.hparams.eval_batch_size)

    def test_dataloader(self) -> DataLoader:
        return self.get_dataloader("test", batch_size=self.hparams.eval_batch_size)

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        add_generic_args(parser, root_dir)
        parser.add_argument(
            "--max_source_length",
            default=1024,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--max_target_length",
            default=56,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--val_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--test_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument("--freeze_encoder", action="store_true")
        parser.add_argument("--freeze_embeds", action="store_true")
        parser.add_argument("--sortish_sampler", action="store_true", default=False)
        parser.add_argument("--overwrite_output_dir", action="store_true", default=False)
        parser.add_argument("--max_tokens_per_batch", type=int, default=None)
        parser.add_argument("--logger_name", type=str, choices=["default", "wandb", "wandb_shared"], default="default")
        parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_val", type=int, default=500, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument(
            "--task", type=str, default="summarization", required=False, help="# examples. -1 means use all."
        )
        parser.add_argument("--label_smoothing", type=float, default=0.0, required=False)
        parser.add_argument("--src_lang", type=str, default="", required=False)
        parser.add_argument("--tgt_lang", type=str, default="", required=False)
        parser.add_argument("--eval_beams", type=int, default=None, required=False)
        parser.add_argument(
            "--val_metric", type=str, default=None, required=False, choices=["bleu", "rouge2", "loss", None]
        )
        parser.add_argument("--eval_max_gen_length", type=int, default=None, help="never generate more than n tokens")
        parser.add_argument("--save_top_k", type=int, default=1, required=False, help="How many checkpoints to save")
        parser.add_argument(
            "--early_stopping_patience",
            type=int,
            default=-1,
            required=False,
            help=(
                "-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
                " val_check_interval will effect it."
            ),
        )
        return parser


class TranslationModule(SummarizationModule):
    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"

    def __init__(self, hparams, **kwargs):
        super().__init__(hparams, **kwargs)
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def calc_generative_metrics(self, preds, target) -> dict:
        return calculate_bleu(preds, target)


def main(args, model=None) -> SummarizationModule:
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)

    if model is None:
        if "summarization" in args.task:
            model = SummarizationModule(args)
        else:
            model = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith("/tmp")
        or str(args.output_dir).startswith("/var")
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("WANDB_PROJECT", dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}")

    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False

    lower_is_better = args.val_metric == "loss"
    trainer: pl.Trainer = generic_train(
        model,
        args,
        logging_callback=Seq2SeqLoggingCallback(),
        checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better
        ),
        early_stopping_callback=es_callback,
        logger=logger,
    )
    pickle_save(model.hparams, model.output_dir / "hparams.pkl")
    if not args.do_predict:
        return model

    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams)

    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    main(args)
import gc
import tempfile
import unittest

import numpy as np
import torch

from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device


torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionMegaPipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )

        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt",
            image=prompt_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt",
            image=prompt_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt,
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images
        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


if TYPE_CHECKING:
    from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType


logger = logging.get_logger(__name__)

DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
    ),
    "microsoft/deberta-v2-xxlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
    ),
}


class DebertaV2Config(PretrainedConfig):
    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=128100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act


class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
from queue import Queue
from typing import TYPE_CHECKING, Optional


if TYPE_CHECKING:
    from ..models.auto import AutoTokenizer


class BaseStreamer:
    def put(self, value):
        """Function that is called by `.generate()` to push new tokens."""
        raise NotImplementedError()

    def end(self):
        """Function that is called by `.generate()` to signal the end of generation."""
        raise NotImplementedError()


class TextStreamer(BaseStreamer):
    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs

        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True

    def put(self, value):
        """Receives tokens, decodes them, and prints them to stdout as soon as they form entire words."""
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError("TextStreamer only supports batch size 1")
        elif len(value.shape) > 1:
            value = value[0]

        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return

        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)

        # After the symbol for a new line, we flush the cache.
        if text.endswith("\n"):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(" ") + 1]
            self.print_len += len(printable_text)

        self.on_finalized_text(printable_text)

    def end(self):
        """Flushes any remaining cache and signals the end of the stream."""
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ""

        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Prints the new text to stdout. If the stream is ending, also prints a newline."""
        print(text, flush=True, end="" if not stream_end else None)

    def _is_chinese_char(self, cp):
        """Checks whether `cp` is the codepoint of a CJK character."""
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)  #
            or (cp >= 0x20000 and cp <= 0x2A6DF)  #
            or (cp >= 0x2A700 and cp <= 0x2B73F)  #
            or (cp >= 0x2B740 and cp <= 0x2B81F)  #
            or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
        ):  #
            return True

        return False


class TextIteratorStreamer(TextStreamer):
    def __init__(
        self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, timeout: Optional[float] = None, **decode_kwargs
    ):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Put the new text in the queue. If the stream is ending, also put a stop signal in the queue."""
        self.text_queue.put(text, timeout=self.timeout)
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout)

    def __iter__(self):
        return self

    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
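# A short usage sketch for TextIteratorStreamer; "gpt2" is just an example
# checkpoint. Generation runs in a background thread so the main thread can
# consume the decoded chunks as they are produced:
#
#   from threading import Thread
#   from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
#
#   tok = AutoTokenizer.from_pretrained("gpt2")
#   model = AutoModelForCausalLM.from_pretrained("gpt2")
#   inputs = tok(["An increasing sequence: one,"], return_tensors="pt")
#   streamer = TextIteratorStreamer(tok)
#   thread = Thread(target=model.generate, kwargs=dict(**inputs, streamer=streamer, max_new_tokens=20))
#   thread.start()
#   generated_text = "".join(chunk for chunk in streamer)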
"""simple docstring"""
def _snake_case ( lowercase__ : int = 1_0_0_0 ) -> int:
'''simple docstring'''
return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) )
if __name__ == "__main__":
print(solution())
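# Added hand-checked example (not in the original): for n = 10 the terms for
# a = 3..10 are 6, 8, 20, 24, 42, 48, 72 and 80, which sum to 300.
if __name__ == "__main__":
    assert solution(10) == 300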
def solution(length: int = 50) -> int:
    """
    Returns the number of ways a row of the given length can be filled with
    blocks at least three units long, separated by at least one empty square.

    >>> solution(7)
    17
    """
    ways_number = [1] * (length + 1)

    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]

        ways_number[row_length] += 1

    return ways_number[length]


if __name__ == "__main__":
    print(f"{solution() = }")
"""
Quine-McCluskey method for minimizing boolean functions.
"""
from __future__ import annotations

from collections.abc import Sequence
from typing import Literal


def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """Merge two binary strings that differ in exactly one position, else return False."""
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    """Repeatedly merge minterms until the prime implicants remain."""
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    """Convert each minterm into its fixed-width binary string representation."""
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    """Check whether the two strings differ in exactly `count` positions."""
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    """Select the essential prime implicants from the coverage chart."""
    temp = []
    select = [0] * len(chart)
    # First pass: any minterm covered by a single implicant makes it essential.
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # Second pass: greedily pick the implicant covering the most remaining minterms.
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i

        if max_n == 0:
            return temp

        temp.append(prime_implicants[rem])

        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    """Build the prime-implicant coverage chart (implicants x minterms)."""
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
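# An added example of the intermediate representation: `decimal_to_binary`
# builds its strings with float arithmetic, so each "bit" is rendered as
# "0.0" or "1.5" style fragments rather than plain 0/1, e.g.:
#
#   decimal_to_binary(3, [1.5])  ->  ['0.00.01.5']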
from __future__ import annotations


def bucket_sort(my_list: list) -> list:
    """Sort a list by distributing its values into per-range buckets."""
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]

    for i in my_list:
        buckets[int(i - min_value)].append(i)

    return [v for bucket in buckets for v in sorted(bucket)]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
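# Added check (not in the original): buckets are indexed by integer offset
# from the minimum value, so floats are grouped by that offset and then
# sorted within each bucket.
if __name__ == "__main__":
    assert bucket_sort([0.4, 1.2, 0.1]) == [0.1, 0.4, 1.2]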
"""simple docstring"""
from __future__ import annotations
import os
from typing import Any
import requests
lowerCamelCase__ = """https://api.github.com"""
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
lowerCamelCase__ = BASE_URL + """/user"""
# https://github.com/settings/tokens
lowerCamelCase__ = os.environ.get("""USER_TOKEN""", """""")
def __lowerCAmelCase (_UpperCamelCase ):
__lowerCAmelCase : Optional[int] = {
'Authorization': F"token {auth_token}",
'Accept': 'application/vnd.github.v3+json',
}
return requests.get(_UpperCamelCase , headers=_UpperCamelCase ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(f'{key}: {value}')
else:
raise ValueError("""'USER_TOKEN' field cannot be empty.""") | 86 |
import numpy as np


def sigmoid(vector: np.array) -> np.array:
    """Apply the logistic function 1 / (1 + e^(-x)) elementwise."""
    return 1 / (1 + np.exp(-vector))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
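# An added example of the elementwise behaviour (values rounded):
#
#   sigmoid(np.array([-1.0, 0.0, 1.0]))  ->  array([0.26894142, 0.5, 0.73105858])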
def matching_min_vertex_cover(graph: dict) -> set:
    """Approximate a minimum vertex cover via maximal matching."""
    chosen_vertices = set()
    # edges = set of graph's edges
    edges = get_edges(graph)

    # While there are still elements in edges list, take an arbitrary edge
    # (from_node, to_node) and add its extremities to chosen_vertices, then
    # remove all arcs adjacent to the from_node and to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    """Return the set of edge tuples of the adjacency-list graph."""
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
"""
Implementation of gradient descent algorithm for minimizing cost of a linear
hypothesis function.
"""
import numpy

# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009


def _error(example_no, data_set="train"):
    """Return the error of the hypothesis on the given example."""
    return calculate_hypothesis_value(example_no, data_set) - output(
        example_no, data_set
    )


def _hypothesis_value(data_input_tuple):
    """Compute the linear hypothesis value for a given input tuple."""
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    """Return the target output of the given example."""
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    """Return the hypothesis value of the given example."""
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    """Sum the cost-derivative terms over the training examples."""
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    """Average the summed derivative over the number of training examples."""
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))


if __name__ == "__main__":
    run_gradient_descent()
    print("\nTesting gradient descent for a linear hypothesis function.\n")
    test_gradient_descent()
import unittest

from transformers import load_tool

from .test_tools_common import ToolTesterMixin


TEXT = '\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n'


class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-question-answering")
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering", remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
def pancake_sort(arr):
    """Sort `arr` using pancake sort (repeated prefix reversals)."""
    cur = len(arr)
    while cur > 1:
        # Find the maximum number in arr
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse whole list
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(pancake_sort(unsorted))
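# Added quick check (not in the original): each pass flips the current maximum
# to the front and then into its final position, so len(arr) - 1 passes sort
# the list.
if __name__ == "__main__":
    assert pancake_sort([3, 6, 1, 10, 2]) == [1, 2, 3, 6, 10]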
def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci number to contain n digits."""
    f1, f2 = 1, 1
    index = 2
    while True:
        i = 0
        f = f1 + f2
        f1, f2 = f2, f
        index += 1
        for _ in str(f):
            i += 1
        if i == n:
            break
    return index


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
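# Added sanity check (not in the original): 144 = F(12) is the first Fibonacci
# number with three digits, so solution(3) should return 12.
if __name__ == "__main__":
    assert solution(3) == 12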
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import CLIPSegProcessor, ViTImageProcessor


@require_vision
class CLIPSegProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images from random numpy arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_visual_prompt(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
import unittest

from datasets import load_dataset

from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class BloomTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = "tokenizer_file"
    special_tokens_map = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}

    def setUp(self):
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BloomTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def test_encodings_from_sample_data(self):
        """Assert that the created tokens are the same as the hard-coded ones."""
        tokenizer = self.get_rust_tokenizer()

        INPUT_SENTENCES = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
        TARGET_TOKENS = [[2_175, 23_714, 73_173, 144_252, 2], [77, 132_619, 3_478, 368, 109_586, 35_433, 2]]

        computed_tokens = tokenizer.batch_encode_plus(INPUT_SENTENCES)["input_ids"]
        self.assertListEqual(TARGET_TOKENS, computed_tokens)

        decoded_tokens = tokenizer.batch_decode(computed_tokens)
        self.assertListEqual(decoded_tokens, INPUT_SENTENCES)

    def test_padding(self, max_length=6):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                try:
                    tokenizer_r.encode(s, max_length=max_length)
                    tokenizer_r.encode_plus(s, max_length=max_length)

                    tokenizer_r.batch_encode_plus(s2, max_length=max_length)
                    tokenizer_r.encode(p, max_length=max_length)
                    tokenizer_r.batch_encode_plus(p2, max_length=max_length)
                except ValueError:
                    self.fail("Bloom Tokenizer should be able to deal with padding")

                tokenizer_r.pad_token = None  # Hotfixing padding = None
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length"
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length"
                )

    def test_encodings_from_xnli_dataset(self):
        """Round-trip one XNLI sample through the tokenizer."""
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset("xnli", "all_languages", split="test", streaming=True)

        sample_data = next(iter(ds))["premise"]  # pick up one data
        input_text = list(sample_data.values())

        output_tokens = list(map(tokenizer.encode, input_text))
        predicted_text = [tokenizer.decode(x, clean_up_tokenization_spaces=False) for x in output_tokens]
        self.assertListEqual(predicted_text, input_text)

    def test_pretrained_model_lists(self):
        # The test has to be overriden because BLOOM uses ALiBi positional embeddings that do not have
        # any sequence length constraints. This test of the parent class will fail since it relies on the
        # maximum sequence length of the positional embeddings.
        self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map), 1)
        self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]), 1)
| 90 |
def _A ( SCREAMING_SNAKE_CASE : list ):
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
raise ValueError("Input series is not valid, valid series - [2, 4, 6]" )
if len(SCREAMING_SNAKE_CASE ) == 0:
raise ValueError("Input list must be a non empty list" )
if len(SCREAMING_SNAKE_CASE ) == 1:
return True
a__ : Union[str, Any] =series[1] - series[0]
for index in range(len(SCREAMING_SNAKE_CASE ) - 1 ):
if series[index + 1] - series[index] != common_diff:
return False
return True
def _A ( SCREAMING_SNAKE_CASE : list ):
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
raise ValueError("Input series is not valid, valid series - [2, 4, 6]" )
if len(SCREAMING_SNAKE_CASE ) == 0:
raise ValueError("Input list must be a non empty list" )
a__ : Any =0
for val in series:
answer += val
return answer / len(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 95 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : str = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
UpperCAmelCase_ : Union[str, Any] = {
"""vocab_file""": {
"""squeezebert/squeezebert-uncased""": (
"""https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"""
),
"""squeezebert/squeezebert-mnli""": """https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt""",
"""squeezebert/squeezebert-mnli-headless""": (
"""https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""squeezebert/squeezebert-uncased""": (
"""https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"""
),
"""squeezebert/squeezebert-mnli""": (
"""https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"""
),
"""squeezebert/squeezebert-mnli-headless""": (
"""https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"""
),
},
}
UpperCAmelCase_ : Optional[Any] = {
"""squeezebert/squeezebert-uncased""": 512,
"""squeezebert/squeezebert-mnli""": 512,
"""squeezebert/squeezebert-mnli-headless""": 512,
}
UpperCAmelCase_ : Any = {
"""squeezebert/squeezebert-uncased""": {"""do_lower_case""": True},
"""squeezebert/squeezebert-mnli""": {"""do_lower_case""": True},
"""squeezebert/squeezebert-mnli-headless""": {"""do_lower_case""": True},
}
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_INIT_CONFIGURATION
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = SqueezeBertTokenizer
def __init__( self : str , lowercase_ : List[Any]=None , lowercase_ : Optional[Any]=None , lowercase_ : Tuple=True , lowercase_ : int="[UNK]" , lowercase_ : List[Any]="[SEP]" , lowercase_ : str="[PAD]" , lowercase_ : List[str]="[CLS]" , lowercase_ : Tuple="[MASK]" , lowercase_ : Optional[int]=True , lowercase_ : Optional[Any]=None , **lowercase_ : Tuple , ):
'''simple docstring'''
super().__init__(
lowercase_ , tokenizer_file=lowercase_ , do_lower_case=lowercase_ , unk_token=lowercase_ , sep_token=lowercase_ , pad_token=lowercase_ , cls_token=lowercase_ , mask_token=lowercase_ , tokenize_chinese_chars=lowercase_ , strip_accents=lowercase_ , **lowercase_ , )
SCREAMING_SNAKE_CASE_ : str = json.loads(self.backend_tokenizer.normalizer.__getstate__())
if (
normalizer_state.get('''lowercase''' , lowercase_) != do_lower_case
or normalizer_state.get('''strip_accents''' , lowercase_) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , lowercase_) != tokenize_chinese_chars
):
SCREAMING_SNAKE_CASE_ : Dict = getattr(lowercase_ , normalizer_state.pop('''type'''))
SCREAMING_SNAKE_CASE_ : Union[str, Any] = do_lower_case
SCREAMING_SNAKE_CASE_ : str = strip_accents
SCREAMING_SNAKE_CASE_ : List[str] = tokenize_chinese_chars
SCREAMING_SNAKE_CASE_ : List[str] = normalizer_class(**lowercase_)
SCREAMING_SNAKE_CASE_ : Any = do_lower_case
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : Tuple , lowercase_ : str=None):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Dict = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : str , lowercase_ : Optional[str] = None):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = self._tokenizer.model.save(lowercase_ , name=lowercase_)
return tuple(lowercase_)
| 91 |
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class __lowerCAmelCase ( UpperCamelCase__):
_lowercase : Tuple = """M-CLIP"""
def __init__( self , lowerCAmelCase__=1_0_2_4 , lowerCAmelCase__=7_6_8 , **lowerCAmelCase__ ) -> Any:
'''simple docstring'''
a__ : int =transformerDimSize
a__ : Dict =imageDimSize
super().__init__(**lowerCAmelCase__ )
class __lowerCAmelCase ( UpperCamelCase__):
_lowercase : Optional[Any] = MCLIPConfig
def __init__( self , lowerCAmelCase__ , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> List[str]:
'''simple docstring'''
super().__init__(lowerCAmelCase__ , *lowerCAmelCase__ , **lowerCAmelCase__ )
a__ : Tuple =XLMRobertaModel(lowerCAmelCase__ )
a__ : List[str] =torch.nn.Linear(
in_features=config.transformerDimensions , out_features=config.numDims )
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Union[str, Any]:
'''simple docstring'''
a__ : Optional[Any] =self.transformer(input_ids=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )[0]
a__ : int =(embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
return self.LinearTransformation(lowerCAmelCase__ ), embs
| 95 | 0 |
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class a__ ( snake_case__ ):
_a : Optional[int] = """"""
_a : str = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
_a : str = None # compression type in fsspec. ex: "gzip"
_a : str = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
def __init__( self , _A = "" , _A = None , _A = None , **_A ):
"""simple docstring"""
super().__init__(self , **_A )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
__lowerCAmelCase = fsspec.open(
_A , mode="rb" , protocol=_A , compression=self.compression , client_kwargs={
"requote_redirect_url": False, # see https://github.com/huggingface/datasets/pull/5459
"trust_env": True, # Enable reading proxy env variables.
**(target_options or {}).pop("client_kwargs" , {} ), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
__lowerCAmelCase = os.path.basename(self.file.path.split("::" )[0] )
__lowerCAmelCase = (
self.compressed_name[: self.compressed_name.rindex("." )]
if "." in self.compressed_name
else self.compressed_name
)
__lowerCAmelCase = None
@classmethod
def __SCREAMING_SNAKE_CASE( cls , _A ):
"""simple docstring"""
return super()._strip_protocol(_A ).lstrip("/" )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
if self.dir_cache is None:
__lowerCAmelCase = {**self.file.fs.info(self.file.path ), "name": self.uncompressed_name}
__lowerCAmelCase = {f["name"]: f}
def __SCREAMING_SNAKE_CASE( self , _A ):
"""simple docstring"""
return self.file.open().read()
def __SCREAMING_SNAKE_CASE( self , _A , _A = "rb" , _A=None , _A=True , _A=None , **_A , ):
"""simple docstring"""
__lowerCAmelCase = self._strip_protocol(_A )
if mode != "rb":
raise ValueError(f"""Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'""" )
return self.file.open()
class a__ ( snake_case__ ):
_a : Any = """bz2"""
_a : Dict = """bz2"""
_a : Union[str, Any] = """.bz2"""
class a__ ( snake_case__ ):
_a : int = """gzip"""
_a : List[Any] = """gzip"""
_a : Optional[int] = """.gz"""
class a__ ( snake_case__ ):
_a : Optional[int] = """lz4"""
_a : Any = """lz4"""
_a : Tuple = """.lz4"""
class a__ ( snake_case__ ):
_a : Dict = """xz"""
_a : List[Any] = """xz"""
_a : Optional[Any] = """.xz"""
class a__ ( snake_case__ ):
_a : Union[str, Any] = """zstd"""
_a : int = """zstd"""
_a : int = """.zst"""
def __init__( self , _A , _A = "rb" , _A = None , _A = None , _A = DEFAULT_BLOCK_SIZE , **_A , ):
"""simple docstring"""
super().__init__(
fo=_A , mode=_A , target_protocol=_A , target_options=_A , block_size=_A , **_A , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
__lowerCAmelCase = self.file.__enter__
class a__ :
def __init__( self , _A ):
"""simple docstring"""
__lowerCAmelCase = file_
def __enter__( self ):
"""simple docstring"""
self._file.__enter__()
return self
def __exit__( self , *_A , **_A ):
"""simple docstring"""
self._file.__exit__(*_A , **_A )
def __iter__( self ):
"""simple docstring"""
return iter(self._file )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
return next(self._file )
def __getattr__( self , _A ):
"""simple docstring"""
return getattr(self._file , _A )
def fixed_enter(*_A , **_A ):
return WrappedFile(_enter(*_A , **_A ) )
__lowerCAmelCase = fixed_enter
| 92 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
UpperCAmelCase : Any = 16
UpperCAmelCase : str = 32
def _A ( SCREAMING_SNAKE_CASE : Accelerator , SCREAMING_SNAKE_CASE : int = 16 ):
"""simple docstring"""
a__ : int =AutoTokenizer.from_pretrained("bert-base-cased" )
a__ : List[str] =load_dataset("glue" , "mrpc" )
def tokenize_function(SCREAMING_SNAKE_CASE : List[Any] ):
# max_length=None => use the model max length (it's actually the default)
a__ : int =tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
a__ : Dict =datasets.map(
SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
a__ : Dict =tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(SCREAMING_SNAKE_CASE : str ):
# On TPU it's best to pad everything to the same length or training will be very slow.
a__ : Optional[Any] =128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
a__ : str =16
elif accelerator.mixed_precision != "no":
a__ : Union[str, Any] =8
else:
a__ : List[str] =None
return tokenizer.pad(
SCREAMING_SNAKE_CASE , padding="longest" , max_length=SCREAMING_SNAKE_CASE , pad_to_multiple_of=SCREAMING_SNAKE_CASE , return_tensors="pt" , )
# Instantiate dataloaders.
a__ : Any =DataLoader(
tokenized_datasets["train"] , shuffle=SCREAMING_SNAKE_CASE , collate_fn=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE )
a__ : int =DataLoader(
tokenized_datasets["validation"] , shuffle=SCREAMING_SNAKE_CASE , collate_fn=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
UpperCAmelCase : str = mocked_dataloaders # noqa: F811
def _A ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
if os.environ.get("TESTING_MOCKED_DATALOADERS" , SCREAMING_SNAKE_CASE ) == "1":
a__ : Tuple =2
# Initialize accelerator
a__ : int =Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
a__ : Optional[int] =config["lr"]
a__ : Union[str, Any] =int(config["num_epochs"] )
a__ : Any =int(config["seed"] )
a__ : Dict =int(config["batch_size"] )
a__ : int =evaluate.load("glue" , "mrpc" )
# If the batch size is too big we use gradient accumulation
a__ : int =1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
a__ : Dict =batch_size // MAX_GPU_BATCH_SIZE
a__ : Tuple =MAX_GPU_BATCH_SIZE
set_seed(SCREAMING_SNAKE_CASE )
a__ , a__ : Optional[int] =get_dataloaders(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
a__ : List[str] =AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=SCREAMING_SNAKE_CASE )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
a__ : List[str] =model.to(accelerator.device )
# Instantiate optimizer
a__ : List[Any] =AdamW(params=model.parameters() , lr=SCREAMING_SNAKE_CASE )
# Instantiate scheduler
a__ : Optional[int] =get_linear_schedule_with_warmup(
optimizer=SCREAMING_SNAKE_CASE , num_warmup_steps=100 , num_training_steps=(len(SCREAMING_SNAKE_CASE ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
a__ , a__ , a__ , a__ , a__ : Optional[int] =accelerator.prepare(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Now we train the model
for epoch in range(SCREAMING_SNAKE_CASE ):
model.train()
for step, batch in enumerate(SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
a__ : Dict =model(**SCREAMING_SNAKE_CASE )
a__ : List[Any] =outputs.loss
a__ : List[str] =loss / gradient_accumulation_steps
accelerator.backward(SCREAMING_SNAKE_CASE )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
a__ : Optional[Any] =0
for step, batch in enumerate(SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
a__ : Any =model(**SCREAMING_SNAKE_CASE )
a__ : str =outputs.logits.argmax(dim=-1 )
a__ , a__ : List[str] =accelerator.gather((predictions, batch["labels"]) )
# New Code #
# First we check if it's a distributed system
if accelerator.use_distributed:
# Then see if we're on the last batch of our eval dataloader
if step == len(SCREAMING_SNAKE_CASE ) - 1:
# Last batch needs to be truncated on distributed systems as it contains additional samples
a__ : Optional[Any] =predictions[: len(eval_dataloader.dataset ) - samples_seen]
a__ : Dict =references[: len(eval_dataloader.dataset ) - samples_seen]
else:
# Otherwise we add the number of samples seen
samples_seen += references.shape[0]
# All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
# accelerator.gather_for_metrics((predictions, batch["labels"]))
metric.add_batch(
predictions=SCREAMING_SNAKE_CASE , references=SCREAMING_SNAKE_CASE , )
a__ : Tuple =metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , SCREAMING_SNAKE_CASE )
def _A ( ):
"""simple docstring"""
a__ : List[str] =argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
a__ : str =parser.parse_args()
a__ : Optional[int] ={"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
| 95 | 0 |
'''simple docstring'''
def snake_case_ ( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
return [sentence[i : i + ngram_size] for i in range(len(__SCREAMING_SNAKE_CASE ) - ngram_size + 1 )]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 93 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class __lowerCAmelCase ( unittest.TestCase):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=7 , lowerCAmelCase__=3 , lowerCAmelCase__=1_8 , lowerCAmelCase__=3_0 , lowerCAmelCase__=4_0_0 , lowerCAmelCase__=True , lowerCAmelCase__=None , lowerCAmelCase__=True , lowerCAmelCase__=None , lowerCAmelCase__=True , ) -> Optional[Any]:
'''simple docstring'''
a__ : Union[str, Any] =size if size is not None else {"shortest_edge": 2_0}
a__ : List[str] =crop_size if crop_size is not None else {"height": 1_8, "width": 1_8}
a__ : Tuple =parent
a__ : Union[str, Any] =batch_size
a__ : List[str] =num_channels
a__ : List[Any] =image_size
a__ : str =min_resolution
a__ : Optional[int] =max_resolution
a__ : Tuple =do_resize
a__ : Union[str, Any] =size
a__ : List[Any] =do_center_crop
a__ : List[str] =crop_size
a__ : Optional[int] =do_flip_channel_order
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class __lowerCAmelCase ( UpperCamelCase__ , unittest.TestCase):
_lowercase : int = MobileViTImageProcessor if is_vision_available() else None
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
a__ : Tuple =MobileViTImageProcessingTester(self )
@property
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
a__ : str =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_resize" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "size" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_center_crop" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "center_crop" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_flip_channel_order" ) )
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
a__ : List[Any] =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 2_0} )
self.assertEqual(image_processor.crop_size , {"height": 1_8, "width": 1_8} )
a__ : Union[str, Any] =self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {"shortest_edge": 4_2} )
self.assertEqual(image_processor.crop_size , {"height": 8_4, "width": 8_4} )
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
a__ : List[str] =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a__ : Any =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image )
# Test not batched input
a__ : Tuple =image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
a__ : List[Any] =image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def _lowercase ( self ) -> Any:
'''simple docstring'''
a__ : Optional[Any] =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a__ : List[Any] =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , np.ndarray )
# Test not batched input
a__ : Tuple =image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
a__ : int =image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
a__ : int =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a__ : Tuple =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
# Test not batched input
a__ : List[str] =image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
a__ : List[str] =image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 95 | 0 |
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class _snake_case ( _snake_case ):
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ):
with open(_lowerCamelCase , encoding='''utf-8''' ) as input_file:
a :List[str] = re.compile(R'''(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)''' )
a :Dict = input_file.read()
a :Optional[int] = regexp.search(_lowerCamelCase )
return match
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ):
with open(_lowerCamelCase , encoding='''utf-8''' ) as input_file:
a :Dict = re.compile(R'''#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()''' , re.DOTALL )
a :Optional[int] = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
a :Optional[Any] = regexp.finditer(_lowerCamelCase )
a :List[Any] = [match for match in matches if match is not None and match.group(1 ) is not None]
return matches[0] if matches else None
def SCREAMING_SNAKE_CASE__ ( self ):
a :Optional[Any] = Path('''./datasets''' )
a :Optional[Any] = list(dataset_paths.absolute().glob('''**/*.py''' ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(_lowerCamelCase ) ):
raise AssertionError(F'''open(...) must use utf-8 encoding in {dataset}''' )
def SCREAMING_SNAKE_CASE__ ( self ):
a :str = Path('''./datasets''' )
a :int = list(dataset_paths.absolute().glob('''**/*.py''' ) )
for dataset in dataset_files:
if self._no_print_statements(str(_lowerCamelCase ) ):
raise AssertionError(F'''print statement found in {dataset}. Use datasets.logger/logging instead.''' )
| 94 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class __lowerCAmelCase ( unittest.TestCase):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=7 , lowerCAmelCase__=3 , lowerCAmelCase__=1_8 , lowerCAmelCase__=3_0 , lowerCAmelCase__=4_0_0 , lowerCAmelCase__=True , lowerCAmelCase__=None , lowerCAmelCase__=True , lowerCAmelCase__=None , ) -> Optional[int]:
'''simple docstring'''
a__ : str =size if size is not None else {"shortest_edge": 2_0}
a__ : Union[str, Any] =crop_size if crop_size is not None else {"height": 1_8, "width": 1_8}
a__ : Tuple =parent
a__ : Optional[int] =batch_size
a__ : Any =num_channels
a__ : List[str] =image_size
a__ : Dict =min_resolution
a__ : List[Any] =max_resolution
a__ : Dict =do_resize
a__ : Union[str, Any] =size
a__ : str =do_center_crop
a__ : List[str] =crop_size
def _lowercase ( self ) -> str:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class __lowerCAmelCase ( UpperCamelCase__ , unittest.TestCase):
_lowercase : Optional[Any] = MobileNetVaImageProcessor if is_vision_available() else None
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
a__ : Optional[int] =MobileNetVaImageProcessingTester(self )
@property
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase ( self ) -> Any:
'''simple docstring'''
a__ : List[str] =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_resize" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "size" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_center_crop" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "crop_size" ) )
def _lowercase ( self ) -> str:
'''simple docstring'''
a__ : Any =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 2_0} )
self.assertEqual(image_processor.crop_size , {"height": 1_8, "width": 1_8} )
a__ : Dict =self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {"shortest_edge": 4_2} )
self.assertEqual(image_processor.crop_size , {"height": 8_4, "width": 8_4} )
def _lowercase ( self ) -> Any:
'''simple docstring'''
pass
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
a__ : Dict =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a__ : Optional[Any] =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image )
# Test not batched input
a__ : List[Any] =image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
a__ : Dict =image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def _lowercase ( self ) -> int:
'''simple docstring'''
a__ : str =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a__ : str =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , np.ndarray )
# Test not batched input
a__ : List[str] =image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
a__ : Union[str, Any] =image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
a__ : Any =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a__ : int =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
# Test not batched input
a__ : Optional[Any] =image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
a__ : str =image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 95 | 0 |
"""simple docstring"""
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = """new-model"""
if is_tf_available():
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = NewModelConfig
@require_tf
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def A_ ( self ):
_lowerCamelCase : List[str] = 'bert-base-cased'
_lowerCamelCase : Union[str, Any] = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
_lowerCamelCase : Union[str, Any] = TFAutoModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def A_ ( self ):
_lowerCamelCase : List[str] = 'bert-base-cased'
_lowerCamelCase : int = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
_lowerCamelCase : int = TFAutoModelForPreTraining.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def A_ ( self ):
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Dict = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
_lowerCamelCase : int = TFAutoModelForCausalLM.from_pretrained(lowercase )
_lowerCamelCase, _lowerCamelCase : str = TFAutoModelForCausalLM.from_pretrained(lowercase , output_loading_info=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def A_ ( self ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : List[Any] = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
_lowerCamelCase : str = TFAutoModelWithLMHead.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def A_ ( self ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Tuple = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
_lowerCamelCase : Optional[int] = TFAutoModelForMaskedLM.from_pretrained(lowercase )
_lowerCamelCase, _lowerCamelCase : Tuple = TFAutoModelForMaskedLM.from_pretrained(lowercase , output_loading_info=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def A_ ( self ):
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Optional[int] = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
_lowerCamelCase : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(lowercase )
_lowerCamelCase, _lowerCamelCase : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(lowercase , output_loading_info=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def A_ ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
_lowerCamelCase : str = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
_lowerCamelCase : Union[str, Any] = TFAutoModelForSequenceClassification.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def A_ ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
_lowerCamelCase : Optional[Any] = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
_lowerCamelCase : List[str] = TFAutoModelForQuestionAnswering.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
@require_tensorflow_probability
def A_ ( self ):
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
_lowerCamelCase : Dict = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
_lowerCamelCase : Dict = TFAutoModelForTableQuestionAnswering.from_pretrained(lowercase )
_lowerCamelCase, _lowerCamelCase : List[Any] = TFAutoModelForTableQuestionAnswering.from_pretrained(
lowercase , output_loading_info=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
def A_ ( self ):
_lowerCamelCase : int = TFAutoModelWithLMHead.from_pretrained(lowercase )
self.assertIsInstance(lowercase , lowercase )
self.assertEqual(model.num_parameters() , 14410 )
self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 14410 )
def A_ ( self ):
_lowerCamelCase : Any = TFAutoModelWithLMHead.from_pretrained(lowercase )
self.assertIsInstance(lowercase , lowercase )
self.assertEqual(model.num_parameters() , 14410 )
self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 14410 )
def A_ ( self ):
# For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
_lowerCamelCase : List[str] = TFAutoModel.from_pretrained('sgugger/funnel-random-tiny' )
self.assertIsInstance(lowercase , lowercase )
_lowerCamelCase : Optional[int] = copy.deepcopy(model.config )
_lowerCamelCase : Dict = ['FunnelBaseModel']
_lowerCamelCase : List[Any] = TFAutoModel.from_config(lowercase )
self.assertIsInstance(lowercase , lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(lowercase )
_lowerCamelCase : Tuple = TFAutoModel.from_pretrained(lowercase )
self.assertIsInstance(lowercase , lowercase )
def A_ ( self ):
try:
AutoConfig.register('new-model' , lowercase )
_lowerCamelCase : Tuple = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(lowercase ):
auto_class.register(lowercase , lowercase )
auto_class.register(lowercase , lowercase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase ):
auto_class.register(lowercase , lowercase )
# Now that the config is registered, it can be used as any other config with the auto-API
_lowerCamelCase : Optional[Any] = BertModelTester(self ).get_config()
_lowerCamelCase : Dict = NewModelConfig(**tiny_config.to_dict() )
_lowerCamelCase : int = auto_class.from_config(lowercase )
self.assertIsInstance(lowercase , lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(lowercase )
_lowerCamelCase : List[Any] = auto_class.from_pretrained(lowercase )
self.assertIsInstance(lowercase , lowercase )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def A_ ( self ):
with self.assertRaisesRegex(
lowercase , 'bert-base is not a local folder and is not a valid model identifier' ):
_lowerCamelCase : Union[str, Any] = TFAutoModel.from_pretrained('bert-base' )
def A_ ( self ):
with self.assertRaisesRegex(
lowercase , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
_lowerCamelCase : str = TFAutoModel.from_pretrained(lowercase , revision='aaaaaa' )
def A_ ( self ):
with self.assertRaisesRegex(
lowercase , 'hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin' , ):
_lowerCamelCase : Tuple = TFAutoModel.from_pretrained('hf-internal-testing/config-no-model' )
def A_ ( self ):
with self.assertRaisesRegex(lowercase , 'Use `from_pt=True` to load this model' ):
_lowerCamelCase : Tuple = TFAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only' )
def A_ ( self ):
# Make sure we have cached the model.
_lowerCamelCase : Optional[int] = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' )
with RequestCounter() as counter:
_lowerCamelCase : Optional[int] = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
_lowerCamelCase : int = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' )
with RequestCounter() as counter:
_lowerCamelCase : List[Any] = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 ) | 96 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase : Any = {
"""configuration_convbert""": ["""CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvBertConfig""", """ConvBertOnnxConfig"""],
"""tokenization_convbert""": ["""ConvBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : List[Any] = ["""ConvBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : List[str] = [
"""CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConvBertForMaskedLM""",
"""ConvBertForMultipleChoice""",
"""ConvBertForQuestionAnswering""",
"""ConvBertForSequenceClassification""",
"""ConvBertForTokenClassification""",
"""ConvBertLayer""",
"""ConvBertModel""",
"""ConvBertPreTrainedModel""",
"""load_tf_weights_in_convbert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Union[str, Any] = [
"""TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFConvBertForMaskedLM""",
"""TFConvBertForMultipleChoice""",
"""TFConvBertForQuestionAnswering""",
"""TFConvBertForSequenceClassification""",
"""TFConvBertForTokenClassification""",
"""TFConvBertLayer""",
"""TFConvBertModel""",
"""TFConvBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
UpperCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 95 | 0 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
__snake_case = logging.get_logger(__name__)
@dataclass
class lowercase ( A__ ):
"""simple docstring"""
_a = [
'no_inference',
'no_cuda',
'no_tpu',
'no_speed',
'no_memory',
'no_env_print',
'no_multi_process',
]
def __init__( self , **UpperCamelCase_ ):
'''simple docstring'''
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
UpperCamelCase__ :int = deprecated_arg[3:]
UpperCamelCase__ :int = not kwargs.pop(UpperCamelCase_ )
logger.warning(
F'''{deprecated_arg} is depreciated. Please use --no-{positive_arg} or'''
F''' {positive_arg}={kwargs[positive_arg]}''' )
UpperCamelCase__ :int = kwargs.pop('''tpu_name''' , self.tpu_name )
UpperCamelCase__ :List[str] = kwargs.pop('''device_idx''' , self.device_idx )
UpperCamelCase__ :List[str] = kwargs.pop('''eager_mode''' , self.eager_mode )
UpperCamelCase__ :Dict = kwargs.pop('''use_xla''' , self.use_xla )
super().__init__(**UpperCamelCase_ )
_a = field(
default=A__ , metadata={'help': 'Name of TPU'} , )
_a = field(
default=0 , metadata={'help': 'CPU / GPU device index. Defaults to 0.'} , )
_a = field(default=A__ , metadata={'help': 'Benchmark models in eager model.'} )
_a = field(
default=A__ , metadata={
'help': 'Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.'
} , )
@cached_property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
requires_backends(self , ['''tf'''] )
UpperCamelCase__ :List[str] = None
if self.tpu:
try:
if self.tpu_name:
UpperCamelCase__ :Optional[int] = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
else:
UpperCamelCase__ :Optional[Any] = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
UpperCamelCase__ :Dict = None
return tpu
@cached_property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
requires_backends(self , ['''tf'''] )
if self.is_tpu:
tf.config.experimental_connect_to_cluster(self._setup_tpu )
tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
UpperCamelCase__ :Any = tf.distribute.TPUStrategy(self._setup_tpu )
else:
# currently no multi gpu is allowed
if self.is_gpu:
# TODO: Currently only single GPU is supported
tf.config.set_visible_devices(self.gpu_list[self.device_idx] , '''GPU''' )
UpperCamelCase__ :Optional[Any] = tf.distribute.OneDeviceStrategy(device=F'''/gpu:{self.device_idx}''' )
else:
tf.config.set_visible_devices([] , '''GPU''' ) # disable GPU
UpperCamelCase__ :List[str] = tf.distribute.OneDeviceStrategy(device=F'''/cpu:{self.device_idx}''' )
return strategy
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
requires_backends(self , ['''tf'''] )
return self._setup_tpu is not None
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
requires_backends(self , ['''tf'''] )
return self._setup_strategy
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
requires_backends(self , ['''tf'''] )
return tf.config.list_physical_devices('''GPU''' )
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
requires_backends(self , ['''tf'''] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return self.n_gpu > 0 | 97 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase : Dict = logging.get_logger(__name__)
UpperCAmelCase : Tuple = {
"""caidas/swin2sr-classicalsr-x2-64""": (
"""https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"""
),
}
class __lowerCAmelCase ( UpperCamelCase__):
_lowercase : Any = """swin2sr"""
_lowercase : Tuple = {
"""hidden_size""": """embed_dim""",
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self , lowerCAmelCase__=6_4 , lowerCAmelCase__=1 , lowerCAmelCase__=3 , lowerCAmelCase__=1_8_0 , lowerCAmelCase__=[6, 6, 6, 6, 6, 6] , lowerCAmelCase__=[6, 6, 6, 6, 6, 6] , lowerCAmelCase__=8 , lowerCAmelCase__=2.0 , lowerCAmelCase__=True , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.1 , lowerCAmelCase__="gelu" , lowerCAmelCase__=False , lowerCAmelCase__=0.02 , lowerCAmelCase__=1E-5 , lowerCAmelCase__=2 , lowerCAmelCase__=1.0 , lowerCAmelCase__="1conv" , lowerCAmelCase__="pixelshuffle" , **lowerCAmelCase__ , ) -> int:
'''simple docstring'''
super().__init__(**lowerCAmelCase__ )
a__ : Optional[Any] =image_size
a__ : Dict =patch_size
a__ : Tuple =num_channels
a__ : Union[str, Any] =embed_dim
a__ : Optional[Any] =depths
a__ : List[str] =len(lowerCAmelCase__ )
a__ : Any =num_heads
a__ : Any =window_size
a__ : str =mlp_ratio
a__ : List[str] =qkv_bias
a__ : Dict =hidden_dropout_prob
a__ : List[str] =attention_probs_dropout_prob
a__ : Dict =drop_path_rate
a__ : Optional[Any] =hidden_act
a__ : Union[str, Any] =use_absolute_embeddings
a__ : Optional[Any] =layer_norm_eps
a__ : List[Any] =initializer_range
a__ : int =upscale
a__ : Optional[int] =img_range
a__ : Any =resi_connection
a__ : Optional[Any] =upsampler
| 95 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
def a_ ( lowerCamelCase , lowerCamelCase ):
if len(lowerCamelCase ) != 2 or len(a[0] ) != 2 or len(lowerCamelCase ) != 2 or len(b[0] ) != 2:
raise Exception('Matrices are not 2x2' )
UpperCAmelCase__ = [
[a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
[a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
]
return new_matrix
def a_ ( lowerCamelCase , lowerCamelCase ):
return [
[matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(lowerCamelCase ) )
]
def a_ ( lowerCamelCase , lowerCamelCase ):
return [
[matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(lowerCamelCase ) )
]
def a_ ( lowerCamelCase ):
if len(lowerCamelCase ) % 2 != 0 or len(a[0] ) % 2 != 0:
raise Exception('Odd matrices are not supported!' )
UpperCAmelCase__ = len(lowerCamelCase )
UpperCAmelCase__ = matrix_length // 2
UpperCAmelCase__ = [[a[i][j] for j in range(lowerCamelCase , lowerCamelCase )] for i in range(lowerCamelCase )]
UpperCAmelCase__ = [
[a[i][j] for j in range(lowerCamelCase , lowerCamelCase )] for i in range(lowerCamelCase , lowerCamelCase )
]
UpperCAmelCase__ = [[a[i][j] for j in range(lowerCamelCase )] for i in range(lowerCamelCase )]
UpperCAmelCase__ = [[a[i][j] for j in range(lowerCamelCase )] for i in range(lowerCamelCase , lowerCamelCase )]
return top_left, top_right, bot_left, bot_right
def a_ ( lowerCamelCase ):
return len(lowerCamelCase ), len(matrix[0] )
def a_ ( lowerCamelCase ):
print('\n'.join(str(lowerCamelCase ) for line in matrix ) )
def a_ ( lowerCamelCase , lowerCamelCase ):
if matrix_dimensions(lowerCamelCase ) == (2, 2):
return default_matrix_multiplication(lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = split_matrix(lowerCamelCase )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = split_matrix(lowerCamelCase )
UpperCAmelCase__ = actual_strassen(lowerCamelCase , matrix_subtraction(lowerCamelCase , lowerCamelCase ) )
UpperCAmelCase__ = actual_strassen(matrix_addition(lowerCamelCase , lowerCamelCase ) , lowerCamelCase )
UpperCAmelCase__ = actual_strassen(matrix_addition(lowerCamelCase , lowerCamelCase ) , lowerCamelCase )
UpperCAmelCase__ = actual_strassen(lowerCamelCase , matrix_subtraction(lowerCamelCase , lowerCamelCase ) )
UpperCAmelCase__ = actual_strassen(matrix_addition(lowerCamelCase , lowerCamelCase ) , matrix_addition(lowerCamelCase , lowerCamelCase ) )
UpperCAmelCase__ = actual_strassen(matrix_subtraction(lowerCamelCase , lowerCamelCase ) , matrix_addition(lowerCamelCase , lowerCamelCase ) )
UpperCAmelCase__ = actual_strassen(matrix_subtraction(lowerCamelCase , lowerCamelCase ) , matrix_addition(lowerCamelCase , lowerCamelCase ) )
UpperCAmelCase__ = matrix_addition(matrix_subtraction(matrix_addition(lowerCamelCase , lowerCamelCase ) , lowerCamelCase ) , lowerCamelCase )
UpperCAmelCase__ = matrix_addition(lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = matrix_addition(lowerCamelCase , lowerCamelCase )
UpperCAmelCase__ = matrix_subtraction(matrix_subtraction(matrix_addition(lowerCamelCase , lowerCamelCase ) , lowerCamelCase ) , lowerCamelCase )
# construct the new matrix from our 4 quadrants
UpperCAmelCase__ = []
for i in range(len(lowerCamelCase ) ):
new_matrix.append(top_left[i] + top_right[i] )
for i in range(len(lowerCamelCase ) ):
new_matrix.append(bot_left[i] + bot_right[i] )
return new_matrix
def a_ ( lowerCamelCase , lowerCamelCase ):
if matrix_dimensions(lowerCamelCase )[1] != matrix_dimensions(lowerCamelCase )[0]:
UpperCAmelCase__ = (
'Unable to multiply these matrices, please check the dimensions.\n'
f'''Matrix A: {matrixa}\n'''
f'''Matrix B: {matrixa}'''
)
raise Exception(lowerCamelCase )
UpperCAmelCase__ = matrix_dimensions(lowerCamelCase )
UpperCAmelCase__ = matrix_dimensions(lowerCamelCase )
if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]:
return [matrixa, matrixa]
UpperCAmelCase__ = max(*lowerCamelCase , *lowerCamelCase )
UpperCAmelCase__ = int(math.pow(2 , math.ceil(math.loga(lowerCamelCase ) ) ) )
UpperCAmelCase__ = matrixa
UpperCAmelCase__ = matrixa
# Adding zeros to the matrices so that the arrays dimensions are the same and also
# power of 2
for i in range(0 , lowerCamelCase ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , lowerCamelCase ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
if i < dimensiona[0]:
for _ in range(dimensiona[1] , lowerCamelCase ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
UpperCAmelCase__ = actual_strassen(lowerCamelCase , lowerCamelCase )
# Removing the additional zeros
for i in range(0 , lowerCamelCase ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , lowerCamelCase ):
final_matrix[i].pop()
else:
final_matrix.pop()
return final_matrix
if __name__ == "__main__":
lowerCAmelCase__ : List[Any] = [
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
lowerCAmelCase__ : List[str] = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
print(strassen(matrixa, matrixa))
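    # Illustrative sanity check (not in the original): strassen pads its inputs
    # in place, so multiply deep copies and compare against a naive O(n^3)
    # triple-loop product. Uses only the functions defined above.
    import copy

    def naive_multiply(a, b):
        return [
            [sum(a[i][k] * b[k][j] for k in range(len(b))) for j in range(len(b[0]))]
            for i in range(len(a))
        ]

    m1 = [[2, 3], [4, 5], [6, 7], [8, 9]]
    m2 = [[1, 0, 2], [0, 1, 3]]
    assert strassen(copy.deepcopy(m1), copy.deepcopy(m2)) == naive_multiply(m1, m2)
    print("strassen agrees with the naive product")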
| 98 |
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class __lowerCAmelCase :
pass
| 95 | 0 |
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class A__ :
"""simple docstring"""
def __init__( self , lowercase , lowercase=3 , lowercase=7 , lowercase=True , lowercase=True , lowercase=False , lowercase=True , lowercase=99 , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=512 , lowercase=16 , lowercase=2 , lowercase=0.02 , lowercase=3 , lowercase=4 , lowercase=None , ) -> Any:
'''simple docstring'''
a__ : Tuple = parent
a__ : str = batch_size
a__ : Union[str, Any] = seq_length
a__ : Optional[int] = is_training
a__ : str = use_input_mask
a__ : int = use_token_type_ids
a__ : int = use_labels
a__ : List[Any] = vocab_size
a__ : Dict = hidden_size
a__ : List[Any] = num_hidden_layers
a__ : Tuple = num_attention_heads
a__ : Tuple = intermediate_size
a__ : Tuple = hidden_act
a__ : Dict = hidden_dropout_prob
a__ : Tuple = attention_probs_dropout_prob
a__ : Optional[int] = max_position_embeddings
a__ : Tuple = type_vocab_size
a__ : Optional[int] = type_sequence_label_size
a__ : Union[str, Any] = initializer_range
a__ : Any = num_labels
a__ : Any = num_choices
a__ : List[Any] = scope
def __lowercase ( self) -> int:
'''simple docstring'''
a__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a__ : int = None
if self.use_input_mask:
a__ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length])
a__ : Any = None
a__ : str = None
a__ : int = None
a__ : Dict = None
if self.use_labels:
a__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
a__ : Optional[int] = ids_tensor([self.batch_size] , self.num_choices)
a__ : List[str] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowercase ( self) -> Optional[int]:
'''simple docstring'''
return FalconConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=lowercase , )
def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase) -> int:
'''simple docstring'''
a__ : Tuple = FalconModel(config=lowercase)
model.to(lowercase)
model.eval()
a__ : Tuple = model(lowercase , attention_mask=lowercase)
a__ : Any = model(lowercase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> Optional[int]:
'''simple docstring'''
a__ : str = True
a__ : Dict = FalconModel(lowercase)
model.to(lowercase)
model.eval()
a__ : Union[str, Any] = model(
lowercase , attention_mask=lowercase , encoder_hidden_states=lowercase , encoder_attention_mask=lowercase , )
a__ : int = model(
lowercase , attention_mask=lowercase , encoder_hidden_states=lowercase , )
a__ : str = model(lowercase , attention_mask=lowercase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> Optional[int]:
'''simple docstring'''
a__ : List[str] = FalconForCausalLM(config=lowercase)
model.to(lowercase)
model.eval()
a__ : Any = model(lowercase , attention_mask=lowercase , labels=lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> Any:
'''simple docstring'''
a__ : List[str] = True
a__ : Dict = True
a__ : Optional[int] = FalconForCausalLM(config=lowercase)
model.to(lowercase)
model.eval()
# first forward pass
a__ : str = model(
lowercase , attention_mask=lowercase , encoder_hidden_states=lowercase , encoder_attention_mask=lowercase , use_cache=lowercase , )
a__ : Union[str, Any] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
a__ : Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size)
a__ : Optional[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2)
# append to next input_ids and
a__ : Optional[int] = torch.cat([input_ids, next_tokens] , dim=-1)
a__ : Optional[Any] = torch.cat([input_mask, next_mask] , dim=-1)
a__ : List[Any] = model(
lowercase , attention_mask=lowercase , encoder_hidden_states=lowercase , encoder_attention_mask=lowercase , output_hidden_states=lowercase , )['hidden_states'][0]
a__ : List[Any] = model(
lowercase , attention_mask=lowercase , encoder_hidden_states=lowercase , encoder_attention_mask=lowercase , past_key_values=lowercase , output_hidden_states=lowercase , )['hidden_states'][0]
# select random slice
a__ : Any = ids_tensor((1,) , output_from_past.shape[-1]).item()
a__ : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
a__ : Dict = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase , lowercase , atol=1e-3))
    def __lowercase ( self) -> Tuple:
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class A__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
__A : Union[str, Any] = (
(
FalconModel,
FalconForCausalLM,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconForQuestionAnswering,
)
if is_torch_available()
else ()
)
__A : List[str] = (FalconForCausalLM,) if is_torch_available() else ()
__A : Tuple = (
{
'''feature-extraction''': FalconModel,
'''text-classification''': FalconForSequenceClassification,
'''text-generation''': FalconForCausalLM,
'''question-answering''': FalconForQuestionAnswering,
'''token-classification''': FalconForTokenClassification,
'''zero-shot''': FalconForSequenceClassification,
}
if is_torch_available()
else {}
)
__A : Any = False
__A : Any = False
def __lowercase ( self) -> Dict:
'''simple docstring'''
a__ : int = FalconModelTester(self)
a__ : Dict = ConfigTester(self , config_class=lowercase , hidden_size=37)
def __lowercase ( self) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowercase ( self) -> Dict:
'''simple docstring'''
a__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase)
def __lowercase ( self) -> Dict:
'''simple docstring'''
a__ , *a__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
for alibi in [True, False]:
a__ : Tuple = alibi
self.model_tester.create_and_check_model(lowercase , *lowercase)
def __lowercase ( self) -> str:
'''simple docstring'''
a__ , a__ : int = self.model_tester.prepare_config_and_inputs_for_common()
a__ : List[str] = 3
a__ : str = input_dict['input_ids']
a__ : Optional[Any] = input_ids.ne(1).to(lowercase)
a__ : Tuple = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
a__ : List[Any] = FalconForSequenceClassification(lowercase)
model.to(lowercase)
model.eval()
a__ : Any = model(lowercase , attention_mask=lowercase , labels=lowercase)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def __lowercase ( self) -> List[Any]:
'''simple docstring'''
a__ , a__ : int = self.model_tester.prepare_config_and_inputs_for_common()
a__ : int = 3
a__ : Any = 'single_label_classification'
a__ : Any = input_dict['input_ids']
a__ : Any = input_ids.ne(1).to(lowercase)
a__ : Optional[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
a__ : Tuple = FalconForSequenceClassification(lowercase)
model.to(lowercase)
model.eval()
a__ : Tuple = model(lowercase , attention_mask=lowercase , labels=lowercase)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def __lowercase ( self) -> int:
'''simple docstring'''
a__ , a__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
a__ : List[str] = input_dict['input_ids']
a__ : Optional[Any] = FalconForCausalLM(lowercase)
model.to(lowercase)
model.eval()
a__ : Tuple = model(lowercase , use_cache=lowercase)
a__ : List[Any] = input_ids.shape[0]
a__ : int = model._convert_to_rw_cache(result.past_key_values)
a__ : Optional[Any] = model._convert_cache_to_standard_format(lowercase , lowercase)
for layer in range(len(lowercase)):
for tensor_idx in range(2):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3)
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4)
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx]))
def __lowercase ( self) -> Union[str, Any]:
'''simple docstring'''
a__ , a__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
a__ : Optional[Any] = 3
a__ : Tuple = 'multi_label_classification'
a__ : Union[str, Any] = input_dict['input_ids']
a__ : List[str] = input_ids.ne(1).to(lowercase)
a__ : Any = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float)
a__ : List[str] = FalconForSequenceClassification(lowercase)
model.to(lowercase)
model.eval()
a__ : Dict = model(lowercase , attention_mask=lowercase , labels=lowercase)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def __lowercase ( self) -> Optional[Any]:
'''simple docstring'''
for model_class in self.all_generative_model_classes:
a__ , a__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
# If it doesn't support cache, pass the test
if not hasattr(lowercase , 'use_cache'):
return
a__ : Optional[int] = model_class(lowercase).to(lowercase)
if "use_cache" not in inputs:
a__ : Dict = True
a__ : List[str] = model(**lowercase)
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
a__ : Optional[int] = (
getattr(lowercase , 'decoder_layers' , lowercase)
or getattr(lowercase , 'num_decoder_layers' , lowercase)
or config.num_hidden_layers
)
a__ : Tuple = getattr(lowercase , 'num_kv_heads' , config.num_attention_heads)
a__ : int = getattr(lowercase , 'd_model' , config.hidden_size)
a__ : Optional[Any] = embed_dim // num_attention_heads
a__ : Any = outputs['past_key_values']
self.assertEqual(len(lowercase) , lowercase)
a__ , a__ : Optional[Any] = inputs['input_ids'].shape
for i in range(lowercase):
if config.new_decoder_architecture:
a__ : int = config.num_attention_heads
elif config.multi_query:
a__ : Dict = 1
self.assertEqual(len(past_kv[0]) , 2) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim))
self.assertEqual(
past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim))
@require_torch
class A__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def __lowercase ( self) -> Tuple:
'''simple docstring'''
a__ : Optional[int] = AutoTokenizer.from_pretrained('Rocketknight1/falcon-rw-1b')
a__ : List[str] = FalconForCausalLM.from_pretrained('Rocketknight1/falcon-rw-1b')
model.eval()
model.to(lowercase)
a__ : int = tokenizer('My favorite food is' , return_tensors='pt').to(lowercase)
a__ : Union[str, Any] = (
'My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday.'
)
a__ : List[Any] = model.generate(**lowercase , do_sample=lowercase , max_new_tokens=19)
a__ : Tuple = tokenizer.batch_decode(lowercase)[0]
self.assertEqual(lowercase , lowercase)
@slow
def __lowercase ( self) -> Tuple:
'''simple docstring'''
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
a__ : int = AutoTokenizer.from_pretrained(lowercase)
a__ : List[Any] = FalconForCausalLM.from_pretrained(lowercase)
model.eval()
model.to(lowercase)
a__ : List[str] = tokenizer('My favorite food is' , return_tensors='pt').to(lowercase)
# We just test that these run without errors - the models are randomly initialized
# and so the actual text outputs will be garbage
model.generate(**lowercase , do_sample=lowercase , max_new_tokens=4)
model.generate(**lowercase , do_sample=lowercase , max_new_tokens=4)
model.generate(**lowercase , num_beams=2 , max_new_tokens=4)
@slow
def __lowercase ( self) -> List[str]:
'''simple docstring'''
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
a__ : List[str] = AutoTokenizer.from_pretrained(lowercase)
a__ : int = FalconForCausalLM.from_pretrained(lowercase)
model.eval()
model.to(device=lowercase)
a__ : int = tokenizer('My favorite food is' , return_tensors='pt').to(lowercase)
# Test results are the same with and without cache
a__ : List[Any] = model.generate(**lowercase , do_sample=lowercase , max_new_tokens=20 , use_cache=lowercase)
a__ : Optional[Any] = model.generate(**lowercase , do_sample=lowercase , max_new_tokens=20 , use_cache=lowercase)
self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0)
| 99 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
class TextSummarizationTool(PipelineTool):
    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
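# Hypothetical usage sketch (illustrative, not part of the module above): the
# class name mirrors the definition here, and the call assumes a transformers
# release that ships the agents `PipelineTool` API plus access to the checkpoint.
#
#     tool = TextSummarizationTool()
#     print(tool("Long meeting transcript to summarize ..."))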
| 95 | 0 |
"""simple docstring"""
import math
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ ):
if 0 not in (x, y):
# We use the relation x^y = y*log10(x), where 10 is the base.
return y * math.logaa(UpperCamelCase_ )
else:
if x == 0: # 0 raised to any number is 0
return 0
elif y == 0:
return 1 # any number raised to 0 is 1
raise AssertionError("""This should never happen""" )
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
__magic_name__ = "Enter the base and the power separated by a comma: "
__magic_name__, __magic_name__ = map(int, input(prompt).split(","))
__magic_name__, __magic_name__ = map(int, input(prompt).split(","))
# We find the log of each number, using the function res(), which takes two
# arguments.
__magic_name__ = res(xa, ya)
__magic_name__ = res(xa, ya)
# We check for the largest number
if resa > resa:
print("Largest number is", xa, "^", ya)
elif resa > resa:
print("Largest number is", xa, "^", ya)
else:
print("Both are equal")
| 100 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
_model_names = [
"""small""",
"""small-base""",
"""medium""",
"""medium-base""",
"""intermediate""",
"""intermediate-base""",
"""large""",
"""large-base""",
"""xlarge""",
"""xlarge-base""",
]
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt""",
"""funnel-transformer/small-base""": """https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt""",
"""funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt""",
"""funnel-transformer/medium-base""": (
"""https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"""
),
"""funnel-transformer/intermediate""": (
"""https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"""
),
"""funnel-transformer/intermediate-base""": (
"""https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"""
),
"""funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt""",
"""funnel-transformer/large-base""": """https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt""",
"""funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt""",
"""funnel-transformer/xlarge-base""": (
"""https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json""",
"""funnel-transformer/small-base""": (
"""https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json""",
"""funnel-transformer/medium-base""": (
"""https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/intermediate""": (
"""https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"""
),
"""funnel-transformer/intermediate-base""": (
"""https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json""",
"""funnel-transformer/large-base""": (
"""https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json""",
"""funnel-transformer/xlarge-base""": (
"""https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {f"funnel-transformer/{name}": 512 for name in _model_names}
PRETRAINED_INIT_CONFIGURATION = {f"funnel-transformer/{name}": {"do_lower_case": True} for name in _model_names}
class FunnelTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    cls_token_type_id: int = 2

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        bos_token="<s>",
        eos_token="</s>",
        clean_text=True,
        tokenize_chinese_chars=True,
        strip_accents=None,
        wordpieces_prefix="##",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            bos_token=bos_token,
            eos_token=eos_token,
            clean_text=clean_text,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            wordpieces_prefix=wordpieces_prefix,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0]
        return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 95 | 0 |
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class lowercase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
lowercase_ : Optional[Any] =MobileBertTokenizer
lowercase_ : Tuple =MobileBertTokenizerFast
lowercase_ : Optional[int] =True
lowercase_ : str =True
lowercase_ : Dict =filter_non_english
    pre_trained_model_path ='''google/mobilebert-uncased'''
def A__ ( self):
super().setUp()
lowercase = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
lowercase = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''vocab_file'''])
with open(self.vocab_file ,'''w''' ,encoding='''utf-8''') as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens]))
lowercase = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
def A__ ( self ,A__):
lowercase = '''UNwant\u00E9d,running'''
lowercase = '''unwanted, running'''
return input_text, output_text
def A__ ( self):
lowercase = self.tokenizer_class(self.vocab_file)
lowercase = tokenizer.tokenize('''UNwant\u00E9d,running''')
self.assertListEqual(A__ ,['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''])
self.assertListEqual(tokenizer.convert_tokens_to_ids(A__) ,[9, 6, 7, 1_2, 1_0, 1_1])
def A__ ( self):
if not self.test_rust_tokenizer:
return
lowercase = self.get_tokenizer()
lowercase = self.get_rust_tokenizer()
lowercase = '''UNwant\u00E9d,running'''
lowercase = tokenizer.tokenize(A__)
lowercase = rust_tokenizer.tokenize(A__)
self.assertListEqual(A__ ,A__)
lowercase = tokenizer.encode(A__ ,add_special_tokens=A__)
lowercase = rust_tokenizer.encode(A__ ,add_special_tokens=A__)
self.assertListEqual(A__ ,A__)
lowercase = self.get_rust_tokenizer()
lowercase = tokenizer.encode(A__)
lowercase = rust_tokenizer.encode(A__)
self.assertListEqual(A__ ,A__)
# With lower casing
lowercase = self.get_tokenizer(do_lower_case=A__)
lowercase = self.get_rust_tokenizer(do_lower_case=A__)
lowercase = '''UNwant\u00E9d,running'''
lowercase = tokenizer.tokenize(A__)
lowercase = rust_tokenizer.tokenize(A__)
self.assertListEqual(A__ ,A__)
lowercase = tokenizer.encode(A__ ,add_special_tokens=A__)
lowercase = rust_tokenizer.encode(A__ ,add_special_tokens=A__)
self.assertListEqual(A__ ,A__)
lowercase = self.get_rust_tokenizer()
lowercase = tokenizer.encode(A__)
lowercase = rust_tokenizer.encode(A__)
self.assertListEqual(A__ ,A__)
def A__ ( self):
lowercase = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''') ,['''ah''', '''\u535A''', '''\u63A8''', '''zz'''])
def A__ ( self):
lowercase = BasicTokenizer(do_lower_case=A__)
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''') ,['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''])
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''') ,['''hello'''])
def A__ ( self):
lowercase = BasicTokenizer(do_lower_case=A__ ,strip_accents=A__)
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''') ,['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''])
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''') ,['''h\u00E9llo'''])
def A__ ( self):
lowercase = BasicTokenizer(do_lower_case=A__ ,strip_accents=A__)
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''') ,['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''])
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''') ,['''hello'''])
def A__ ( self):
lowercase = BasicTokenizer(do_lower_case=A__)
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''') ,['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''])
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''') ,['''hello'''])
def A__ ( self):
lowercase = BasicTokenizer(do_lower_case=A__)
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''') ,['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''])
def A__ ( self):
lowercase = BasicTokenizer(do_lower_case=A__ ,strip_accents=A__)
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''') ,['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''])
def A__ ( self):
lowercase = BasicTokenizer(do_lower_case=A__ ,strip_accents=A__)
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''') ,['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''])
def A__ ( self):
lowercase = BasicTokenizer(do_lower_case=A__ ,never_split=['''[UNK]'''])
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''') ,['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''])
def A__ ( self):
lowercase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
lowercase = {}
for i, token in enumerate(A__):
lowercase = i
lowercase = WordpieceTokenizer(vocab=A__ ,unk_token='''[UNK]''')
self.assertListEqual(tokenizer.tokenize('''''') ,[])
self.assertListEqual(tokenizer.tokenize('''unwanted running''') ,['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''])
self.assertListEqual(tokenizer.tokenize('''unwantedX running''') ,['''[UNK]''', '''runn''', '''##ing'''])
def A__ ( self):
self.assertTrue(_is_whitespace(''' '''))
self.assertTrue(_is_whitespace('''\t'''))
self.assertTrue(_is_whitespace('''\r'''))
self.assertTrue(_is_whitespace('''\n'''))
self.assertTrue(_is_whitespace('''\u00A0'''))
self.assertFalse(_is_whitespace('''A'''))
self.assertFalse(_is_whitespace('''-'''))
def A__ ( self):
self.assertTrue(_is_control('''\u0005'''))
self.assertFalse(_is_control('''A'''))
self.assertFalse(_is_control(''' '''))
self.assertFalse(_is_control('''\t'''))
self.assertFalse(_is_control('''\r'''))
def A__ ( self):
self.assertTrue(_is_punctuation('''-'''))
self.assertTrue(_is_punctuation('''$'''))
self.assertTrue(_is_punctuation('''`'''))
self.assertTrue(_is_punctuation('''.'''))
self.assertFalse(_is_punctuation('''A'''))
self.assertFalse(_is_punctuation(''' '''))
def A__ ( self):
lowercase = self.get_tokenizer()
lowercase = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ['''Test''', '''\xad''', '''test''']] ,[['''[UNK]'''], [], ['''[UNK]''']])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ['''Test''', '''\xad''', '''test''']] ,[['''[UNK]'''], [], ['''[UNK]''']])
@slow
def A__ ( self):
lowercase = self.tokenizer_class.from_pretrained('''google/mobilebert-uncased''')
lowercase = tokenizer.encode('''sequence builders''' ,add_special_tokens=A__)
lowercase = tokenizer.encode('''multi-sequence build''' ,add_special_tokens=A__)
lowercase = tokenizer.build_inputs_with_special_tokens(A__)
lowercase = tokenizer.build_inputs_with_special_tokens(A__ ,A__)
assert encoded_sentence == [1_0_1] + text + [1_0_2]
assert encoded_pair == [1_0_1] + text + [1_0_2] + text_a + [1_0_2]
def A__ ( self):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
lowercase = self.rust_tokenizer_class.from_pretrained(A__ ,**A__)
lowercase = f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
lowercase = tokenizer_r.encode_plus(
A__ ,return_attention_mask=A__ ,return_token_type_ids=A__ ,return_offsets_mapping=A__ ,add_special_tokens=A__ ,)
lowercase = tokenizer_r.do_lower_case if hasattr(A__ ,'''do_lower_case''') else False
lowercase = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), '''Allen'''),
((2_1, 2_3), '''##NL'''),
((2_3, 2_4), '''##P'''),
((2_5, 3_3), '''sentence'''),
((3_3, 3_4), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), '''allen'''),
((2_1, 2_3), '''##nl'''),
((2_3, 2_4), '''##p'''),
((2_5, 3_3), '''sentence'''),
((3_3, 3_4), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] ,tokenizer_r.convert_ids_to_tokens(tokens['''input_ids''']))
self.assertEqual([e[0] for e in expected_results] ,tokens['''offset_mapping'''])
def A__ ( self):
lowercase = ['''的''', '''人''', '''有''']
lowercase = ''''''.join(A__)
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
lowercase = True
lowercase = self.tokenizer_class.from_pretrained(A__ ,**A__)
lowercase = self.rust_tokenizer_class.from_pretrained(A__ ,**A__)
lowercase = tokenizer_p.encode(A__ ,add_special_tokens=A__)
lowercase = tokenizer_r.encode(A__ ,add_special_tokens=A__)
lowercase = tokenizer_r.convert_ids_to_tokens(A__)
lowercase = tokenizer_p.convert_ids_to_tokens(A__)
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(A__ ,A__)
self.assertListEqual(A__ ,A__)
lowercase = False
lowercase = self.rust_tokenizer_class.from_pretrained(A__ ,**A__)
lowercase = self.tokenizer_class.from_pretrained(A__ ,**A__)
lowercase = tokenizer_r.encode(A__ ,add_special_tokens=A__)
lowercase = tokenizer_p.encode(A__ ,add_special_tokens=A__)
lowercase = tokenizer_r.convert_ids_to_tokens(A__)
lowercase = tokenizer_p.convert_ids_to_tokens(A__)
# it is expected that only the first Chinese character is not preceded by "##".
lowercase = [
f'##{token}' if idx != 0 else token for idx, token in enumerate(A__)
]
self.assertListEqual(A__ ,A__)
self.assertListEqual(A__ ,A__)
| 101 |
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    """Read a pyspark.sql.DataFrame into a `datasets.Dataset`."""

    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, **kwargs
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(df=df, features=features, cache_dir=cache_dir, working_dir=working_dir, **kwargs)

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(download_mode=download_mode, file_format=self._file_format)
        return self.builder.as_dataset(split=self.split)
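# Hypothetical usage sketch (illustrative): assumes pyspark is installed and a
# local SparkSession is available.
#
#     from pyspark.sql import SparkSession
#     spark = SparkSession.builder.master("local[2]").getOrCreate()
#     df = spark.createDataFrame([("hello",), ("world",)], ["text"])
#     dset = SparkDatasetReader(df, streaming=False).read()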
| 95 | 0 |
"""simple docstring"""
from pathlib import Path
import fire
from tqdm import tqdm
def lowercase ( _snake_case : int="ro" , _snake_case : Dict="en" , _snake_case : int="wmt16" , _snake_case : List[str]=None ) ->None:
"""simple docstring"""
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError('''run pip install datasets''' )
__snake_case : Union[str, Any] = f"""{src_lang}-{tgt_lang}"""
print(f"""Converting {dataset}-{pair}""" )
__snake_case : Optional[Any] = datasets.load_dataset(_snake_case , _snake_case )
if save_dir is None:
__snake_case : int = f"""{dataset}-{pair}"""
__snake_case : Union[str, Any] = Path(_snake_case )
save_dir.mkdir(exist_ok=_snake_case )
for split in ds.keys():
print(f"""Splitting {split} with {ds[split].num_rows} records""" )
# to save to val.source, val.target like summary datasets
__snake_case : Union[str, Any] = '''val''' if split == '''validation''' else split
__snake_case : List[str] = save_dir.joinpath(f"""{fn}.source""" )
__snake_case : int = save_dir.joinpath(f"""{fn}.target""" )
__snake_case : Union[str, Any] = src_path.open('''w+''' )
__snake_case : Union[str, Any] = tgt_path.open('''w+''' )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
__snake_case : List[str] = x['''translation''']
src_fp.write(ex[src_lang] + '''\n''' )
tgt_fp.write(ex[tgt_lang] + '''\n''' )
print(f"""Saved {dataset} dataset to {save_dir}""" )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
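# Example invocation (illustrative; the script filename is assumed). fire maps
# the function's keyword arguments onto command-line flags:
#
#     python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16 --save_dir wmt16-ro-en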
| 102 |
from math import pi


def arc_length(angle: int, radius: int) -> float:
    """Length of a circular arc subtending `angle` degrees at radius `radius`."""
    return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
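    # Spot checks (illustrative): a 360-degree "arc" is the full circumference
    # and 180 degrees is half of it.
    from math import isclose
    assert isclose(arc_length(360, 10), 2 * pi * 10)
    assert isclose(arc_length(180, 10), pi * 10)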
| 95 | 0 |
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class __snake_case ( UpperCamelCase_ ):
_a = ['''vqvae''']
def __init__( self : Tuple , A_ : AutoencoderKL , A_ : UNetaDConditionModel , A_ : Mel , A_ : Union[DDIMScheduler, DDPMScheduler] , ):
super().__init__()
self.register_modules(unet=A_ , scheduler=A_ , mel=A_ , vqvae=A_)
def UpperCAmelCase__ ( self : Any):
return 5_0 if isinstance(self.scheduler , A_) else 1_0_0_0
@torch.no_grad()
def __call__( self : Any , A_ : int = 1 , A_ : str = None , A_ : np.ndarray = None , A_ : int = 0 , A_ : int = 0 , A_ : int = None , A_ : torch.Generator = None , A_ : float = 0 , A_ : float = 0 , A_ : torch.Generator = None , A_ : float = 0 , A_ : torch.Tensor = None , A_ : torch.Tensor = None , A_ : int=True , ):
lowerCAmelCase_ : Union[str, Any] = steps or self.get_default_steps()
self.scheduler.set_timesteps(A_)
lowerCAmelCase_ : int = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size) == int:
lowerCAmelCase_ : Optional[int] = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
lowerCAmelCase_ : int = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=A_ , device=self.device , )
lowerCAmelCase_ : Tuple = noise
lowerCAmelCase_ : Union[str, Any] = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(A_ , A_)
lowerCAmelCase_ : Optional[int] = self.mel.audio_slice_to_image(A_)
lowerCAmelCase_ : int = np.frombuffer(input_image.tobytes() , dtype='''uint8''').reshape(
(input_image.height, input_image.width))
lowerCAmelCase_ : str = (input_image / 2_5_5) * 2 - 1
lowerCAmelCase_ : List[Any] = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float).to(self.device)
if self.vqvae is not None:
lowerCAmelCase_ : Optional[Any] = self.vqvae.encode(torch.unsqueeze(A_ , 0)).latent_dist.sample(
generator=A_)[0]
lowerCAmelCase_ : Optional[int] = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
lowerCAmelCase_ : List[str] = self.scheduler.add_noise(A_ , A_ , self.scheduler.timesteps[start_step - 1])
lowerCAmelCase_ : Union[str, Any] = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
lowerCAmelCase_ : List[Any] = int(mask_start_secs * pixels_per_second)
lowerCAmelCase_ : Optional[Any] = int(mask_end_secs * pixels_per_second)
lowerCAmelCase_ : str = self.scheduler.add_noise(A_ , A_ , torch.tensor(self.scheduler.timesteps[start_step:]))
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
if isinstance(self.unet , A_):
lowerCAmelCase_ : Optional[Any] = self.unet(A_ , A_ , A_)['''sample''']
else:
lowerCAmelCase_ : Dict = self.unet(A_ , A_)['''sample''']
if isinstance(self.scheduler , A_):
lowerCAmelCase_ : List[Any] = self.scheduler.step(
model_output=A_ , timestep=A_ , sample=A_ , eta=A_ , generator=A_ , )['''prev_sample''']
else:
lowerCAmelCase_ : int = self.scheduler.step(
model_output=A_ , timestep=A_ , sample=A_ , generator=A_ , )['''prev_sample''']
if mask is not None:
if mask_start > 0:
lowerCAmelCase_ : Optional[Any] = mask[:, step, :, :mask_start]
if mask_end > 0:
lowerCAmelCase_ : Any = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
lowerCAmelCase_ : Tuple = 1 / self.vqvae.config.scaling_factor * images
lowerCAmelCase_ : Tuple = self.vqvae.decode(A_)['''sample''']
lowerCAmelCase_ : Optional[int] = (images / 2 + 0.5).clamp(0 , 1)
lowerCAmelCase_ : List[str] = images.cpu().permute(0 , 2 , 3 , 1).numpy()
lowerCAmelCase_ : Union[str, Any] = (images * 2_5_5).round().astype('''uint8''')
lowerCAmelCase_ : Optional[Any] = list(
(Image.fromarray(_[:, :, 0]) for _ in images)
if images.shape[3] == 1
            else (Image.fromarray(_ , mode='''RGB''').convert('''L''') for _ in images))
lowerCAmelCase_ : Optional[Any] = [self.mel.image_to_audio(A_) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(A_)[:, np.newaxis, :]) , **ImagePipelineOutput(A_))
@torch.no_grad()
def UpperCAmelCase__ ( self : Optional[int] , A_ : List[Image.Image] , A_ : int = 5_0):
assert isinstance(self.scheduler , A_)
self.scheduler.set_timesteps(A_)
lowerCAmelCase_ : int = np.array(
[np.frombuffer(image.tobytes() , dtype='''uint8''').reshape((1, image.height, image.width)) for image in images])
lowerCAmelCase_ : Any = (sample / 2_5_5) * 2 - 1
lowerCAmelCase_ : List[str] = torch.Tensor(A_).to(self.device)
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,))):
lowerCAmelCase_ : Optional[int] = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
lowerCAmelCase_ : Tuple = self.scheduler.alphas_cumprod[t]
lowerCAmelCase_ : Optional[int] = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
lowerCAmelCase_ : int = 1 - alpha_prod_t
lowerCAmelCase_ : Optional[Any] = self.unet(A_ , A_)['''sample''']
lowerCAmelCase_ : Optional[Any] = (1 - alpha_prod_t_prev) ** 0.5 * model_output
lowerCAmelCase_ : Tuple = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
lowerCAmelCase_ : List[str] = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def UpperCAmelCase__ ( A_ : torch.Tensor , A_ : torch.Tensor , A_ : float):
lowerCAmelCase_ : Dict = acos(torch.dot(torch.flatten(A_) , torch.flatten(A_)) / torch.norm(A_) / torch.norm(A_))
return sin((1 - alpha) * theta) * xa / sin(A_) + sin(alpha * theta) * xa / sin(A_)
| 103 |
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
UpperCAmelCase : int = {
"""cola""": 2,
"""mnli""": 3,
"""mrpc""": 2,
"""sst-2""": 2,
"""sts-b""": 1,
"""qqp""": 2,
"""qnli""": 2,
"""rte""": 2,
"""wnli""": 2,
}
logging.set_verbosity_info()
def _A ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Union[str, Any]=None ):
"""simple docstring"""
a__ : Optional[int] =XLNetConfig.from_json_file(SCREAMING_SNAKE_CASE )
a__ : Dict =finetuning_task.lower() if finetuning_task is not None else ""
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(f'''Building PyTorch XLNetForSequenceClassification model from configuration: {config}''' )
a__ : List[str] =finetuning_task
a__ : Tuple =GLUE_TASKS_NUM_LABELS[finetuning_task]
a__ : List[Any] =XLNetForSequenceClassification(SCREAMING_SNAKE_CASE )
elif "squad" in finetuning_task:
a__ : Optional[int] =finetuning_task
a__ : Dict =XLNetForQuestionAnswering(SCREAMING_SNAKE_CASE )
else:
a__ : List[Any] =XLNetLMHeadModel(SCREAMING_SNAKE_CASE )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Save pytorch-model
a__ : Dict =os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
a__ : Dict =os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
print(f'''Save PyTorch model to {os.path.abspath(SCREAMING_SNAKE_CASE )}''' )
torch.save(model.state_dict() , SCREAMING_SNAKE_CASE )
print(f'''Save configuration file to {os.path.abspath(SCREAMING_SNAKE_CASE )}''' )
with open(SCREAMING_SNAKE_CASE , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
UpperCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--xlnet_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained XLNet model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the folder to store the PyTorch model or dataset/vocab.""",
)
parser.add_argument(
"""--finetuning_task""",
default=None,
type=str,
help="""Name of a task on which the XLNet TensorFlow model was fine-tuned""",
)
UpperCAmelCase : int = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| 95 | 0 |
'''Password generator with a strength checker.'''
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation


def password_generator(length: int = 8) -> str:
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


def alternative_password_generator(chars_incl: str, i: int) -> str:
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(i))


def random_number(chars_incl, i):
    pass  # Put your code here...


def random_letters(chars_incl, i):
    pass  # Put your code here...


def random_characters(chars_incl, i):
    pass  # Put your code here...


def is_strong_password(password: str, min_length: int = 8) -> bool:
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False

    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)
    return upper and lower and num and spec_char
    # Passwords should contain UPPERCASE, lowercase,
    # numbers, and special characters


def main():
    length = int(input('''Please indicate the max length of your password: ''').strip())
    chars_incl = input(
        '''Please indicate the characters that must be in your password: ''').strip()
    print('''Password generated:''', password_generator(length))
    print(
        '''Alternative Password generated:''', alternative_password_generator(chars_incl, length), )
    print('''[If you are thinking of using this password, You better save it.]''')


if __name__ == "__main__":
    main()
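    # Illustrative, non-interactive exercise of the helpers above: generate a
    # few candidates and grade them with is_strong_password.
    for _ in range(5):
        candidate = password_generator(12)
        print(candidate, "->", "strong" if is_strong_password(candidate) else "weak")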
| 104 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase : Optional[int] = {
"""google/canine-s""": """https://huggingface.co/google/canine-s/resolve/main/config.json""",
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class CanineConfig(PretrainedConfig):
    model_type = "canine"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0xE000,
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16384,
        local_transformer_stride=128,
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
| 95 | 0 |
"""simple docstring"""
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int


def all_rotations(s: str) -> list[str]:
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    return [s[i:] + s[:i] for i in range(len(s))]


def bwt_transform(s: str) -> BWTTransformDict:
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")

    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetical order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response


def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            "The parameter idx_original_string type must be int or castable to int.")
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError(
            "The parameter idx_original_string must be lower than len(bwt_string).")

    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]


if __name__ == "__main__":
    entry_msg = "Provide a string that I will generate its BWT transform: "
    s = input(entry_msg).strip()
    result = bwt_transform(s)
    print(
        f"Burrows Wheeler transform for string '{s}' results "
        f"in '{result['bwt_string']}'"
    )
    original_string = reverse_bwt(result["bwt_string"], result["idx_original_string"])
    print(
        f"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' "
        f"we get original string '{original_string}'"
    )
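    # Round-trip check (illustrative): reversing the transform must reproduce
    # the original string.
    for example in ("banana", "mississippi"):
        t = bwt_transform(example)
        assert reverse_bwt(t["bwt_string"], t["idx_original_string"]) == example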
| 105 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
UpperCAmelCase : int = False
class __lowerCAmelCase ( unittest.TestCase):
pass
@nightly
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase):
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self ) -> Any:
'''simple docstring'''
a__ : str =VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
a__ : int =load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
a__ : Optional[Any] =torch.manual_seed(0 )
a__ : Optional[Any] =pipe.dual_guided(
prompt="first prompt" , image=lowerCAmelCase__ , text_to_image_strength=0.75 , generator=lowerCAmelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCAmelCase__ )
a__ : str =VersatileDiffusionPipeline.from_pretrained(lowerCAmelCase__ , torch_dtype=torch.floataa )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
a__ : Optional[Any] =generator.manual_seed(0 )
a__ : Tuple =pipe.dual_guided(
prompt="first prompt" , image=lowerCAmelCase__ , text_to_image_strength=0.75 , generator=lowerCAmelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def _lowercase ( self ) -> Any:
'''simple docstring'''
a__ : str =VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
a__ : Optional[Any] ="cyberpunk 2077"
a__ : int =load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
a__ : Union[str, Any] =torch.manual_seed(0 )
a__ : Tuple =pipe.dual_guided(
prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , text_to_image_strength=0.75 , generator=lowerCAmelCase__ , guidance_scale=7.5 , num_inference_steps=5_0 , output_type="numpy" , ).images
a__ : int =image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
a__ : Any =np.array([0.14_48, 0.16_19, 0.17_41, 0.10_86, 0.11_47, 0.11_28, 0.11_99, 0.11_65, 0.10_01] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
a__ : str ="A painting of a squirrel eating a burger "
a__ : Optional[int] =torch.manual_seed(0 )
a__ : str =pipe.text_to_image(
prompt=lowerCAmelCase__ , generator=lowerCAmelCase__ , guidance_scale=7.5 , num_inference_steps=5_0 , output_type="numpy" ).images
a__ : Any =image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
a__ : Optional[int] =np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
a__ : Optional[Any] =pipe.image_variation(lowerCAmelCase__ , generator=lowerCAmelCase__ , output_type="numpy" ).images
a__ : Union[str, Any] =image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
a__ : Any =np.array([0.30_76, 0.31_23, 0.32_84, 0.37_82, 0.37_70, 0.38_94, 0.42_97, 0.43_31, 0.44_56] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
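# The reproducibility invariant the save/reload test above relies on, shown in
# isolation (a minimal sketch, not the diffusers API): re-seeding a
# torch.Generator reproduces the exact same samples, so equal seeds before and
# after save_pretrained/from_pretrained must yield equal images.
import torch
first = torch.randn(2, 2, generator=torch.Generator().manual_seed(0))
second = torch.randn(2, 2, generator=torch.Generator().manual_seed(0))
assert torch.equal(first, second)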
| 95 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__UpperCamelCase : Optional[Any] = {
'''configuration_roberta_prelayernorm''': [
'''ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''RobertaPreLayerNormConfig''',
'''RobertaPreLayerNormOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : List[Any] = [
'''ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RobertaPreLayerNormForCausalLM''',
'''RobertaPreLayerNormForMaskedLM''',
'''RobertaPreLayerNormForMultipleChoice''',
'''RobertaPreLayerNormForQuestionAnswering''',
'''RobertaPreLayerNormForSequenceClassification''',
'''RobertaPreLayerNormForTokenClassification''',
'''RobertaPreLayerNormModel''',
'''RobertaPreLayerNormPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : int = [
'''TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRobertaPreLayerNormForCausalLM''',
'''TFRobertaPreLayerNormForMaskedLM''',
'''TFRobertaPreLayerNormForMultipleChoice''',
'''TFRobertaPreLayerNormForQuestionAnswering''',
'''TFRobertaPreLayerNormForSequenceClassification''',
'''TFRobertaPreLayerNormForTokenClassification''',
'''TFRobertaPreLayerNormMainLayer''',
'''TFRobertaPreLayerNormModel''',
'''TFRobertaPreLayerNormPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : List[Any] = [
'''FlaxRobertaPreLayerNormForCausalLM''',
'''FlaxRobertaPreLayerNormForMaskedLM''',
'''FlaxRobertaPreLayerNormForMultipleChoice''',
'''FlaxRobertaPreLayerNormForQuestionAnswering''',
'''FlaxRobertaPreLayerNormForSequenceClassification''',
'''FlaxRobertaPreLayerNormForTokenClassification''',
'''FlaxRobertaPreLayerNormModel''',
'''FlaxRobertaPreLayerNormPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
__UpperCamelCase : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
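# A minimal sketch of the lazy-import pattern wired up above (MiniLazyModule
# is a hypothetical stand-in for transformers' _LazyModule): attribute access
# triggers the real submodule import, so importing the package stays cheap
# even when the torch/TF/Flax backends are expensive to load.
import importlib
import types
class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # invert {submodule: [attribute, ...]} into {attribute: submodule}
        self._lookup = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}
    def __getattr__(self, attr):
        module = importlib.import_module(self._lookup[attr])
        return getattr(module, attr)
# demo with stdlib modules standing in for the configuration/modeling files
lazy = MiniLazyModule("demo", {"json": ["dumps"], "math": ["sqrt"]})
assert lazy.sqrt(9) == 3.0 and lazy.dumps({"ok": 1}) == '{"ok": 1}'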
| 106 |
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class __lowerCAmelCase :
def _lowercase ( self , lowerCAmelCase__ ) -> Optional[Any]:
'''simple docstring'''
raise NotImplementedError()
def _lowercase ( self ) -> int:
'''simple docstring'''
raise NotImplementedError()
class __lowerCAmelCase ( UpperCamelCase__):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ = False , **lowerCAmelCase__ ) -> int:
'''simple docstring'''
a__ : str =tokenizer
a__ : List[str] =skip_prompt
a__ : List[Any] =decode_kwargs
# variables used in the streaming process
a__ : Dict =[]
a__ : int =0
a__ : str =True
def _lowercase ( self , lowerCAmelCase__ ) -> Union[str, Any]:
'''simple docstring'''
if len(value.shape ) > 1 and value.shape[0] > 1:
raise ValueError("TextStreamer only supports batch size 1" )
elif len(value.shape ) > 1:
a__ : Any =value[0]
if self.skip_prompt and self.next_tokens_are_prompt:
a__ : Dict =False
return
        # Add the new token to the cache and decode the entire thing.
self.token_cache.extend(value.tolist() )
a__ : Union[str, Any] =self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
# After the symbol for a new line, we flush the cache.
if text.endswith("\n" ):
a__ : List[Any] =text[self.print_len :]
a__ : List[str] =[]
a__ : Optional[int] =0
# If the last token is a CJK character, we print the characters.
elif len(lowerCAmelCase__ ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
a__ : List[str] =text[self.print_len :]
self.print_len += len(lowerCAmelCase__ )
# Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
# which may change with the subsequent token -- there are probably smarter ways to do this!)
else:
a__ : str =text[self.print_len : text.rfind(" " ) + 1]
self.print_len += len(lowerCAmelCase__ )
self.on_finalized_text(lowerCAmelCase__ )
def _lowercase ( self ) -> Any:
'''simple docstring'''
if len(self.token_cache ) > 0:
a__ : Union[str, Any] =self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
a__ : List[Any] =text[self.print_len :]
a__ : List[str] =[]
a__ : Optional[int] =0
else:
a__ : Union[str, Any] =""
a__ : Any =True
self.on_finalized_text(lowerCAmelCase__ , stream_end=lowerCAmelCase__ )
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ = False ) -> Optional[Any]:
'''simple docstring'''
print(lowerCAmelCase__ , flush=lowerCAmelCase__ , end="" if not stream_end else None )
def _lowercase ( self , lowerCAmelCase__ ) -> str:
'''simple docstring'''
if (
            (cp >= 0X4E_00 and cp <= 0X9F_FF)  # CJK Unified Ideographs
            or (cp >= 0X34_00 and cp <= 0X4D_BF)  # CJK Unified Ideographs Extension A
            or (cp >= 0X2_00_00 and cp <= 0X2_A6_DF)  # CJK Unified Ideographs Extension B
            or (cp >= 0X2_A7_00 and cp <= 0X2_B7_3F)  # CJK Unified Ideographs Extension C
            or (cp >= 0X2_B7_40 and cp <= 0X2_B8_1F)  # CJK Unified Ideographs Extension D
            or (cp >= 0X2_B8_20 and cp <= 0X2_CE_AF)  # CJK Unified Ideographs Extension E
            or (cp >= 0XF9_00 and cp <= 0XFA_FF)  # CJK Compatibility Ideographs
            or (cp >= 0X2_F8_00 and cp <= 0X2_FA_1F)  # CJK Compatibility Ideographs Supplement
        ):
return True
return False
class __lowerCAmelCase ( UpperCamelCase__):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ = False , lowerCAmelCase__ = None , **lowerCAmelCase__ ) -> Optional[Any]:
'''simple docstring'''
super().__init__(lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ )
a__ : str =Queue()
a__ : Optional[Any] =None
a__ : Any =timeout
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ = False ) -> List[str]:
'''simple docstring'''
self.text_queue.put(lowerCAmelCase__ , timeout=self.timeout )
if stream_end:
self.text_queue.put(self.stop_signal , timeout=self.timeout )
def __iter__( self ) -> Dict:
'''simple docstring'''
return self
def _lowercase ( self ) -> int:
'''simple docstring'''
a__ : int =self.text_queue.get(timeout=self.timeout )
if value == self.stop_signal:
raise StopIteration()
else:
return value
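# Typical consumption of the queue-backed streamer above (a hedged sketch:
# "gpt2" is only a small placeholder checkpoint, and generation runs in a
# background thread so the main thread can iterate over text chunks as soon
# as they are pushed onto the queue).
from threading import Thread
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
tok = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
inputs = tok(["An increasing sequence: one,"], return_tensors="pt")
streamer = TextIteratorStreamer(tok, skip_prompt=True)
Thread(target=model.generate, kwargs={**inputs, "streamer": streamer, "max_new_tokens": 10}).start()
for chunk in streamer:  # blocks on the queue until the stop signal arrives
    print(chunk, end="")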
| 95 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
__lowerCAmelCase : List[str] = None
__lowerCAmelCase : Tuple = logging.get_logger(__name__)
__lowerCAmelCase : Tuple = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
__lowerCAmelCase : List[str] = {
'vocab_file': {
'facebook/mbart-large-en-ro': (
'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
),
'facebook/mbart-large-cc25': (
'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/mbart-large-en-ro': 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json',
'facebook/mbart-large-cc25': 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json',
},
}
__lowerCAmelCase : str = {
'facebook/mbart-large-en-ro': 1024,
'facebook/mbart-large-cc25': 1024,
}
# fmt: off
__lowerCAmelCase : int = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']
class snake_case__ (_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : Dict = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : List[Any] = ["""input_ids""", """attention_mask"""]
SCREAMING_SNAKE_CASE_ : Optional[int] = MBartTokenizer
SCREAMING_SNAKE_CASE_ : List[int] = []
SCREAMING_SNAKE_CASE_ : List[int] = []
def __init__( self : str , __lowerCamelCase : List[str]=None , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : List[Any]="<s>" , __lowerCamelCase : List[Any]="</s>" , __lowerCamelCase : Tuple="</s>" , __lowerCamelCase : Optional[int]="<s>" , __lowerCamelCase : str="<unk>" , __lowerCamelCase : Union[str, Any]="<pad>" , __lowerCamelCase : List[str]="<mask>" , __lowerCamelCase : Dict=None , __lowerCamelCase : Any=None , __lowerCamelCase : Optional[int]=None , **__lowerCamelCase : Dict , ) -> str:
        # Mask token behaves like a normal word, i.e. includes the space before it
a = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token
super().__init__(
vocab_file=__lowerCamelCase , tokenizer_file=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , src_lang=__lowerCamelCase , tgt_lang=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , **__lowerCamelCase , )
a = vocab_file
a = False if not self.vocab_file else True
a = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} )
a = {
lang_code: self.convert_tokens_to_ids(__lowerCamelCase ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
a = src_lang if src_lang is not None else "en_XX"
a = self.convert_tokens_to_ids(self._src_lang )
a = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def __UpperCAmelCase ( self : Dict ) -> str:
return self._src_lang
@src_lang.setter
def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : str ) -> None:
a = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
a = [self.sep_token_id]
a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCAmelCase ( self : str , __lowerCamelCase : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] , __lowerCamelCase : Optional[str] , **__lowerCamelCase : List[Any] ) -> Tuple:
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
a = src_lang
a = self(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase )
a = self.convert_tokens_to_ids(__lowerCamelCase )
a = tgt_lang_id
return inputs
def __UpperCAmelCase ( self : str , __lowerCamelCase : List[str] , __lowerCamelCase : str = "en_XX" , __lowerCamelCase : Optional[List[str]] = None , __lowerCamelCase : str = "ro_RO" , **__lowerCamelCase : str , ) -> BatchEncoding:
a = src_lang
a = tgt_lang
return super().prepare_seqaseq_batch(__lowerCamelCase , __lowerCamelCase , **__lowerCamelCase )
def __UpperCAmelCase ( self : List[Any] ) -> Dict:
return self.set_src_lang_special_tokens(self.src_lang )
def __UpperCAmelCase ( self : Dict ) -> Tuple:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : List[str] ) -> None:
a = self.convert_tokens_to_ids(__lowerCamelCase )
a = []
a = [self.eos_token_id, self.cur_lang_code]
a = self.convert_ids_to_tokens(self.prefix_tokens )
a = self.convert_ids_to_tokens(self.suffix_tokens )
a = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : str ) -> None:
a = self.convert_tokens_to_ids(__lowerCamelCase )
a = []
a = [self.eos_token_id, self.cur_lang_code]
a = self.convert_ids_to_tokens(self.prefix_tokens )
a = self.convert_ids_to_tokens(self.suffix_tokens )
a = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __UpperCAmelCase ( self : str , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(__lowerCamelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" )
return
a = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ):
copyfile(self.vocab_file , __lowerCamelCase )
return (out_vocab_file,)
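# Typical use of the language-code plumbing above (a hedged sketch; the
# checkpoint must be downloadable). Setting src_lang re-runs
# set_src_lang_special_tokens, so encoded source text ends with the
# [eos_token_id, <language-code id>] suffix configured above.
from transformers import MBartTokenizerFast
tok = MBartTokenizerFast.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
ids = tok("UN Chief says there is no military solution in Syria").input_ids
assert ids[-2:] == [tok.eos_token_id, tok.convert_tokens_to_ids("en_XX")]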
| 107 |
def _A ( SCREAMING_SNAKE_CASE : int = 50 ):
"""simple docstring"""
a__ : Any =[1] * (length + 1)
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(F"""{solution() = }""")
| 95 | 0 |
"""simple docstring"""
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
lowerCAmelCase__ = argparse.ArgumentParser('''Stable Diffusion script with intel optimization''', add_help=False)
parser.add_argument('''--dpm''', action='''store_true''', help='''Enable DPMSolver or not''')
parser.add_argument('''--steps''', default=None, type=int, help='''Num inference steps''')
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = '''cpu'''
lowerCAmelCase__ = '''a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings'''
lowerCAmelCase__ = '''path-to-your-trained-model'''
lowerCAmelCase__ = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
lowerCAmelCase__ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
lowerCAmelCase__ = pipe.to(device)
# to channels last
lowerCAmelCase__ = pipe.unet.to(memory_format=torch.channels_last)
lowerCAmelCase__ = pipe.vae.to(memory_format=torch.channels_last)
lowerCAmelCase__ = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
lowerCAmelCase__ = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
lowerCAmelCase__ = torch.randn(2, 4, 64, 64)
lowerCAmelCase__ = torch.rand(1) * 999
lowerCAmelCase__ = torch.randn(2, 77, 768)
lowerCAmelCase__ = (sample, timestep, encoder_hidden_status)
try:
lowerCAmelCase__ = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
lowerCAmelCase__ = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
lowerCAmelCase__ = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
lowerCAmelCase__ = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
lowerCAmelCase__ = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
lowerCAmelCase__ = 666
lowerCAmelCase__ = torch.Generator(device).manual_seed(seed)
lowerCAmelCase__ = {'''generator''': generator}
if args.steps is not None:
lowerCAmelCase__ = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
lowerCAmelCase__ = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save('''generated.png''')
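# The memory-format conversion above in isolation (pure PyTorch, no IPEX
# needed): channels-last keeps the logical NCHW shape but lays the data out
# NHWC in memory, which is the layout the optimized convolution kernels expect.
import torch
x = torch.randn(1, 3, 8, 8).to(memory_format=torch.channels_last)
assert x.shape == (1, 3, 8, 8)
assert x.is_contiguous(memory_format=torch.channels_last)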
| 108 |
from __future__ import annotations
def _A ( SCREAMING_SNAKE_CASE : list ):
"""simple docstring"""
if len(SCREAMING_SNAKE_CASE ) == 0:
return []
a__ , a__ : int =min(SCREAMING_SNAKE_CASE ), max(SCREAMING_SNAKE_CASE )
a__ : Optional[int] =int(max_value - min_value ) + 1
a__ : list[list] =[[] for _ in range(SCREAMING_SNAKE_CASE )]
for i in my_list:
buckets[int(i - min_value )].append(SCREAMING_SNAKE_CASE )
return [v for bucket in buckets for v in sorted(SCREAMING_SNAKE_CASE )]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
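# The textbook variant for contrast (a sketch assuming values roughly uniform
# in [0, 1), so each of the n buckets receives O(1) elements on average and
# the per-bucket sorts stay cheap).
def bucket_sort_unit_interval(values: list) -> list:
    n = len(values)
    if n == 0:
        return []
    buckets = [[] for _ in range(n)]
    for v in values:
        buckets[min(int(v * n), n - 1)].append(v)
    return [v for bucket in buckets for v in sorted(bucket)]
assert bucket_sort_unit_interval([0.42, 0.32, 0.33, 0.52, 0.37, 0.47, 0.51]) == sorted([0.42, 0.32, 0.33, 0.52, 0.37, 0.47, 0.51])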
| 95 | 0 |
"""simple docstring"""
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
A: str = logging.get_logger(__name__)
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
__lowerCAmelCase : Optional[Any] = UNetaDModel
__lowerCAmelCase : Tuple = 'sample'
@property
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase : Optional[int] = 4
UpperCAmelCase : Dict = 3
UpperCAmelCase : Optional[Any] = (32, 32)
UpperCAmelCase : Optional[int] = floats_tensor((batch_size, num_channels) + sizes ).to(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[str] = torch.tensor([10] ).to(_SCREAMING_SNAKE_CASE )
return {"sample": noise, "timestep": time_step}
@property
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
return (3, 32, 32)
@property
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
return (3, 32, 32)
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : int = {
"""block_out_channels""": (32, 64),
"""down_block_types""": ("""DownBlock2D""", """AttnDownBlock2D"""),
"""up_block_types""": ("""AttnUpBlock2D""", """UpBlock2D"""),
"""attention_head_dim""": 3,
"""out_channels""": 3,
"""in_channels""": 3,
"""layers_per_block""": 2,
"""sample_size""": 32,
}
UpperCAmelCase : List[str] = self.dummy_input
return init_dict, inputs_dict
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
__lowerCAmelCase : int = UNetaDModel
__lowerCAmelCase : List[str] = 'sample'
@property
def SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
UpperCAmelCase : str = 4
UpperCAmelCase : str = 4
UpperCAmelCase : Any = (32, 32)
UpperCAmelCase : int = floats_tensor((batch_size, num_channels) + sizes ).to(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : str = torch.tensor([10] ).to(_SCREAMING_SNAKE_CASE )
return {"sample": noise, "timestep": time_step}
@property
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
return (4, 32, 32)
@property
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
return (4, 32, 32)
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
UpperCAmelCase : int = {
"""sample_size""": 32,
"""in_channels""": 4,
"""out_channels""": 4,
"""layers_per_block""": 2,
"""block_out_channels""": (32, 64),
"""attention_head_dim""": 32,
"""down_block_types""": ("""DownBlock2D""", """DownBlock2D"""),
"""up_block_types""": ("""UpBlock2D""", """UpBlock2D"""),
}
UpperCAmelCase : Tuple = self.dummy_input
return init_dict, inputs_dict
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase : List[Any] = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[Any] = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" )
def SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase : Dict = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : str = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" )
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase : Optional[Any] = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=_SCREAMING_SNAKE_CASE )
model_accelerate.to(_SCREAMING_SNAKE_CASE )
model_accelerate.eval()
UpperCAmelCase : Union[str, Any] = torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
UpperCAmelCase : Dict = noise.to(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[int] = torch.tensor([10] * noise.shape[0] ).to(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : int = model_accelerate(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )["""sample"""]
        # the two models don't need to stay on the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = UNetaDModel.from_pretrained(
"""fusing/unet-ldm-dummy-update""" , output_loading_info=_SCREAMING_SNAKE_CASE , low_cpu_mem_usage=_SCREAMING_SNAKE_CASE )
model_normal_load.to(_SCREAMING_SNAKE_CASE )
model_normal_load.eval()
UpperCAmelCase : int = model_normal_load(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )["""sample"""]
assert torch_all_close(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , rtol=1E-3 )
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
UpperCAmelCase : int = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" )
model.eval()
model.to(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Tuple = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
UpperCAmelCase : Optional[Any] = noise.to(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[Any] = torch.tensor([10] * noise.shape[0] ).to(_SCREAMING_SNAKE_CASE )
with torch.no_grad():
UpperCAmelCase : List[Any] = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).sample
UpperCAmelCase : Tuple = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
UpperCAmelCase : List[Any] = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800] )
# fmt: on
self.assertTrue(torch_all_close(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , rtol=1E-3 ) )
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
__lowerCAmelCase : List[str] = UNetaDModel
__lowerCAmelCase : List[str] = 'sample'
@property
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE=(32, 32) ) -> str:
'''simple docstring'''
UpperCAmelCase : Optional[Any] = 4
UpperCAmelCase : Tuple = 3
UpperCAmelCase : List[str] = floats_tensor((batch_size, num_channels) + sizes ).to(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Dict = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=_SCREAMING_SNAKE_CASE )
return {"sample": noise, "timestep": time_step}
@property
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
return (3, 32, 32)
@property
def SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
return (3, 32, 32)
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
UpperCAmelCase : int = {
"""block_out_channels""": [32, 64, 64, 64],
"""in_channels""": 3,
"""layers_per_block""": 1,
"""out_channels""": 3,
"""time_embedding_type""": """fourier""",
"""norm_eps""": 1E-6,
"""mid_block_scale_factor""": math.sqrt(2.0 ),
"""norm_num_groups""": None,
"""down_block_types""": [
"""SkipDownBlock2D""",
"""AttnSkipDownBlock2D""",
"""SkipDownBlock2D""",
"""SkipDownBlock2D""",
],
"""up_block_types""": [
"""SkipUpBlock2D""",
"""SkipUpBlock2D""",
"""AttnSkipUpBlock2D""",
"""SkipUpBlock2D""",
],
}
UpperCAmelCase : Optional[Any] = self.dummy_input
return init_dict, inputs_dict
@slow
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase : List[Any] = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" , output_loading_info=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Dict = self.dummy_input
UpperCAmelCase : int = floats_tensor((4, 3) + (256, 256) ).to(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[Any] = noise
UpperCAmelCase : Optional[int] = model(**_SCREAMING_SNAKE_CASE )
assert image is not None, "Make sure output is not None"
@slow
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
UpperCAmelCase : int = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" )
model.to(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[Any] = 4
UpperCAmelCase : Optional[Any] = 3
UpperCAmelCase : List[Any] = (256, 256)
UpperCAmelCase : int = torch.ones((batch_size, num_channels) + sizes ).to(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[Any] = torch.tensor(batch_size * [1E-4] ).to(_SCREAMING_SNAKE_CASE )
with torch.no_grad():
UpperCAmelCase : Dict = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).sample
UpperCAmelCase : str = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
UpperCAmelCase : Optional[int] = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -1_0980.7129, -2_0028.8535, 8148.2822, 2342.2905, 567.7608] )
# fmt: on
self.assertTrue(torch_all_close(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , rtol=1E-2 ) )
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
UpperCAmelCase : int = UNetaDModel.from_pretrained("""fusing/ncsnpp-ffhq-ve-dummy-update""" )
model.to(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Dict = 4
UpperCAmelCase : Optional[int] = 3
UpperCAmelCase : Optional[Any] = (32, 32)
UpperCAmelCase : List[Any] = torch.ones((batch_size, num_channels) + sizes ).to(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : int = torch.tensor(batch_size * [1E-4] ).to(_SCREAMING_SNAKE_CASE )
with torch.no_grad():
UpperCAmelCase : int = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).sample
UpperCAmelCase : Any = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
UpperCAmelCase : int = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256] )
# fmt: on
self.assertTrue(torch_all_close(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , rtol=1E-2 ) )
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
| 109 |
import numpy as np
def _A ( SCREAMING_SNAKE_CASE : np.array ):
"""simple docstring"""
return 1 / (1 + np.exp(-vector ))
if __name__ == "__main__":
import doctest
doctest.testmod()
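# Quick numerical checks for the function above: sigmoid(0) is exactly 0.5,
# and the curve is symmetric about the origin, so sigmoid(-x) == 1 - sigmoid(x)
# elementwise.
import numpy as np
vector = np.array([-1.0, 0.0, 1.0])
sig = 1 / (1 + np.exp(-vector))
assert sig[1] == 0.5
assert np.allclose(sig[::-1], 1 - sig)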
| 95 | 0 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
TaConfig,
TaTokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def _a ( ):
"""simple docstring"""
lowercase__ = '''https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg'''
lowercase__ = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw ).convert('''RGB''' )
return image
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'visual_encoder.blocks.{i}.norm1.weight', f'vision_model.encoder.layers.{i}.layer_norm1.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm1.bias', f'vision_model.encoder.layers.{i}.layer_norm1.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm2.weight', f'vision_model.encoder.layers.{i}.layer_norm2.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm2.bias', f'vision_model.encoder.layers.{i}.layer_norm2.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.qkv.weight', f'vision_model.encoder.layers.{i}.self_attn.qkv.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.weight', f'vision_model.encoder.layers.{i}.self_attn.projection.weight',) )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.bias', f'vision_model.encoder.layers.{i}.self_attn.projection.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.weight', f'vision_model.encoder.layers.{i}.mlp.fc1.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.bias', f'vision_model.encoder.layers.{i}.mlp.fc1.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.weight', f'vision_model.encoder.layers.{i}.mlp.fc2.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.bias', f'vision_model.encoder.layers.{i}.mlp.fc2.bias') )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.embeddings.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.embeddings.layernorm.bias''') )
# fmt: on
return rename_keys
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = dct.pop(SCREAMING_SNAKE_CASE )
lowercase__ = val
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
lowercase__ = state_dict.pop(f'visual_encoder.blocks.{i}.attn.q_bias' )
lowercase__ = state_dict.pop(f'visual_encoder.blocks.{i}.attn.v_bias' )
# next, set bias in the state dict
lowercase__ = torch.cat((q_bias, torch.zeros_like(SCREAMING_SNAKE_CASE , requires_grad=SCREAMING_SNAKE_CASE ), v_bias) )
lowercase__ = qkv_bias
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = 3_64 if '''coco''' in model_name else 2_24
lowercase__ = InstructBlipVisionConfig(image_size=SCREAMING_SNAKE_CASE ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "t5-xl" in model_name:
lowercase__ = TaConfig.from_pretrained('''google/flan-t5-xl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
lowercase__ = TaConfig.from_pretrained('''google/flan-t5-xxl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
elif "vicuna-7b" in model_name:
lowercase__ = LlamaConfig.from_pretrained('''decapoda-research/llama-7b-hf''' , vocab_size=3_20_01 ).to_dict()
elif "vicuna-13b" in model_name:
lowercase__ = LlamaConfig.from_pretrained('''decapoda-research/llama-13b-hf''' , vocab_size=3_20_01 ).to_dict()
else:
raise ValueError('''Model name not supported''' )
# the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
lowercase__ = InstructBlipQFormerConfig(vocab_size=3_05_23 ).to_dict()
lowercase__ = InstructBlipConfig(vision_config=SCREAMING_SNAKE_CASE , text_config=SCREAMING_SNAKE_CASE , qformer_config=SCREAMING_SNAKE_CASE )
return config, image_size
@torch.no_grad()
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=False ):
"""simple docstring"""
lowercase__ = AutoTokenizer.from_pretrained('''bert-base-uncased''' , truncation_side='''left''' )
qformer_tokenizer.add_special_tokens({'''bos_token''': '''[DEC]'''} )
if "t5" in model_name:
lowercase__ = TaTokenizerFast.from_pretrained('''google/flan-t5-xl''' , truncation_side='''left''' )
elif "vicuna" in model_name:
# the following was used in the original implementation:
# tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
# tokenizer.add_special_tokens({"pad_token": "[PAD]"})
# tokenizer.add_special_tokens({"bos_token": "</s>"})
# tokenizer.add_special_tokens({"eos_token": "</s>"})
# tokenizer.add_special_tokens({"unk_token": "</s>"})
lowercase__ = LlamaTokenizerFast.from_pretrained(
'''huggyllama/llama-7b''' , truncation_side='''left''' , bos_token='''</s>''' , unk_token='''</s>''' )
tokenizer.add_special_tokens({'''pad_token''': '''[PAD]'''} )
lowercase__ , lowercase__ = get_blipa_config(SCREAMING_SNAKE_CASE )
lowercase__ = InstructBlipForConditionalGeneration(SCREAMING_SNAKE_CASE ).eval()
lowercase__ = {
'''instructblip-vicuna-7b''': ('''blip2_vicuna_instruct''', '''vicuna7b'''),
'''instructblip-vicuna-13b''': ('''blip2_vicuna_instruct''', '''vicuna13b'''),
'''instructblip-flan-t5-xl''': ('''blip2_t5_instruct''', '''flant5xl'''),
'''instructblip-flan-t5-xxl''': ('''blip2_t5_instruct''', '''flant5xxl'''),
}
lowercase__ , lowercase__ = model_name_to_original[model_name]
# load original model
print('''Loading original model...''' )
lowercase__ = '''cuda:1''' if torch.cuda.is_available() else '''cpu'''
lowercase__ = '''cuda:2''' if torch.cuda.is_available() else '''cpu'''
lowercase__ , lowercase__ , lowercase__ = load_model_and_preprocess(
name=SCREAMING_SNAKE_CASE , model_type=SCREAMING_SNAKE_CASE , is_eval=SCREAMING_SNAKE_CASE , device=SCREAMING_SNAKE_CASE )
original_model.eval()
print('''Done!''' )
# update state dict keys
lowercase__ = original_model.state_dict()
lowercase__ = create_rename_keys(SCREAMING_SNAKE_CASE )
for src, dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
lowercase__ = state_dict.pop(SCREAMING_SNAKE_CASE )
if key.startswith('''Qformer.bert''' ):
lowercase__ = key.replace('''Qformer.bert''' , '''qformer''' )
if "attention.self" in key:
lowercase__ = key.replace('''self''' , '''attention''' )
if "llm_proj" in key:
lowercase__ = key.replace('''llm_proj''' , '''language_projection''' )
if "t5_proj" in key:
lowercase__ = key.replace('''t5_proj''' , '''language_projection''' )
if key.startswith('''llm_model''' ):
lowercase__ = key.replace('''llm_model''' , '''language_model''' )
if key.startswith('''t5''' ):
lowercase__ = key.replace('''t5''' , '''language''' )
lowercase__ = val
# read in qv biases
read_in_q_v_bias(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# note: weights get loaded in torch.float32 by default
hf_model.load_state_dict(SCREAMING_SNAKE_CASE , strict=SCREAMING_SNAKE_CASE )
lowercase__ = load_demo_image()
lowercase__ = '''What is unusual about this image?'''
# create processor
lowercase__ = BlipImageProcessor(
size={'''height''': image_size, '''width''': image_size} , image_mean=SCREAMING_SNAKE_CASE , image_std=SCREAMING_SNAKE_CASE )
lowercase__ = InstructBlipProcessor(
image_processor=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , qformer_tokenizer=SCREAMING_SNAKE_CASE , )
lowercase__ = processor(images=SCREAMING_SNAKE_CASE , text=SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE )
    # make sure the processor creates the exact same pixel values
lowercase__ = vis_processors['''eval'''](SCREAMING_SNAKE_CASE ).unsqueeze(0 ).to(SCREAMING_SNAKE_CASE )
lowercase__ = inputs.pixel_values
assert torch.allclose(original_pixel_values.to(pixel_values.device ) , SCREAMING_SNAKE_CASE )
original_model.to(SCREAMING_SNAKE_CASE )
hf_model.to(SCREAMING_SNAKE_CASE )
with torch.no_grad():
if "vicuna" in model_name:
lowercase__ = original_model({'''image''': original_pixel_values, '''text_input''': [prompt]} ).logits
lowercase__ = hf_model(**SCREAMING_SNAKE_CASE ).logits
else:
lowercase__ = original_model(
{'''image''': original_pixel_values, '''text_input''': [prompt], '''text_output''': ['''\n''']} ).logits
lowercase__ = tokenizer('''\n''' , return_tensors='''pt''' ).input_ids.to(SCREAMING_SNAKE_CASE )
lowercase__ = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -1_00 )
lowercase__ = hf_model(**SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE ).logits
print('''First values of original logits:''' , original_logits[0, :3, :3] )
print('''First values of HF logits:''' , logits[0, :3, :3] )
# assert values
assert original_logits.shape == logits.shape
lowercase__ = 1E-4 if '''vicuna''' in model_name else 1E-5
assert torch.allclose(original_logits.to(logits.device ) , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE )
print('''Looks ok!''' )
print('''Generating with original model...''' )
lowercase__ = original_model.generate({'''image''': original_pixel_values, '''prompt''': prompt} , num_beams=5 )
# important: we need to cast the weights of the HF model to the appropriate type
print('''Generating with HF model...''' )
lowercase__ = hf_model.generate(
**SCREAMING_SNAKE_CASE , do_sample=SCREAMING_SNAKE_CASE , num_beams=5 , max_length=2_56 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , )
if "vicuna" in model_name:
# convert output id 0 to 2 (eos_token_id)
# TODO add this in the generate method?
lowercase__ = 2
print('''Original generation:''' , SCREAMING_SNAKE_CASE )
lowercase__ = processor.batch_decode(SCREAMING_SNAKE_CASE , skip_special_tokens=SCREAMING_SNAKE_CASE )
lowercase__ = [text.strip() for text in output_text]
print('''HF generation:''' , SCREAMING_SNAKE_CASE )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(SCREAMING_SNAKE_CASE )
hf_model.save_pretrained(SCREAMING_SNAKE_CASE )
if push_to_hub:
processor.push_to_hub(f'Salesforce/{model_name}' )
hf_model.push_to_hub(f'Salesforce/{model_name}' )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
lowerCAmelCase = [
'instructblip-vicuna-7b',
'instructblip-vicuna-13b',
'instructblip-flan-t5-xl',
'instructblip-flan-t5-xxl',
]
parser.add_argument(
'--model_name',
default='instructblip-flan-t5-xl',
choices=choices,
type=str,
        help='Name of the InstructBLIP model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
lowerCAmelCase = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
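# The rename loop above, reduced to its core on a hypothetical toy state dict:
# pop the tensor stored under the LAVIS key and reinsert it under the
# Hugging Face key, leaving all other entries untouched.
toy_state_dict = {"Qformer.bert.embeddings.LayerNorm.weight": 1.0, "other": 2.0}
toy_renames = [("Qformer.bert.embeddings.LayerNorm.weight", "qformer.embeddings.layernorm.weight")]
for src, dest in toy_renames:
    toy_state_dict[dest] = toy_state_dict.pop(src)
assert set(toy_state_dict) == {"qformer.embeddings.layernorm.weight", "other"}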
| 110 |
import numpy
# List of input, output pairs
UpperCAmelCase : str = (
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
UpperCAmelCase : Optional[int] = (((515, 22, 13), 555), ((61, 35, 49), 150))
UpperCAmelCase : str = [2, 4, 1, 5]
UpperCAmelCase : List[str] = len(train_data)
UpperCAmelCase : Dict = 0.0_0_9
def _A ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Tuple="train" ):
"""simple docstring"""
return calculate_hypothesis_value(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) - output(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def _A ( SCREAMING_SNAKE_CASE : Tuple ):
"""simple docstring"""
a__ : Tuple =0
for i in range(len(SCREAMING_SNAKE_CASE ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def _A ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def _A ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Tuple ):
"""simple docstring"""
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def _A ( SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : int=m ):
"""simple docstring"""
a__ : Any =0
for i in range(SCREAMING_SNAKE_CASE ):
if index == -1:
summation_value += _error(SCREAMING_SNAKE_CASE )
else:
summation_value += _error(SCREAMING_SNAKE_CASE ) * train_data[i][0][index]
return summation_value
def _A ( SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
a__ : Any =summation_of_cost_derivative(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) / m
return cost_derivative_value
def _A ( ):
"""simple docstring"""
global parameter_vector
# Tune these values to set a tolerance value for predicted output
a__ : Dict =0.0_0_0_0_0_2
a__ : Union[str, Any] =0
a__ : Any =0
while True:
j += 1
a__ : Any =[0, 0, 0, 0]
for i in range(0 , len(SCREAMING_SNAKE_CASE ) ):
a__ : Tuple =get_cost_derivative(i - 1 )
a__ : List[Any] =(
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE , rtol=SCREAMING_SNAKE_CASE , ):
break
a__ : Optional[Any] =temp_parameter_vector
print(("Number of iterations:", j) )
def _A ( ):
"""simple docstring"""
for i in range(len(SCREAMING_SNAKE_CASE ) ):
print(("Actual output value:", output(SCREAMING_SNAKE_CASE , "test" )) )
print(("Hypothesis output:", calculate_hypothesis_value(SCREAMING_SNAKE_CASE , "test" )) )
if __name__ == "__main__":
run_gradient_descent()
print("""\nTesting gradient descent for a linear hypothesis function.\n""")
test_gradient_descent()
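# A vectorized restatement of the loop above (hypothetical names, NumPy only).
# With a bias column prepended to X, every iteration applies
#     theta <- theta - (lr / m) * X.T @ (X @ theta - y)
# which is the same per-parameter update the element-wise code performs; the
# training points below are the ones listed at the top of this file.
import numpy as np
def batch_gradient_descent(x, y, lr=0.009, atol=0.000002, max_iter=100000):
    x = np.hstack([np.ones((x.shape[0], 1)), x])  # bias column -> parameter_vector[0]
    theta = np.zeros(x.shape[1])
    for _ in range(max_iter):
        new_theta = theta - lr * x.T @ (x @ theta - y) / len(y)
        if np.allclose(new_theta, theta, atol=atol):
            break
        theta = new_theta
    return theta
x_train = np.array([[5, 2, 3], [6, 5, 9], [11, 12, 13], [1, 1, 1], [11, 12, 13]], dtype=float)
y_train = np.array([15.0, 25.0, 41.0, 8.0, 41.0])
print(batch_gradient_descent(x_train, y_train))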
| 95 | 0 |
def UpperCAmelCase ( a_ , a_ ) -> str:
"""simple docstring"""
return 1 if input_a == input_a else 0
def UpperCAmelCase ( ) -> List[Any]:
"""simple docstring"""
assert xnor_gate(0 , 0 ) == 1
assert xnor_gate(0 , 1 ) == 0
assert xnor_gate(1 , 0 ) == 0
assert xnor_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
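# An equivalent bitwise form (a sketch assuming the inputs are restricted to
# 0/1, as in the truth-table checks above): XNOR is XOR followed by NOT, i.e.
# (a ^ b) ^ 1.
def xnor_bitwise(input_a: int, input_b: int) -> int:
    return (input_a ^ input_b) ^ 1
assert all(xnor_bitwise(a, b) == (1 if a == b else 0) for a in (0, 1) for b in (0, 1))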
| 15 |
def _A ( SCREAMING_SNAKE_CASE : Optional[Any] ):
"""simple docstring"""
a__ : Optional[Any] =len(SCREAMING_SNAKE_CASE )
while cur > 1:
# Find the maximum number in arr
a__ : List[Any] =arr.index(max(arr[0:cur] ) )
# Reverse from 0 to mi
a__ : int =arr[mi::-1] + arr[mi + 1 : len(SCREAMING_SNAKE_CASE )]
# Reverse whole list
a__ : List[str] =arr[cur - 1 :: -1] + arr[cur : len(SCREAMING_SNAKE_CASE )]
cur -= 1
return arr
if __name__ == "__main__":
UpperCAmelCase : int = input("""Enter numbers separated by a comma:\n""").strip()
UpperCAmelCase : Optional[int] = [int(item) for item in user_input.split(""",""")]
print(pancake_sort(unsorted))
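# A de-obfuscated restatement of the routine above (hypothetical name): each
# pass flips the current maximum to the front, then flips it into its final
# slot, so a list of n items is sorted with at most 2(n - 1) flips.
def pancake_sort_clean(arr: list) -> list:
    cur = len(arr)
    while cur > 1:
        mi = arr.index(max(arr[:cur]))        # position of the max in the prefix
        arr = arr[mi::-1] + arr[mi + 1 :]     # flip the max to the front
        arr = arr[cur - 1 :: -1] + arr[cur:]  # flip the prefix; max lands at cur - 1
        cur -= 1
    return arr
assert pancake_sort_clean([3, 1, 4, 1, 5, 9, 2, 6]) == [1, 1, 2, 3, 4, 5, 6, 9]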
| 95 | 0 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class _UpperCamelCase ( unittest.TestCase ):
def UpperCAmelCase_ ( self :Dict ) -> Any:
UpperCAmelCase__ = inspect.getfile(accelerate.test_utils )
UpperCAmelCase__ = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ["scripts", "external_deps", "test_metrics.py"] )
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
UpperCAmelCase__ = test_metrics
@require_cpu
def UpperCAmelCase_ ( self :Dict ) -> Optional[int]:
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
def UpperCAmelCase_ ( self :Any ) -> Dict:
debug_launcher(self.test_metrics.main )
@require_single_gpu
def UpperCAmelCase_ ( self :Tuple ) -> List[str]:
self.test_metrics.main()
@require_multi_gpu
def UpperCAmelCase_ ( self :List[str] ) -> str:
print(f'''Found {torch.cuda.device_count()} devices.''' )
UpperCAmelCase__ = ["torchrun", f'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(lowerCAmelCase__ , env=os.environ.copy() )
| 169 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class __lowerCAmelCase ( unittest.TestCase):
def _lowercase ( self ) -> Dict:
'''simple docstring'''
a__ : Any =tempfile.mkdtemp()
# fmt: off
a__ : List[Any] =["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
a__ : str =dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
a__ : List[Any] =["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
a__ : Optional[int] ={"unk_token": "<unk>"}
a__ : Optional[Any] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
a__ : Tuple =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowerCAmelCase__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(lowerCAmelCase__ ) )
a__ : Optional[Any] ={
"do_resize": True,
"size": 2_0,
"do_center_crop": True,
"crop_size": 1_8,
"do_normalize": True,
"image_mean": [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
"image_std": [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
a__ : Dict =os.path.join(self.tmpdirname , lowerCAmelCase__ )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
def _lowercase ( self , **lowerCAmelCase__ ) -> Union[str, Any]:
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def _lowercase ( self , **lowerCAmelCase__ ) -> List[Any]:
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def _lowercase ( self , **lowerCAmelCase__ ) -> Any:
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _lowercase ( self ) -> Any:
'''simple docstring'''
a__ : Optional[Any] =[np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
a__ : List[Any] =[Image.fromarray(np.moveaxis(lowerCAmelCase__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _lowercase ( self ) -> Dict:
'''simple docstring'''
a__ : Union[str, Any] =self.get_tokenizer()
a__ : int =self.get_rust_tokenizer()
a__ : List[str] =self.get_image_processor()
a__ : Dict =CLIPSegProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
processor_slow.save_pretrained(self.tmpdirname )
a__ : Optional[Any] =CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=lowerCAmelCase__ )
a__ : Tuple =CLIPSegProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
processor_fast.save_pretrained(self.tmpdirname )
a__ : Dict =CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , lowerCAmelCase__ )
self.assertIsInstance(processor_fast.tokenizer , lowerCAmelCase__ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , lowerCAmelCase__ )
self.assertIsInstance(processor_fast.image_processor , lowerCAmelCase__ )
def _lowercase ( self ) -> Any:
'''simple docstring'''
a__ : List[str] =CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
a__ : str =self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
a__ : int =self.get_image_processor(do_normalize=lowerCAmelCase__ , padding_value=1.0 )
a__ : Optional[Any] =CLIPSegProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=lowerCAmelCase__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCAmelCase__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCAmelCase__ )
def _lowercase ( self ) -> List[Any]:
'''simple docstring'''
a__ : str =self.get_image_processor()
a__ : Optional[int] =self.get_tokenizer()
a__ : Dict =CLIPSegProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
a__ : str =self.prepare_image_inputs()
a__ : Any =image_processor(lowerCAmelCase__ , return_tensors="np" )
a__ : Optional[int] =processor(images=lowerCAmelCase__ , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
a__ : Optional[int] =self.get_image_processor()
a__ : List[Any] =self.get_tokenizer()
a__ : Optional[int] =CLIPSegProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
a__ : Union[str, Any] ="lower newer"
a__ : List[str] =processor(text=lowerCAmelCase__ )
a__ : str =tokenizer(lowerCAmelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
a__ : Any =self.get_image_processor()
a__ : Dict =self.get_tokenizer()
a__ : Union[str, Any] =CLIPSegProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
a__ : Dict ="lower newer"
a__ : int =self.prepare_image_inputs()
a__ : Any =processor(text=lowerCAmelCase__ , images=lowerCAmelCase__ )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase__ ):
processor()
def _lowercase ( self ) -> str:
'''simple docstring'''
a__ : Union[str, Any] =self.get_image_processor()
a__ : Optional[Any] =self.get_tokenizer()
a__ : str =CLIPSegProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
a__ : int =self.prepare_image_inputs()
a__ : Union[str, Any] =self.prepare_image_inputs()
a__ : Tuple =processor(images=lowerCAmelCase__ , visual_prompt=lowerCAmelCase__ )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "conditional_pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase__ ):
processor()
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
a__ : Optional[int] =self.get_image_processor()
a__ : Any =self.get_tokenizer()
a__ : Tuple =CLIPSegProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
a__ : Dict =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
a__ : Optional[Any] =processor.batch_decode(lowerCAmelCase__ )
a__ : Dict =tokenizer.batch_decode(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
| 95 | 0 |
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
__lowerCAmelCase : str = logging.get_logger(__name__)
@add_end_docstrings(UpperCamelCase__ )
class __lowerCAmelCase ( UpperCamelCase__ ):
"""simple docstring"""
    def __init__( self : Optional[Any] , *args : Optional[Any] , **kwargs : List[Any] ):
        super().__init__(*args , **kwargs )
        requires_backends(self , '''decord''' )
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING )
    def _sanitize_parameters( self : List[Any] , top_k : List[str]=None , num_frames : Dict=None , frame_sampling_rate : Union[str, Any]=None ):
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params["frame_sampling_rate"] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params["num_frames"] = num_frames
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params
    def __call__( self : Dict , videos : Any , **kwargs : int ):
        return super().__call__(videos , **kwargs )
    def preprocess( self : int , video : Optional[int] , num_frames : Dict=None , frame_sampling_rate : Union[str, Any]=1 ):
        if num_frames is None:
            num_frames = self.model.config.num_frames
        if video.startswith('''http://''' ) or video.startswith('''https://''' ):
            video = BytesIO(requests.get(video ).content )
        videoreader = VideoReader(video )
        videoreader.seek(0 )
        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx , end_idx , num=num_frames , dtype=np.int64 )
        video = videoreader.get_batch(indices ).asnumpy()
        video = list(video )
        model_inputs = self.image_processor(video , return_tensors=self.framework )
        return model_inputs
    def _forward( self : List[str] , model_inputs : Optional[Any] ):
        model_outputs = self.model(**model_inputs )
        return model_outputs
    def postprocess( self : Optional[Any] , model_outputs : int , top_k : Dict=5 ):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1 )[0]
            scores , ids = probs.topk(top_k )
        else:
            raise ValueError(F'Unsupported framework: {self.framework}' )
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores , ids )]
| 156 |
def is_arithmetic_series ( series : list ):
    """simple docstring"""
    if not isinstance(series , list ):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]" )
    if len(series ) == 0:
        raise ValueError("Input list must be a non empty list" )
    if len(series ) == 1:
        return True
    common_diff =series[1] - series[0]
    for index in range(len(series ) - 1 ):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True
def arithmetic_mean ( series : list ):
    """simple docstring"""
    if not isinstance(series , list ):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]" )
    if len(series ) == 0:
        raise ValueError("Input list must be a non empty list" )
    answer =0
    for val in series:
        answer += val
    return answer / len(series )
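Both helpers are easy to sanity-check by hand (the names are the ones introduced in the fix above):
assert is_arithmetic_series([2, 4, 6]) is True    # constant difference of 2
assert is_arithmetic_series([2, 4, 7]) is False   # 7 - 4 != 4 - 2
assert arithmetic_mean([2, 4, 6]) == 4.0          # (2 + 4 + 6) / 3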
if __name__ == "__main__":
import doctest
doctest.testmod()
| 95 | 0 |
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""enhancement""",
"""new pipeline/model""",
"""new scheduler""",
"""wip""",
]
def main ( ):
    g = Github(os.environ["GITHUB_TOKEN"] )
    repo = g.get_repo("huggingface/diffusers" )
    open_issues = repo.get_issues(state="open" )
    for issue in open_issues:
        comments = sorted(issue.get_comments() , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="closed" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="open" )
issue.remove_from_labels("stale" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
issue.add_to_labels("stale" )
if __name__ == "__main__":
main()
| 268 |
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig ( XLMRobertaConfig):
    model_type = """M-CLIP"""
    def __init__( self , transformerDimSize=1_0_2_4 , imageDimSize=7_6_8 , **kwargs ) -> Any:
        '''simple docstring'''
        self.transformerDimensions =transformerDimSize
        self.numDims =imageDimSize
        super().__init__(**kwargs )
class MultilingualCLIP ( PreTrainedModel):
    config_class = MCLIPConfig
    def __init__( self , config , *args , **kwargs ) -> List[str]:
        '''simple docstring'''
        super().__init__(config , *args , **kwargs )
        self.transformer =XLMRobertaModel(config )
        self.LinearTransformation =torch.nn.Linear(
in_features=config.transformerDimensions , out_features=config.numDims )
    def forward( self , input_ids , attention_mask ) -> Union[str, Any]:
        '''simple docstring'''
        embs =self.transformer(input_ids=input_ids , attention_mask=attention_mask )[0]
        embs =(embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
        return self.LinearTransformation(embs ), embs
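A hedged usage sketch for the text tower above; the tokenizer checkpoint is an assumption (any XLM-R tokenizer matching the loaded weights works), and `model` stands for an instantiated MultilingualCLIP:
import torch
from transformers import AutoTokenizer
tok = AutoTokenizer.from_pretrained("xlm-roberta-large" )  # assumed checkpoint
batch = tok(["une photo d'un chien"] , return_tensors="pt" , padding=True )
with torch.no_grad():
    projected , pooled = model(batch["input_ids"] , batch["attention_mask"] )
# `projected` is the text embedding mapped into the CLIP image space;
# `pooled` is the attention-mask-weighted mean of the XLM-R token states.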
| 95 | 0 |
from ...configuration_utils import PretrainedConfig
class __snake_case ( PretrainedConfig ):
    '''simple docstring'''
    model_type = """bert-generation"""
    def __init__( self : List[Any] , vocab_size : Any=50_358 , hidden_size : Dict=1_024 , num_hidden_layers : int=24 , num_attention_heads : Union[str, Any]=16 , intermediate_size : Tuple=4_096 , hidden_act : Union[str, Any]="gelu" , hidden_dropout_prob : Optional[int]=0.1 , attention_probs_dropout_prob : Tuple=0.1 , max_position_embeddings : Union[str, Any]=512 , initializer_range : List[Any]=0.02 , layer_norm_eps : str=1E-12 , pad_token_id : List[Any]=0 , bos_token_id : int=2 , eos_token_id : int=1 , position_embedding_type : Tuple="absolute" , use_cache : str=True , **kwargs : Union[str, Any] , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
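A minimal instantiation sketch for the fixed config above (`__snake_case` is the obfuscated class name, standing in for BertGenerationConfig):
config = __snake_case(hidden_size=512 , num_hidden_layers=4 )
print(config.vocab_size , config.hidden_size )  # 50358 512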
| 111 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders ( accelerator : Accelerator , batch_size : int = 16 ):
    """simple docstring"""
    tokenizer =AutoTokenizer.from_pretrained("bert-base-cased" )
    datasets =load_dataset("glue" , "mrpc" )
    def tokenize_function(examples : List[Any] ):
        # max_length=None => use the model max length (it's actually the default)
        outputs =tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=True , max_length=None )
        return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
        tokenized_datasets =datasets.map(
            tokenize_function , batched=True , remove_columns=["idx", "sentence1", "sentence2"] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets =tokenized_datasets.rename_column("label" , "labels" )
    def collate_fn(examples : str ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length =128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of =16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of =8
        else:
            pad_to_multiple_of =None
        return tokenizer.pad(
            examples , padding="longest" , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors="pt" , )
    # Instantiate dataloaders.
    train_dataloader =DataLoader(
        tokenized_datasets["train"] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader =DataLoader(
        tokenized_datasets["validation"] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE )
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders # noqa: F811
def training_function ( config : List[Any] , args : str ):
    """simple docstring"""
    if os.environ.get("TESTING_MOCKED_DATALOADERS" , None ) == "1":
        config["num_epochs"] =2
    # Initialize accelerator
    accelerator =Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr =config["lr"]
    num_epochs =int(config["num_epochs"] )
    seed =int(config["seed"] )
    batch_size =int(config["batch_size"] )
    metric =evaluate.load("glue" , "mrpc" )
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps =1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps =batch_size // MAX_GPU_BATCH_SIZE
        batch_size =MAX_GPU_BATCH_SIZE
    set_seed(seed )
    train_dataloader , eval_dataloader =get_dataloaders(accelerator , batch_size )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model =AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=True )
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model =model.to(accelerator.device )
    # Instantiate optimizer
    optimizer =AdamW(params=model.parameters() , lr=lr )
    # Instantiate scheduler
    lr_scheduler =get_linear_schedule_with_warmup(
        optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
    model , optimizer , train_dataloader , eval_dataloader , lr_scheduler =accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # Now we train the model
    for epoch in range(num_epochs ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            outputs =model(**batch )
            loss =outputs.loss
            loss =loss / gradient_accumulation_steps
            accelerator.backward(loss )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        samples_seen =0
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs =model(**batch )
            predictions =outputs.logits.argmax(dim=-1 )
            predictions , references =accelerator.gather((predictions, batch["labels"]) )
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader ) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions =predictions[: len(eval_dataloader.dataset ) - samples_seen]
                    references =references[: len(eval_dataloader.dataset ) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
# All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
# accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric =metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f'''epoch {epoch}:''' , eval_metric )
def main ( ):
    """simple docstring"""
    parser =argparse.ArgumentParser(description="Simple example of training script." )
    parser.add_argument(
        "--mixed_precision" , type=str , default=None , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU." , )
    parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
    args =parser.parse_args()
    config ={"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config , args )
if __name__ == "__main__":
main()
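Assuming the script is saved as multi_process_metrics.py (the filename is a guess), it runs in every setting listed in the header through the accelerate CLI: `accelerate config` once to describe the machine, then `accelerate launch multi_process_metrics.py --mixed_precision fp16`.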
| 95 | 0 |
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp( self : Union[str, Any] ):
        super().setUp()
# fmt: off
lowerCAmelCase : List[str] = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
# fmt: on
lowerCAmelCase : Tuple = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
lowerCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(lowerCAmelCase__ ) + '\n' )
    def get_tokenizer( self : Tuple , **kwargs : List[str] ):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self : Tuple , tokenizer : Union[str, Any] ):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text
@unittest.skip('MGP-STR always lower cases letters.' )
def lowercase__ ( self : Dict ):
pass
def lowercase__ ( self : Optional[int] ):
        tokenizers = self.get_tokenizers(do_lower_case=False )
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                special_token = "[SPECIAL_TOKEN]"
                tokenizer.add_special_tokens({'cls_token': special_token} )
                encoded_special_token = tokenizer.encode([special_token] , add_special_tokens=False )
                self.assertEqual(len(encoded_special_token ) , 1 )
                decoded = tokenizer.decode(encoded_special_token , skip_special_tokens=True )
                self.assertTrue(special_token not in decoded )
    def lowercase__ ( self : Dict ):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                input_text , output_text = self.get_input_output_texts(tokenizer )
                tokens = tokenizer.tokenize(input_text )
                ids = tokenizer.convert_tokens_to_ids(tokens )
                ids_a = tokenizer.encode(input_text , add_special_tokens=False )
                self.assertListEqual(ids , ids_a )
                tokens_a = tokenizer.convert_ids_to_tokens(ids )
                self.assertNotEqual(len(tokens_a ) , 0 )
                text_a = tokenizer.decode(ids )
                self.assertIsInstance(text_a , str )
                self.assertEqual(text_a.replace(' ' , '' ) , output_text )
@unittest.skip('MGP-STR tokenizer only handles one sequence.' )
def lowercase__ ( self : Tuple ):
pass
@unittest.skip('inputs cannot be pretokenized in MgpstrTokenizer' )
def lowercase__ ( self : Tuple ):
pass
| 138 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester ( unittest.TestCase):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=1_8 , min_resolution=3_0 , max_resolution=4_0_0 , do_resize=True , size=None , do_center_crop=True , crop_size=None , do_flip_channel_order=True , ) -> Optional[Any]:
        '''simple docstring'''
        size =size if size is not None else {"shortest_edge": 2_0}
        crop_size =crop_size if crop_size is not None else {"height": 1_8, "width": 1_8}
        self.parent =parent
        self.batch_size =batch_size
        self.num_channels =num_channels
        self.image_size =image_size
        self.min_resolution =min_resolution
        self.max_resolution =max_resolution
        self.do_resize =do_resize
        self.size =size
        self.do_center_crop =do_center_crop
        self.crop_size =crop_size
        self.do_flip_channel_order =do_flip_channel_order
    def prepare_image_processor_dict( self ) -> Optional[int]:
        '''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class MobileViTImageProcessingTest ( ImageProcessingSavingTestMixin , unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None
    def setUp( self ) -> Optional[Any]:
        '''simple docstring'''
        self.image_processor_tester =MobileViTImageProcessingTester(self )
    @property
    def image_processor_dict( self ) -> List[str]:
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
        image_processing =self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , "do_resize" ) )
        self.assertTrue(hasattr(image_processing , "size" ) )
        self.assertTrue(hasattr(image_processing , "do_center_crop" ) )
        self.assertTrue(hasattr(image_processing , "center_crop" ) )
        self.assertTrue(hasattr(image_processing , "do_flip_channel_order" ) )
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
        image_processor =self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"shortest_edge": 2_0} )
        self.assertEqual(image_processor.crop_size , {"height": 1_8, "width": 1_8} )
        image_processor =self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
        self.assertEqual(image_processor.size , {"shortest_edge": 4_2} )
        self.assertEqual(image_processor.crop_size , {"height": 8_4, "width": 8_4} )
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
        image_processing =self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs =prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images =image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        encoded_images =image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
def _lowercase ( self ) -> Any:
'''simple docstring'''
        image_processing =self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs =prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images =image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        encoded_images =image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
        image_processing =self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs =prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images =image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        encoded_images =image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
| 95 | 0 |
"""simple docstring"""
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 167 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaImageProcessingTester ( unittest.TestCase):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=1_8 , min_resolution=3_0 , max_resolution=4_0_0 , do_resize=True , size=None , do_center_crop=True , crop_size=None , ) -> Optional[int]:
        '''simple docstring'''
        size =size if size is not None else {"shortest_edge": 2_0}
        crop_size =crop_size if crop_size is not None else {"height": 1_8, "width": 1_8}
        self.parent =parent
        self.batch_size =batch_size
        self.num_channels =num_channels
        self.image_size =image_size
        self.min_resolution =min_resolution
        self.max_resolution =max_resolution
        self.do_resize =do_resize
        self.size =size
        self.do_center_crop =do_center_crop
        self.crop_size =crop_size
    def prepare_image_processor_dict( self ) -> str:
        '''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class MobileNetVaImageProcessingTest ( ImageProcessingSavingTestMixin , unittest.TestCase):
    image_processing_class = MobileNetVaImageProcessor if is_vision_available() else None
    def setUp( self ) -> Tuple:
        '''simple docstring'''
        self.image_processor_tester =MobileNetVaImageProcessingTester(self )
    @property
    def image_processor_dict( self ) -> List[str]:
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase ( self ) -> Any:
'''simple docstring'''
        image_processing =self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , "do_resize" ) )
        self.assertTrue(hasattr(image_processing , "size" ) )
        self.assertTrue(hasattr(image_processing , "do_center_crop" ) )
        self.assertTrue(hasattr(image_processing , "crop_size" ) )
def _lowercase ( self ) -> str:
'''simple docstring'''
        image_processor =self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"shortest_edge": 2_0} )
        self.assertEqual(image_processor.crop_size , {"height": 1_8, "width": 1_8} )
        image_processor =self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
        self.assertEqual(image_processor.size , {"shortest_edge": 4_2} )
        self.assertEqual(image_processor.crop_size , {"height": 8_4, "width": 8_4} )
def _lowercase ( self ) -> Any:
'''simple docstring'''
pass
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
        image_processing =self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs =prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images =image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        encoded_images =image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
def _lowercase ( self ) -> int:
'''simple docstring'''
        image_processing =self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs =prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images =image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        encoded_images =image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
        image_processing =self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs =prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images =image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        encoded_images =image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
| 95 | 0 |
def odd_even_transposition( arr )-> list:
    arr_size = len(arr )
    for pass_num in range(arr_size ):
        for i in range(pass_num % 2 , arr_size - 1 , 2 ):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr
if __name__ == "__main__":
    arr =list(range(10, 0, -1))
    print(f"""Original: {arr}. Sorted: {odd_even_transposition(arr)}""") | 262 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_convbert""": ["""CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvBertConfig""", """ConvBertOnnxConfig"""],
"""tokenization_convbert""": ["""ConvBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : List[Any] = ["""ConvBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convbert"] = [
"""CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConvBertForMaskedLM""",
"""ConvBertForMultipleChoice""",
"""ConvBertForQuestionAnswering""",
"""ConvBertForSequenceClassification""",
"""ConvBertForTokenClassification""",
"""ConvBertLayer""",
"""ConvBertModel""",
"""ConvBertPreTrainedModel""",
"""load_tf_weights_in_convbert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
"""TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFConvBertForMaskedLM""",
"""TFConvBertForMultipleChoice""",
"""TFConvBertForQuestionAnswering""",
"""TFConvBertForSequenceClassification""",
"""TFConvBertForTokenClassification""",
"""TFConvBertLayer""",
"""TFConvBertModel""",
"""TFConvBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
UpperCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 95 | 0 |
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("dataset_size" , [None, 4_00 * 2**20, 6_00 * 2**20] )
@pytest.mark.parametrize("input_in_memory_max_size" , ["default", 0, 1_00 * 2**20, 9_00 * 2**20] )
def test_is_small_dataset ( dataset_size : Union[str, Any] , input_in_memory_max_size : Optional[Any] , monkeypatch : Any ):
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config , "IN_MEMORY_MAX_SIZE" , input_in_memory_max_size )
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size )
    assert result == expected
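A hedged sketch of the rule this test pins down: a dataset counts as "small" when its byte size is positive and below datasets.config.IN_MEMORY_MAX_SIZE (0 disables in-memory loading):
import datasets.config
from datasets.utils.info_utils import is_small_dataset
datasets.config.IN_MEMORY_MAX_SIZE = 500 * 2**20  # illustrative override: 500 MiB
print(is_small_dataset(400 * 2**20 ) )  # True: 400 MiB < 500 MiB
print(is_small_dataset(600 * 2**20 ) )  # False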
| 92 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase : Dict = logging.get_logger(__name__)
UpperCAmelCase : Tuple = {
"""caidas/swin2sr-classicalsr-x2-64""": (
"""https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"""
),
}
class Swin2SRConfig ( PretrainedConfig):
    model_type = """swin2sr"""
    attribute_map = {
        """hidden_size""": """embed_dim""",
        """num_attention_heads""": """num_heads""",
        """num_hidden_layers""": """num_layers""",
    }
    def __init__( self , image_size=6_4 , patch_size=1 , num_channels=3 , embed_dim=1_8_0 , depths=[6, 6, 6, 6, 6, 6] , num_heads=[6, 6, 6, 6, 6, 6] , window_size=8 , mlp_ratio=2.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.02 , layer_norm_eps=1E-5 , upscale=2 , img_range=1.0 , resi_connection="1conv" , upsampler="pixelshuffle" , **kwargs , ) -> int:
        '''simple docstring'''
        super().__init__(**kwargs )
        self.image_size =image_size
        self.patch_size =patch_size
        self.num_channels =num_channels
        self.embed_dim =embed_dim
        self.depths =depths
        self.num_layers =len(depths )
        self.num_heads =num_heads
        self.window_size =window_size
        self.mlp_ratio =mlp_ratio
        self.qkv_bias =qkv_bias
        self.hidden_dropout_prob =hidden_dropout_prob
        self.attention_probs_dropout_prob =attention_probs_dropout_prob
        self.drop_path_rate =drop_path_rate
        self.hidden_act =hidden_act
        self.use_absolute_embeddings =use_absolute_embeddings
        self.layer_norm_eps =layer_norm_eps
        self.initializer_range =initializer_range
        self.upscale =upscale
        self.img_range =img_range
        self.resi_connection =resi_connection
        self.upsampler =upsampler
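A quick instantiation sketch; everything below follows directly from the fixed __init__ above (no checkpoint involved):
cfg = Swin2SRConfig(embed_dim=6_0 , depths=[4, 4, 4, 4] , num_heads=[6, 6, 6, 6] )
print(cfg.num_layers )  # 4 -- always len(depths)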
| 95 | 0 |
from manim import *
class UpperCamelCase_ ( UpperCamelCase__ ):
'''simple docstring'''
def _UpperCamelCase ( self ) -> List[str]:
snake_case_ = Rectangle(height=0.5 , width=0.5 )
snake_case_ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
snake_case_ = [mem.copy() for i in range(6 )]
snake_case_ = [mem.copy() for i in range(6 )]
snake_case_ = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
snake_case_ = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
snake_case_ = VGroup(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
snake_case_ = Text('CPU' , font_size=24 )
snake_case_ = Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowerCAmelCase__ )
snake_case_ = [mem.copy() for i in range(4 )]
snake_case_ = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
snake_case_ = Text('GPU' , font_size=24 )
snake_case_ = Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__ )
gpu.move_to([-1, -1, 0] )
self.add(lowerCAmelCase__ )
snake_case_ = [mem.copy() for i in range(6 )]
snake_case_ = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
snake_case_ = Text('Model' , font_size=24 )
snake_case_ = Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__ )
model.move_to([3, -1.0, 0] )
self.add(lowerCAmelCase__ )
snake_case_ = []
for i, rect in enumerate(lowerCAmelCase__ ):
rect.set_stroke(lowerCAmelCase__ )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
snake_case_ = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowerCAmelCase__ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowerCAmelCase__ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=lowerCAmelCase__ , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=lowerCAmelCase__ , buff=0.0 )
self.add(lowerCAmelCase__ )
cpu_targs.append(lowerCAmelCase__ )
snake_case_ = [mem.copy() for i in range(6 )]
snake_case_ = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
snake_case_ = Text('Loaded Checkpoint' , font_size=24 )
snake_case_ = Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , aligned_edge=lowerCAmelCase__ , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
snake_case_ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
snake_case_ = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ = MarkupText(
F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(lowerCAmelCase__ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
snake_case_ = MarkupText(
F'''Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCAmelCase__ ) , Write(lowerCAmelCase__ ) )
self.play(Write(lowerCAmelCase__ , run_time=1 ) , Create(lowerCAmelCase__ , run_time=1 ) )
snake_case_ = []
snake_case_ = []
for i, rect in enumerate(lowerCAmelCase__ ):
snake_case_ = fill.copy().set_fill(lowerCAmelCase__ , opacity=0.7 )
target.move_to(lowerCAmelCase__ )
first_animations.append(GrowFromCenter(lowerCAmelCase__ , run_time=1 ) )
snake_case_ = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(lowerCAmelCase__ , run_time=1.5 ) )
self.play(*lowerCAmelCase__ )
self.play(*lowerCAmelCase__ )
self.wait()
| 178 |
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class __lowerCAmelCase :
pass
| 95 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_pix2struct""": [
"""PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Pix2StructConfig""",
"""Pix2StructTextConfig""",
"""Pix2StructVisionConfig""",
],
"""processing_pix2struct""": ["""Pix2StructProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ["""Pix2StructImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pix2struct"] = [
"""PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Pix2StructPreTrainedModel""",
"""Pix2StructForConditionalGeneration""",
"""Pix2StructVisionModel""",
"""Pix2StructTextModel""",
]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_pix2struct import Pix2StructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 320 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
class TextSummarizationTool ( PipelineTool):
    default_checkpoint = """philschmid/bart-large-cnn-samsum"""
    description = (
        """This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, """
        """and returns a summary of the text."""
    )
    name = """summarizer"""
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    inputs = ["""text"""]
    outputs = ["""text"""]
    def encode( self , text ) -> Tuple:
        '''simple docstring'''
        return self.pre_processor(text , return_tensors="pt" , truncation=True )
    def forward( self , inputs ) -> Union[str, Any]:
        '''simple docstring'''
        return self.model.generate(**inputs )[0]
    def decode( self , outputs ) -> Any:
        '''simple docstring'''
        return self.pre_processor.decode(outputs , skip_special_tokens=True , clean_up_tokenization_spaces=True )
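A hedged usage sketch: PipelineTool subclasses are callable, with __call__ chaining the encode -> forward -> decode steps above; the first call downloads the default checkpoint named in the class.
tool = TextSummarizationTool()
print(tool("Long English text to summarize ..." ) )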
| 95 | 0 |
def bead_sort ( sequence ) -> list:
    """simple docstring"""
    if any(not isinstance(x , int ) or x < 0 for x in sequence ):
        raise TypeError("Sequence must be list of non-negative integers" )
    for _ in range(len(sequence ) ):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence , sequence[1:] ) ):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
| 15 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
UpperCAmelCase : int = logging.get_logger(__name__)
UpperCAmelCase : Optional[Any] = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
_model_names = [
"""small""",
"""small-base""",
"""medium""",
"""medium-base""",
"""intermediate""",
"""intermediate-base""",
"""large""",
"""large-base""",
"""xlarge""",
"""xlarge-base""",
]
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt""",
"""funnel-transformer/small-base""": """https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt""",
"""funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt""",
"""funnel-transformer/medium-base""": (
"""https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"""
),
"""funnel-transformer/intermediate""": (
"""https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"""
),
"""funnel-transformer/intermediate-base""": (
"""https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"""
),
"""funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt""",
"""funnel-transformer/large-base""": """https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt""",
"""funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt""",
"""funnel-transformer/xlarge-base""": (
"""https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json""",
"""funnel-transformer/small-base""": (
"""https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json""",
"""funnel-transformer/medium-base""": (
"""https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/intermediate""": (
"""https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"""
),
"""funnel-transformer/intermediate-base""": (
"""https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json""",
"""funnel-transformer/large-base""": (
"""https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json""",
"""funnel-transformer/xlarge-base""": (
"""https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"""
),
},
}
UpperCAmelCase : Optional[int] = {F"""funnel-transformer/{name}""": 512 for name in _model_names}
UpperCAmelCase : Optional[int] = {F"""funnel-transformer/{name}""": {"""do_lower_case""": True} for name in _model_names}
class FunnelTokenizerFast ( PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    cls_token_type_id : int = 2
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="<unk>" , sep_token="<sep>" , pad_token="<pad>" , cls_token="<cls>" , mask_token="<mask>" , bos_token="<s>" , eos_token="</s>" , clean_text=True , tokenize_chinese_chars=True , strip_accents=None , wordpieces_prefix="##" , **kwargs , ) -> Optional[int]:
        '''simple docstring'''
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , bos_token=bos_token , eos_token=eos_token , clean_text=clean_text , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , wordpieces_prefix=wordpieces_prefix , **kwargs , )
        normalizer_state =json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , do_lower_case ) != do_lower_case
            or normalizer_state.get("strip_accents" , strip_accents ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class =getattr(normalizers , normalizer_state.pop("type" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer =normalizer_class(**normalizer_state )
        self.do_lower_case =do_lower_case
    def _lowercase ( self , token_ids_0 , token_ids_1=None ) -> str:
        '''simple docstring'''
        output =[self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def _lowercase ( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        '''simple docstring'''
        sep =[self.sep_token_id]
        cls =[self.cls_token_id]
        if token_ids_1 is None:
            return len(cls ) * [self.cls_token_type_id] + len(token_ids_0 + sep ) * [0]
        return len(cls ) * [self.cls_token_type_id] + len(token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def _lowercase ( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        '''simple docstring'''
        files =self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
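A worked check of the Funnel token-type convention implemented above: the [CLS] position gets cls_token_type_id (2), the first segment 0, the second segment 1. For token_ids_0=[5, 6] and token_ids_1=[7]:
# [2] + [0, 0, 0] + [1, 1]  ->  [2, 0, 0, 0, 1, 1]
#  cls   ids_0+sep   ids_1+sep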
| 95 | 0 |
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _UpperCamelCase ( UpperCamelCase__ , unittest.TestCase ):
UpperCAmelCase_ = VideoToVideoSDPipeline
UpperCAmelCase_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"""video"""} ) - {"""image""", """width""", """height"""}
UpperCAmelCase_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""video"""} ) - {"""image"""}
UpperCAmelCase_ = PipelineTesterMixin.required_optional_params - {"""latents"""}
UpperCAmelCase_ = False
# No `output_type`.
UpperCAmelCase_ = frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
def UpperCAmelCase_ ( self :Union[str, Any] ) -> List[Any]:
torch.manual_seed(0 )
UpperCAmelCase__ = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") , up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") , cross_attention_dim=32 , attention_head_dim=4 , )
UpperCAmelCase__ = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , clip_sample=lowerCAmelCase__ , set_alpha_to_one=lowerCAmelCase__ , )
torch.manual_seed(0 )
UpperCAmelCase__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
UpperCAmelCase__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
UpperCAmelCase__ = CLIPTextModel(lowerCAmelCase__ )
UpperCAmelCase__ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
UpperCAmelCase__ = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def UpperCAmelCase_ ( self :Optional[int] , lowerCamelCase :Dict , lowerCamelCase :Union[str, Any]=0 ) -> Optional[int]:
UpperCAmelCase__ = floats_tensor((1, 3, 3, 32, 32) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
if str(lowerCAmelCase__ ).startswith("mps" ):
UpperCAmelCase__ = torch.manual_seed(lowerCAmelCase__ )
else:
UpperCAmelCase__ = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
UpperCAmelCase__ = {
"prompt": "A painting of a squirrel eating a burger",
"video": video,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "pt",
}
return inputs
def UpperCAmelCase_ ( self :Optional[int] ) -> Any:
UpperCAmelCase__ = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase__ = self.get_dummy_components()
UpperCAmelCase__ = VideoToVideoSDPipeline(**lowerCAmelCase__ )
UpperCAmelCase__ = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
UpperCAmelCase__ = self.get_dummy_inputs(lowerCAmelCase__ )
UpperCAmelCase__ = "np"
UpperCAmelCase__ = sd_pipe(**lowerCAmelCase__ ).frames
UpperCAmelCase__ = frames[0][-3:, -3:, -1]
assert frames[0].shape == (32, 32, 3)
UpperCAmelCase__ = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def UpperCAmelCase_ ( self :str ) -> Optional[Any]:
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowerCAmelCase__ , expected_max_diff=5e-3 )
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def UpperCAmelCase_ ( self :List[Any] ) -> Optional[int]:
pass
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def UpperCAmelCase_ ( self :List[Any] ) -> int:
pass
@unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline." )
def UpperCAmelCase_ ( self :Any ) -> Union[str, Any]:
pass
def UpperCAmelCase_ ( self :Union[str, Any] ) -> Any:
return super().test_progress_bar()
@slow
@skip_mps
class _UpperCamelCase ( unittest.TestCase ):
    def UpperCAmelCase_ ( self :Optional[int] ) -> Any:
        pipe = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL" , torch_dtype=torch.float16 )
        pipe.enable_model_cpu_offload()
        # 10 frames
        generator = torch.Generator(device="cpu" ).manual_seed(0 )
        video = torch.randn((1, 10, 3, 1024, 576) , generator=generator )
        video = video.to("cuda" )
        prompt = "Spiderman is surfing"
        video_frames = pipe(prompt , video=video , generator=generator , num_inference_steps=3 , output_type="pt" ).frames
        expected_array = np.array([-1.0_45_89_84, -1.1_27_92_97, -0.9_66_30_86, -0.91_50_39_06, -0.75_09_76_56] )
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1e-2
| 169 |
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    """A dataset reader that reads from a Spark DataFrame."""

    def __init__(self, df: pyspark.sql.DataFrame, split: Optional[NamedSplit] = None, features: Optional[Features] = None, streaming: bool = True, cache_dir: str = None, keep_in_memory: bool = False, working_dir: str = None, load_from_cache_file: bool = True, file_format: str = "arrow", **kwargs) -> None:
        super().__init__(split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, **kwargs)
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(df=df, features=features, cache_dir=cache_dir, working_dir=working_dir, **kwargs)

    def read(self):
        # Either stream directly from the DataFrame or materialize it into the cache.
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(download_mode=download_mode, file_format=self._file_format)
        return self.builder.as_dataset(split=self.split)
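# Minimal usage sketch for the reader above, assuming a local pyspark session
# is acceptable; in practice this class is normally reached through the public
# `datasets.Dataset.from_spark` entry point rather than instantiated directly.
from pyspark.sql import SparkSession

spark = SparkSession.builder.master("local[*]").getOrCreate()
df = spark.createDataFrame([("hello", 0), ("world", 1)], ["text", "label"])
ds = SparkDatasetReader(df, streaming=False).read()
print(ds)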
| 95 | 0 |
from string import ascii_uppercase

dict1 = {char: i for i, char in enumerate(ascii_uppercase)}
dict2 = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    """Repeat the key until it is as long as the message."""
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    """Encrypt: shift each letter back by the matching key letter (spaces pass through)."""
    encrypted = ""
    i = 0
    for letter in message:
        if letter == " ":
            encrypted += " "
        else:
            x = (dict1[letter] - dict1[key_new[i]]) % 26
            i += 1
            encrypted += dict2[x]
    return encrypted


def original_text(cipher_txt: str, key_new: str) -> str:
    """Decrypt: shift each letter forward by the matching key letter."""
    or_txt = ""
    i = 0
    for letter in cipher_txt:
        if letter == " ":
            or_txt += " "
        else:
            x = (dict1[letter] + dict1[key_new[i]] + 26) % 26
            i += 1
            or_txt += dict2[x]
    return or_txt


def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
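# Round-trip sanity check for the functions above: decryption exactly inverts
# encryption for any message/key pair handled by generate_key.
check_key = generate_key("THE GERMAN ATTACK", "SECRET")
check_cipher = cipher_text("THE GERMAN ATTACK", check_key)
assert original_text(check_cipher, check_key) == "THE GERMAN ATTACK"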
| 156 |
from math import pi
def arc_length(angle: int, radius: int) -> float:
    """Return the length of a circular arc spanning `angle` degrees."""
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))
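# Quick check: a 90-degree arc of a radius-10 circle is a quarter circumference,
# i.e. 2*pi*10/4 = 5*pi (about 15.71).
assert abs(arc_length(90, 10) - 5 * pi) < 1e-9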
| 95 | 0 |
"""simple docstring"""
import os
import pytest
from transformers.dynamic_module_utils import get_imports
lowerCamelCase_ = """
import os
"""
lowerCamelCase_ = """
def foo():
import os
return False
"""
lowerCamelCase_ = """
def foo():
def bar():
if True:
import os
return False
return bar()
"""
lowerCamelCase_ = """
import os
try:
import bar
except ImportError:
raise ValueError()
"""
lowerCamelCase_ = """
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
"""
lowerCamelCase_ = """
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
"""
lowerCamelCase_ = """
import os
try:
import bar
except ImportError as e:
raise ValueError()
"""
lowerCamelCase_ = """
import os
try:
import bar
except:
raise ValueError()
"""
lowerCamelCase_ = """
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
"""
lowerCamelCase_ = """
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
"""
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("case" ,A__ )
def snake_case ( A__ ,A__ ):
UpperCAmelCase_ : int = os.path.join(A__ ,"test_file.py" )
with open(A__ ,"w" ) as _tmp_file:
_tmp_file.write(A__ )
UpperCAmelCase_ : List[str] = get_imports(A__ )
assert parsed_imports == ["os"]
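# The test above targets transformers' get_imports, which lists a module's
# top-level dependencies while ignoring imports guarded by try/except blocks
# (treated as optional dependencies). A minimal regex sketch of that idea --
# an approximation for illustration, not the library's actual implementation:
import re


def get_imports_sketch(source: str) -> list:
    # Drop try/except headers and bodies so guarded imports are not reported.
    stripped = re.sub(r"\s*try\s*:.*?except.*?:", "", source, flags=re.DOTALL)
    # Collect `import x` and `from x import ...` module roots.
    found = re.findall(r"^\s*import\s+(\S+)", stripped, flags=re.MULTILINE)
    found += re.findall(r"^\s*from\s+(\S+)\s+import", stripped, flags=re.MULTILINE)
    return sorted({name.split(".")[0] for name in found})


assert get_imports_sketch(TOP_LEVEL_TRY_IMPORT) == ["os"]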
| 268 |
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
GLUE_TASKS_NUM_LABELS = {
"""cola""": 2,
"""mnli""": 3,
"""mrpc""": 2,
"""sst-2""": 2,
"""sts-b""": 1,
"""qqp""": 2,
"""qnli""": 2,
"""rte""": 2,
"""wnli""": 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_folder_path, finetuning_task=None):
    # Initialise the PyTorch model from the JSON config.
    config = XLNetConfig.from_json_file(bert_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from the TensorFlow checkpoint.
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save the PyTorch model weights and configuration.
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--xlnet_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained XLNet model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the folder to store the PyTorch model or dataset/vocab.""",
)
parser.add_argument(
"""--finetuning_task""",
default=None,
type=str,
help="""Name of a task on which the XLNet TensorFlow model was fine-tuned""",
)
args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
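# Example programmatic invocation of the converter above; the checkpoint paths
# are placeholders, so the call is guarded rather than executed as written.
if False:  # set up real paths before enabling
    convert_xlnet_checkpoint_to_pytorch(
        "xlnet_cased_L-12_H-768_A-12/xlnet_model.ckpt",
        "xlnet_cased_L-12_H-768_A-12/xlnet_config.json",
        "./xlnet-pytorch",
        finetuning_task="sts-b",
    )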
| 95 | 0 |
import math


def insertion_sort(array, start=0, end=0):
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array, index, heap_size):  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node

    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index

    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index

    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array):
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)
    return array


def median_of_3(array, first_index, middle_index, last_index):
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array, low, high, pivot):
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array):
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array, start, end, size_threshold, max_depth):
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
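# Design note on the restored code above: introsort runs quicksort with a
# median-of-three pivot, switches to heapsort once recursion depth exceeds
# 2*ceil(log2(n)) to keep the worst case O(n log n), and finishes slices
# shorter than 16 elements with insertion sort. Quick self-check:
assert sort([4, 1, 3, 9, 7]) == [1, 3, 4, 7, 9]
assert sort([]) == []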
| 111 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
    # See all CANINE models at https://huggingface.co/models?filter=canine
}
class CanineConfig(PretrainedConfig):
    model_type = "canine"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=16384, type_vocab_size=16, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, bos_token_id=0xE000, eos_token_id=0xE001, downsampling_rate=4, upsampling_kernel_size=4, num_hash_functions=8, num_hash_buckets=16384, local_transformer_stride=128, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
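# Quick instantiation check using the defaults restored above:
config = CanineConfig()
print(config.hidden_size, config.downsampling_rate, config.num_hash_buckets)  # 768 4 16384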
| 95 | 0 |
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class OptimizationFTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def testGradientAccumulator(self):
        accumulator = GradientAccumulator()
        accumulator([tf.constant([1.0, 2.0])])
        accumulator([tf.constant([-2.0, 1.0])])
        accumulator([tf.constant([-1.0, 2.0])])
        with self.assertRaises(ValueError):
            accumulator([tf.constant([1.0, 1.0]), tf.constant([2.0, 2.0])])
        self.assertEqual(accumulator.step, 3)
        self.assertEqual(len(accumulator.gradients), 1)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [-2.0, 5.0], tol=1e-2)
        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [0.0, 0.0], tol=1e-2)

    def testGradientAccumulatorDistributionStrategy(self):
        context._context = None
        ops.enable_eager_execution_internal()
        physical_devices = tf.config.list_physical_devices("CPU")
        if len(physical_devices) == 1:
            tf.config.set_logical_device_configuration(
                physical_devices[0], [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()]
            )
        devices = tf.config.list_logical_devices(device_type="CPU")
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2])

        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0])
            optimizer, _ = create_optimizer(5e-5, 10, 5)
            gradient_placeholder = tf.Variable([0.0, 0.0], trainable=False)

        def accumulate_on_replica(gradient):
            accumulator([gradient])

        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients, [variable])))

        @tf.function
        def accumulate(grad1, grad2):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder)
                local_variables[0].assign(grad1)
                local_variables[1].assign(grad2)
                strategy.run(accumulate_on_replica, args=(gradient_placeholder,))

        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(apply_on_replica)

        def _check_local_values(grad1, grad2):
            values = strategy.experimental_local_results(accumulator._gradients[0])
            self.assertListAlmostEqual(values[0].value(), grad1, tol=1e-2)
            self.assertListAlmostEqual(values[1].value(), grad2, tol=1e-2)

        accumulate([1.0, 2.0], [-1.0, 1.0])
        accumulate([3.0, -1.0], [-1.0, -1.0])
        accumulate([-2.0, 2.0], [3.0, -2.0])
        self.assertEqual(accumulator.step, 3)
        _check_local_values([2.0, 3.0], [1.0, -2.0])
        apply_grad()
        self.assertListAlmostEqual(variable.value(), [4.0, 3.0], tol=1e-2)
        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        _check_local_values([0.0, 0.0], [0.0, 0.0])
| 138 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
UpperCAmelCase : int = False
class VersatileDiffusionMegaPipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt", image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy",
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt", image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy",
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_dual_guided_then_text_to_image_then_image_variation(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt, image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy").images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
| 95 | 0 |
"""simple docstring"""
import math
def lowercase_ ( _UpperCAmelCase ):
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(_UpperCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def lowercase_ ( _UpperCAmelCase = 1_0001 ):
"""simple docstring"""
try:
A_ : Optional[int] = int(_UpperCAmelCase )
except (TypeError, ValueError):
raise TypeError('''Parameter nth must be int or castable to int.''' ) from None
if nth <= 0:
raise ValueError('''Parameter nth must be greater than or equal to one.''' )
A_ : list[int] = []
A_ : int = 2
while len(_UpperCAmelCase ) < nth:
if is_prime(_UpperCAmelCase ):
primes.append(_UpperCAmelCase )
num += 1
else:
num += 1
return primes[len(_UpperCAmelCase ) - 1]
if __name__ == "__main__":
print(f'{solution() = }')
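# Quick checks: 13 is prime, 15 is not, and the 6th prime (2, 3, 5, 7, 11, 13) is 13.
assert is_prime(13) and not is_prime(15)
assert solution(6) == 13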
| 167 |
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class BaseStreamer:
    def put(self, value):
        """Called by `.generate()` to push new tokens to the streamer."""
        raise NotImplementedError()

    def end(self):
        """Called by `.generate()` to signal the end of generation."""
        raise NotImplementedError()


class TextStreamer(BaseStreamer):
    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs

        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True

    def put(self, value):
        """Receives tokens, decodes them, and emits them as soon as they form entire words."""
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError("TextStreamer only supports batch size 1")
        elif len(value.shape) > 1:
            value = value[0]

        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return

        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)

        # After the symbol for a new line, we flush the cache.
        if text.endswith("\n"):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(" ") + 1]
            self.print_len += len(printable_text)

        self.on_finalized_text(printable_text)

    def end(self):
        """Flushes any remaining cache and signals the end of the stream."""
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ""

        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)

    def on_finalized_text(self, text: str, stream_end: bool = False):
        print(text, flush=True, end="" if not stream_end else None)

    def _is_chinese_char(self, cp):
        """Checks whether the codepoint is in a CJK Unicode block."""
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)
            or (cp >= 0x20000 and cp <= 0x2A6DF)
            or (cp >= 0x2A700 and cp <= 0x2B73F)
            or (cp >= 0x2B740 and cp <= 0x2B81F)
            or (cp >= 0x2B820 and cp <= 0x2CEAF)
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)
        ):
            return True
        return False


class TextIteratorStreamer(TextStreamer):
    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, timeout: Optional[float] = None, **decode_kwargs):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout

    def on_finalized_text(self, text: str, stream_end: bool = False):
        self.text_queue.put(text, timeout=self.timeout)
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout)

    def __iter__(self):
        return self

    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
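# Typical streaming setup for the classes above, per the transformers
# documentation; the model name is only an example -- any causal LM works.
from transformers import AutoModelForCausalLM, AutoTokenizer

tok = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
inputs = tok(["An increasing sequence: one,"], return_tensors="pt")
streamer = TextStreamer(tok)
# Tokens are printed to stdout word-by-word as generate() produces them.
_ = model.generate(**inputs, streamer=streamer, max_new_tokens=20)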
| 95 | 0 |
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
logger = logging.get_logger(__name__)


class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 262 |
def solution(length: int = 50) -> int:
    """Count the fillings of a row of `length` units via a dynamic-programming table."""
    ways_number = [1] * (length + 1)

    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            ways_number[row_length] += 1

    return ways_number[length]
if __name__ == "__main__":
print(F"""{solution() = }""")
| 95 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main()
| 92 |
from __future__ import annotations
def bucket_sort(my_list: list) -> list:
    """Distribute values into unit-width buckets, sort each bucket, and concatenate."""
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]

    for i in my_list:
        buckets[int(i - min_value)].append(i)

    return [v for bucket in buckets for v in sorted(bucket)]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
| 95 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def make_batched(videos) -> List[List[ImageInput]]:
    # Normalize the input into a batch of videos (each a list of frames).
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")


class VideoMAEImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(self, image: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(self, videos: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format)
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
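# Illustrative use of the processor above on a random 16-frame clip. The class
# name was restored from the obfuscated source, so treat it as an assumption;
# the output layout is (batch, frames, channels, height, width).
import numpy as np

processor = VideoMAEImageProcessor()
clip = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(16)]
batch = processor(clip, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 16, 3, 224, 224)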
| 178 |
import numpy as np
def sigmoid(vector: np.array) -> np.array:
    """Elementwise logistic function: 1 / (1 + e^(-x))."""
    return 1 / (1 + np.exp(-vector))
if __name__ == "__main__":
import doctest
doctest.testmod()
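# Example: the logistic curve is symmetric around 0 and bounded in (0, 1).
print(sigmoid(np.array([-1.0, 0.0, 1.0])))  # approx. [0.2689 0.5 0.7311]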
| 95 | 0 |
"""simple docstring"""
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
__snake_case = """▁"""
VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""spm_file""": """sentencepiece.bpe.model""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json""",
},
"""spm_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model""",
},
"""tokenizer_config_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/m2m100_418M""": 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
"""m2m100""": ["""af""", """am""", """ar""", """ast""", """az""", """ba""", """be""", """bg""", """bn""", """br""", """bs""", """ca""", """ceb""", """cs""", """cy""", """da""", """de""", """el""", """en""", """es""", """et""", """fa""", """ff""", """fi""", """fr""", """fy""", """ga""", """gd""", """gl""", """gu""", """ha""", """he""", """hi""", """hr""", """ht""", """hu""", """hy""", """id""", """ig""", """ilo""", """is""", """it""", """ja""", """jv""", """ka""", """kk""", """km""", """kn""", """ko""", """lb""", """lg""", """ln""", """lo""", """lt""", """lv""", """mg""", """mk""", """ml""", """mn""", """mr""", """ms""", """my""", """ne""", """nl""", """no""", """ns""", """oc""", """or""", """pa""", """pl""", """ps""", """pt""", """ro""", """ru""", """sd""", """si""", """sk""", """sl""", """so""", """sq""", """sr""", """ss""", """su""", """sv""", """sw""", """ta""", """th""", """tl""", """tn""", """tr""", """uk""", """ur""", """uz""", """vi""", """wo""", """xh""", """yi""", """yo""", """zh""", """zu"""],
"""wmt21""": ["""en""", """ha""", """is""", """ja""", """cs""", """ru""", """zh""", """de"""]
}
class M2M100Tokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for M2M100, with fairseq-style language-code tokens."""

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(self, vocab_file, spm_file, src_lang=None, tgt_lang=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", pad_token="<pad>", unk_token="<unk>", language_codes="m2m100", sp_model_kwargs: Optional[Dict[str, Any]] = None, num_madeup_words=8, **kwargs) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f"__{lang_code}__" for lang_code in fairseq_language_code}

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code)
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code) not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang, tgt_lang=tgt_lang, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, unk_token=unk_token, pad_token=pad_token, language_codes=language_codes, sp_model_kwargs=self.sp_model_kwargs, num_madeup_words=num_madeup_words, **kwargs,
        )

        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        self.encoder_size = len(self.encoder)

        self.lang_token_to_id = {
            self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}

        self._src_lang = src_lang if src_lang is not None else "en"
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang)
        self.set_src_lang_special_tokens(self._src_lang)

        self.num_madeup_words = num_madeup_words

    @property
    def vocab_size(self) -> int:
        return len(self.encoder) + len(self.lang_token_to_id)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        if not save_dir.is_dir():
            raise OSError(f"{save_directory} should be a directory")
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))

    def prepare_seq2seq_batch(self, src_texts: List[str], src_lang: str = "en", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "ro", **kwargs) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang)
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _build_translation_inputs(self, raw_inputs, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, **extra_kwargs)
        tgt_lang_id = self.get_lang_id(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def _switch_to_input_mode(self):
        self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Reset the special tokens to the source language setting: no prefix, eos suffix."""
        lang_token = self.get_lang_token(src_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the special tokens to the target language setting: no prefix, eos suffix."""
        lang_token = self.get_lang_token(tgt_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def get_lang_token(self, lang: str) -> str:
        return self.lang_code_to_token[lang]

    def get_lang_id(self, lang: str) -> int:
        lang_token = self.get_lang_token(lang)
        return self.lang_token_to_id[lang_token]


def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str):
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
| 320 |
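# End-to-end usage of the tokenizer above, following the documented M2M100
# example (real Hub checkpoints; downloading the model is required, so treat
# this as an illustrative sketch):
from transformers import M2M100ForConditionalGeneration

model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr")
encoded = tokenizer("La vie est comme une boîte de chocolat.", return_tensors="pt")
generated = model.generate(**encoded, forced_bos_token_id=tokenizer.get_lang_id("en"))
print(tokenizer.batch_decode(generated, skip_special_tokens=True))
# expected (per the model card): ["Life is like a box of chocolate."]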
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009


def _error(example_no, data_set="train"):
    """Difference between the hypothesis output and the actual output."""
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)


def _hypothesis_value(data_input_tuple):
    """Dot product of the feature tuple with the weights, plus the bias term."""
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    """Sum of errors over the training set; index == -1 corresponds to the bias term."""
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector, temp_parameter_vector, atol=absolute_error_limit, rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))
if __name__ == "__main__":
run_gradient_descent()
print("""\nTesting gradient descent for a linear hypothesis function.\n""")
test_gradient_descent()
| 95 | 0 |
from ..utils import DummyObject, requires_backends
class OnnxRuntimeModel(metaclass=DummyObject):
    _backends = ["onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])
| 15 |
def pancake_sort(arr):
    """Sort by repeatedly flipping the largest unsorted element to the front, then into place."""
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum number in arr
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse whole list
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(pancake_sort(unsorted))
| 95 | 0 |
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = DistilBertConfig(
            vocab_size=self.vocab_size, dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, hidden_dim=self.intermediate_size, hidden_act=self.hidden_act, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_distilbert_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDistilBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDistilBertForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDistilBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_distilbert_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDistilBertModel,
            "fill-mask": TFDistilBertForMaskedLM,
            "question-answering": TFDistilBertForQuestionAnswering,
            "text-classification": TFDistilBertForSequenceClassification,
            "token-classification": TFDistilBertForTokenClassification,
            "zero-shot": TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self :List[Any] ) -> Tuple:
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*lowerCAmelCase__ )
def UpperCAmelCase_ ( self :Optional[int] ) -> Union[str, Any]:
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*lowerCAmelCase__ )
def UpperCAmelCase_ ( self :Dict ) -> Optional[int]:
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*lowerCAmelCase__ )
def UpperCAmelCase_ ( self :Optional[Any] ) -> List[str]:
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*lowerCAmelCase__ )
def UpperCAmelCase_ ( self :List[Any] ) -> str:
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*lowerCAmelCase__ )
def UpperCAmelCase_ ( self :Union[str, Any] ) -> Union[str, Any]:
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*lowerCAmelCase__ )
@slow
def test_model_from_pretrained( self ) -> Union[str, Any]:
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
model = TFDistilBertModel.from_pretrained(model_name )
self.assertIsNotNone(model )
@require_tf
class _UpperCamelCase ( unittest.TestCase ):
@slow
def test_output_embeds_base_model( self ) -> Optional[int]:
model = TFDistilBertModel.from_pretrained("distilbert-base-uncased" )
input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
output = model(input_ids )[0]
expected_shape = [1, 6, 768]
self.assertEqual(output.shape , expected_shape )
expected_slice = tf.constant(
[
[
[0.19_26_18_85, -0.13_73_29_55, 0.4_11_97_99],
[0.22_15_01_56, -0.07_42_26_61, 0.39_03_72_04],
[0.22_75_60_18, -0.0_89_64_14, 0.3_70_14_67],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1e-4 )
| 169 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class __lowerCAmelCase ( unittest.TestCase):
def setUp( self ) -> Dict:
'''simple docstring'''
self.tmpdirname = tempfile.mkdtemp()
# fmt: off
vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
self.special_tokens_map = {"unk_token": "<unk>"}
self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(vocab_tokens ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(merges ) )
image_processor_map = {
"do_resize": True,
"size": 2_0,
"do_center_crop": True,
"crop_size": 1_8,
"do_normalize": True,
"image_mean": [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
"image_std": [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(image_processor_map , fp )
def get_tokenizer( self , **lowerCAmelCase__ ) -> Union[str, Any]:
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def get_rust_tokenizer( self , **lowerCAmelCase__ ) -> List[Any]:
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def get_image_processor( self , **lowerCAmelCase__ ) -> Any:
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def tearDown( self ) -> Optional[int]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def prepare_image_inputs( self ) -> Any:
'''simple docstring'''
image_inputs = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uint8 )]
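# np.moveaxis converts the channel-first (C, H, W) arrays to channel-last (H, W, C) for PIL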
image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def test_save_load_pretrained_default( self ) -> Dict:
'''simple docstring'''
tokenizer_slow = self.get_tokenizer()
tokenizer_fast = self.get_rust_tokenizer()
image_processor = self.get_image_processor()
processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow , image_processor=image_processor )
processor_slow.save_pretrained(self.tmpdirname )
processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=False )
processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast , image_processor=image_processor )
processor_fast.save_pretrained(self.tmpdirname )
processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , CLIPTokenizer )
self.assertIsInstance(processor_fast.tokenizer , CLIPTokenizerFast )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , ViTImageProcessor )
self.assertIsInstance(processor_fast.image_processor , ViTImageProcessor )
def test_save_load_pretrained_additional_features( self ) -> Any:
'''simple docstring'''
processor = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )
processor = CLIPSegProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=False , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , CLIPTokenizerFast )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , ViTImageProcessor )
def test_image_processor( self ) -> List[Any]:
'''simple docstring'''
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = CLIPSegProcessor(tokenizer=tokenizer , image_processor=image_processor )
image_input = self.prepare_image_inputs()
input_feat_extract = image_processor(image_input , return_tensors="np" )
input_processor = processor(images=image_input , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def test_tokenizer( self ) -> Union[str, Any]:
'''simple docstring'''
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = CLIPSegProcessor(tokenizer=tokenizer , image_processor=image_processor )
input_str = "lower newer"
encoded_processor = processor(text=input_str )
encoded_tok = tokenizer(input_str )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def test_processor( self ) -> Optional[int]:
'''simple docstring'''
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = CLIPSegProcessor(tokenizer=tokenizer , image_processor=image_processor )
input_str = "lower newer"
image_input = self.prepare_image_inputs()
inputs = processor(text=input_str , images=image_input )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(ValueError ):
processor()
def test_processor_visual_prompt( self ) -> str:
'''simple docstring'''
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = CLIPSegProcessor(tokenizer=tokenizer , image_processor=image_processor )
image_input = self.prepare_image_inputs()
visual_prompt_input = self.prepare_image_inputs()
inputs = processor(images=image_input , visual_prompt=visual_prompt_input )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "conditional_pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(ValueError ):
processor()
def test_tokenizer_decode( self ) -> Tuple:
'''simple docstring'''
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = CLIPSegProcessor(tokenizer=tokenizer , image_processor=image_processor )
predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
decoded_processor = processor.batch_decode(predicted_ids )
decoded_tok = tokenizer.batch_decode(predicted_ids )
self.assertListEqual(decoded_tok , decoded_processor )
| 95 | 0 |
def dodecahedron_surface_area( edge ) -> float:
if edge <= 0 or not isinstance(edge , (int, float) ):
raise ValueError('''Length must be positive.''' )
return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def dodecahedron_volume( edge ) -> float:
if edge <= 0 or not isinstance(edge , (int, float) ):
raise ValueError('''Length must be positive.''' )
return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
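# Illustrative usage (added; the expected values follow from the closed-form formulas):
# for edge = 1, surface area = 3 * sqrt(25 + 10 * sqrt(5)) ~= 20.6457
# and volume = (15 + 7 * sqrt(5)) / 4 ~= 7.6631
print(dodecahedron_surface_area(1))
print(dodecahedron_volume(1))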
| 156 |
def is_arithmetic_series( series: list ):
"""simple docstring"""
if not isinstance(series , list ):
raise ValueError("Input series is not valid, valid series - [2, 4, 6]" )
if len(series ) == 0:
raise ValueError("Input list must be a non-empty list" )
if len(series ) == 1:
return True
common_diff = series[1] - series[0]
for index in range(len(series ) - 1 ):
if series[index + 1] - series[index] != common_diff:
return False
return True
def arithmetic_mean( series: list ):
"""simple docstring"""
if not isinstance(series , list ):
raise ValueError("Input series is not valid, valid series - [2, 4, 6]" )
if len(series ) == 0:
raise ValueError("Input list must be a non-empty list" )
answer = 0
for val in series:
answer += val
return answer / len(series )
if __name__ == "__main__":
import doctest
doctest.testmod()
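# Illustrative usage (added):
print(is_arithmetic_series([2, 4, 6]))  # True - constant common difference of 2
print(is_arithmetic_series([2, 4, 7]))  # False
print(arithmetic_mean([2, 4, 6]))  # 4.0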
| 95 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase_ (UpperCamelCase__ , unittest.TestCase ):
pipeline_class = AudioLDMPipeline
params = TEXT_TO_AUDIO_PARAMS
batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
required_optional_params = frozenset(
[
'''num_inference_steps''',
'''num_waveforms_per_prompt''',
'''generator''',
'''latents''',
'''output_type''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
def get_dummy_components( self ) -> Any:
torch.manual_seed(0 )
unet = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=(32, 64) , class_embed_type="simple_projection" , projection_class_embeddings_input_dim=32 , class_embeddings_concat=True , )
scheduler = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="scaled_linear" , clip_sample=False , set_alpha_to_one=False , )
torch.manual_seed(0 )
vae = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
text_encoder_config = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , projection_dim=32 , )
text_encoder = ClapTextModelWithProjection(text_encoder_config )
tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta" , model_max_length=77 )
vocoder_config = SpeechTaHifiGanConfig(
model_in_dim=8 , sampling_rate=16_000 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=False , )
vocoder = SpeechTaHifiGan(vocoder_config )
UpperCAmelCase_ : Dict = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"vocoder": vocoder,
}
return components
def get_dummy_inputs( self , device , seed=0 ) -> Optional[int]:
if str(device ).startswith("mps" ):
generator = torch.manual_seed(seed )
else:
generator = torch.Generator(device=device ).manual_seed(seed )
inputs = {
"prompt": "A hammer hitting a wooden surface",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
}
return inputs
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
UpperCAmelCase_ : Tuple = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : List[Any] = self.get_dummy_components()
UpperCAmelCase_ : List[Any] = AudioLDMPipeline(**lowerCAmelCase__ )
UpperCAmelCase_ : Tuple = audioldm_pipe.to(lowerCAmelCase__ )
audioldm_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
UpperCAmelCase_ : int = self.get_dummy_inputs(lowerCAmelCase__ )
UpperCAmelCase_ : Optional[int] = audioldm_pipe(**lowerCAmelCase__ )
UpperCAmelCase_ : Union[str, Any] = output.audios[0]
assert audio.ndim == 1
assert len(lowerCAmelCase__ ) == 256
UpperCAmelCase_ : Optional[Any] = audio[:10]
UpperCAmelCase_ : Optional[int] = np.array(
[-0.0_0_5_0, 0.0_0_5_0, -0.0_0_6_0, 0.0_0_3_3, -0.0_0_2_6, 0.0_0_3_3, -0.0_0_2_7, 0.0_0_3_3, -0.0_0_2_8, 0.0_0_3_3] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
UpperCAmelCase_ : List[str] = self.get_dummy_components()
UpperCAmelCase_ : Union[str, Any] = AudioLDMPipeline(**lowerCAmelCase__ )
UpperCAmelCase_ : Tuple = audioldm_pipe.to(lowerCAmelCase__ )
UpperCAmelCase_ : List[str] = audioldm_pipe.to(lowerCAmelCase__ )
audioldm_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
UpperCAmelCase_ : Optional[int] = self.get_dummy_inputs(lowerCAmelCase__ )
UpperCAmelCase_ : Tuple = 3 * [inputs["prompt"]]
# forward
UpperCAmelCase_ : List[Any] = audioldm_pipe(**lowerCAmelCase__ )
audio_1 = output.audios[0]
UpperCAmelCase_ : int = self.get_dummy_inputs(lowerCAmelCase__ )
UpperCAmelCase_ : List[Any] = 3 * [inputs.pop("prompt" )]
UpperCAmelCase_ : str = audioldm_pipe.tokenizer(
lowerCAmelCase__ , padding="max_length" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=lowerCAmelCase__ , return_tensors="pt" , )
UpperCAmelCase_ : Any = text_inputs["input_ids"].to(lowerCAmelCase__ )
UpperCAmelCase_ : int = audioldm_pipe.text_encoder(
lowerCAmelCase__ , )
UpperCAmelCase_ : Any = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
UpperCAmelCase_ : Any = F.normalize(lowerCAmelCase__ , dim=-1 )
UpperCAmelCase_ : List[Any] = prompt_embeds
# forward
UpperCAmelCase_ : int = audioldm_pipe(**lowerCAmelCase__ )
audio_2 = output.audios[0]
assert np.abs(audio_1 - audio_2 ).max() < 1e-2
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
UpperCAmelCase_ : Any = self.get_dummy_components()
UpperCAmelCase_ : Union[str, Any] = AudioLDMPipeline(**lowerCAmelCase__ )
UpperCAmelCase_ : Optional[Any] = audioldm_pipe.to(lowerCAmelCase__ )
UpperCAmelCase_ : str = audioldm_pipe.to(lowerCAmelCase__ )
audioldm_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
UpperCAmelCase_ : Any = self.get_dummy_inputs(lowerCAmelCase__ )
UpperCAmelCase_ : Optional[int] = 3 * ["this is a negative prompt"]
UpperCAmelCase_ : int = negative_prompt
UpperCAmelCase_ : List[str] = 3 * [inputs["prompt"]]
# forward
UpperCAmelCase_ : List[str] = audioldm_pipe(**lowerCAmelCase__ )
audio_1 = output.audios[0]
UpperCAmelCase_ : int = self.get_dummy_inputs(lowerCAmelCase__ )
UpperCAmelCase_ : Dict = 3 * [inputs.pop("prompt" )]
UpperCAmelCase_ : Tuple = []
for p in [prompt, negative_prompt]:
UpperCAmelCase_ : Any = audioldm_pipe.tokenizer(
lowerCAmelCase__ , padding="max_length" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=lowerCAmelCase__ , return_tensors="pt" , )
UpperCAmelCase_ : Dict = text_inputs["input_ids"].to(lowerCAmelCase__ )
UpperCAmelCase_ : int = audioldm_pipe.text_encoder(
lowerCAmelCase__ , )
UpperCAmelCase_ : str = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
UpperCAmelCase_ : int = F.normalize(lowerCAmelCase__ , dim=-1 )
embeds.append(lowerCAmelCase__ )
UpperCAmelCase_ : str = embeds
# forward
UpperCAmelCase_ : List[Any] = audioldm_pipe(**lowerCAmelCase__ )
audio_2 = output.audios[0]
assert np.abs(audio_1 - audio_2 ).max() < 1e-2
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
UpperCAmelCase_ : Tuple = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : Tuple = self.get_dummy_components()
UpperCAmelCase_ : str = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ )
UpperCAmelCase_ : List[Any] = AudioLDMPipeline(**lowerCAmelCase__ )
UpperCAmelCase_ : Tuple = audioldm_pipe.to(lowerCAmelCase__ )
audioldm_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
UpperCAmelCase_ : List[Any] = self.get_dummy_inputs(lowerCAmelCase__ )
UpperCAmelCase_ : int = "egg cracking"
UpperCAmelCase_ : str = audioldm_pipe(**lowerCAmelCase__ , negative_prompt=lowerCAmelCase__ )
UpperCAmelCase_ : List[str] = output.audios[0]
assert audio.ndim == 1
assert len(lowerCAmelCase__ ) == 256
UpperCAmelCase_ : Optional[Any] = audio[:10]
UpperCAmelCase_ : List[str] = np.array(
[-0.0_0_5_1, 0.0_0_5_0, -0.0_0_6_0, 0.0_0_3_4, -0.0_0_2_6, 0.0_0_3_3, -0.0_0_2_7, 0.0_0_3_3, -0.0_0_2_8, 0.0_0_3_2] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
UpperCAmelCase_ : Union[str, Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : List[Any] = self.get_dummy_components()
UpperCAmelCase_ : str = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ )
UpperCAmelCase_ : str = AudioLDMPipeline(**lowerCAmelCase__ )
UpperCAmelCase_ : Any = audioldm_pipe.to(lowerCAmelCase__ )
audioldm_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
UpperCAmelCase_ : int = "A hammer hitting a wooden surface"
# test num_waveforms_per_prompt=1 (default)
UpperCAmelCase_ : int = audioldm_pipe(lowerCAmelCase__ , num_inference_steps=2 ).audios
assert audios.shape == (1, 256)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
UpperCAmelCase_ : Dict = 2
UpperCAmelCase_ : Any = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 256)
# test num_waveforms_per_prompt for single prompt
UpperCAmelCase_ : List[Any] = 2
UpperCAmelCase_ : Any = audioldm_pipe(lowerCAmelCase__ , num_inference_steps=2 , num_waveforms_per_prompt=lowerCAmelCase__ ).audios
assert audios.shape == (num_waveforms_per_prompt, 256)
# test num_waveforms_per_prompt for batch of prompts
UpperCAmelCase_ : List[str] = 2
UpperCAmelCase_ : Any = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=lowerCAmelCase__ ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
UpperCAmelCase_ : List[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : Any = self.get_dummy_components()
UpperCAmelCase_ : Any = AudioLDMPipeline(**lowerCAmelCase__ )
UpperCAmelCase_ : Tuple = audioldm_pipe.to(lowerCAmelCase__ )
audioldm_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
UpperCAmelCase_ : Union[str, Any] = audioldm_pipe.vocoder.config.sampling_rate
UpperCAmelCase_ : str = self.get_dummy_inputs(lowerCAmelCase__ )
UpperCAmelCase_ : Union[str, Any] = audioldm_pipe(audio_length_in_s=0.0_1_6 , **lowerCAmelCase__ )
UpperCAmelCase_ : Optional[int] = output.audios[0]
assert audio.ndim == 1
assert len(lowerCAmelCase__ ) / vocoder_sampling_rate == 0.0_1_6
UpperCAmelCase_ : Dict = audioldm_pipe(audio_length_in_s=0.0_3_2 , **lowerCAmelCase__ )
UpperCAmelCase_ : List[str] = output.audios[0]
assert audio.ndim == 1
assert len(lowerCAmelCase__ ) / vocoder_sampling_rate == 0.0_3_2
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]:
UpperCAmelCase_ : List[str] = self.get_dummy_components()
UpperCAmelCase_ : Tuple = AudioLDMPipeline(**lowerCAmelCase__ )
UpperCAmelCase_ : List[str] = audioldm_pipe.to(lowerCAmelCase__ )
audioldm_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
UpperCAmelCase_ : str = ["hey"]
UpperCAmelCase_ : int = audioldm_pipe(lowerCAmelCase__ , num_inference_steps=1 )
UpperCAmelCase_ : str = output.audios.shape
assert audio_shape == (1, 256)
UpperCAmelCase_ : Any = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
UpperCAmelCase_ : Dict = SpeechTaHifiGan(lowerCAmelCase__ ).to(lowerCAmelCase__ )
UpperCAmelCase_ : List[str] = audioldm_pipe(lowerCAmelCase__ , num_inference_steps=1 )
UpperCAmelCase_ : Optional[Any] = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 256)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str:
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]:
self._test_inference_batch_single_identical(test_mean_pixel_difference=lowerCAmelCase__ )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowerCAmelCase__ )
@slow
class UpperCamelCase_ (unittest.TestCase ):
def tearDown( self ) -> Optional[int]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def get_inputs( self , device , generator_device="cpu" , dtype=torch.float32 , seed=0 ) -> List[str]:
generator = torch.Generator(device=generator_device ).manual_seed(seed )
latents = np.random.RandomState(seed ).standard_normal((1, 8, 128, 16) )
latents = torch.from_numpy(latents ).to(device=device , dtype=dtype )
inputs = {
"prompt": "A hammer hitting a wooden surface",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 2.5,
}
return inputs
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]:
UpperCAmelCase_ : Any = AudioLDMPipeline.from_pretrained("cvssp/audioldm" )
UpperCAmelCase_ : str = audioldm_pipe.to(lowerCAmelCase__ )
audioldm_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
UpperCAmelCase_ : Any = self.get_inputs(lowerCAmelCase__ )
UpperCAmelCase_ : Optional[Any] = 25
UpperCAmelCase_ : Union[str, Any] = audioldm_pipe(**lowerCAmelCase__ ).audios[0]
assert audio.ndim == 1
assert len(lowerCAmelCase__ ) == 81_920
UpperCAmelCase_ : Union[str, Any] = audio[77_230:77_240]
UpperCAmelCase_ : Union[str, Any] = np.array(
[-0.4_8_8_4, -0.4_6_0_7, 0.0_0_2_3, 0.5_0_0_7, 0.5_8_9_6, 0.5_1_5_1, 0.3_8_1_3, -0.0_2_0_8, -0.3_6_8_7, -0.4_3_1_5] )
UpperCAmelCase_ : Optional[Any] = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1e-2
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
UpperCAmelCase_ : int = AudioLDMPipeline.from_pretrained("cvssp/audioldm" )
UpperCAmelCase_ : Optional[int] = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
UpperCAmelCase_ : str = audioldm_pipe.to(lowerCAmelCase__ )
audioldm_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
UpperCAmelCase_ : Any = self.get_inputs(lowerCAmelCase__ )
UpperCAmelCase_ : List[Any] = audioldm_pipe(**lowerCAmelCase__ ).audios[0]
assert audio.ndim == 1
assert len(lowerCAmelCase__ ) == 81_920
UpperCAmelCase_ : int = audio[27_780:27_790]
UpperCAmelCase_ : Optional[int] = np.array([-0.2_1_3_1, -0.0_8_7_3, -0.0_1_2_4, -0.0_1_8_9, 0.0_5_6_9, 0.1_3_7_3, 0.1_8_8_3, 0.2_8_8_6, 0.3_2_9_7, 0.2_2_1_2] )
UpperCAmelCase_ : Any = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3e-2
| 268 |
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig( XLMRobertaConfig ):
model_type = "M-CLIP"
def __init__( self , transformerDimSize=1_0_2_4 , imageDimSize=7_6_8 , **kwargs ) -> Any:
'''simple docstring'''
self.transformerDimensions = transformerDimSize
self.numDims = imageDimSize
super().__init__(**kwargs )
class MultilingualCLIP( PreTrainedModel ):
config_class = MCLIPConfig
def __init__( self , config , *args , **kwargs ) -> List[str]:
'''simple docstring'''
super().__init__(config , *args , **kwargs )
self.transformer = XLMRobertaModel(config )
self.LinearTransformation = torch.nn.Linear(
in_features=config.transformerDimensions , out_features=config.numDims )
def forward( self , input_ids , attention_mask ) -> Union[str, Any]:
'''simple docstring'''
embs = self.transformer(input_ids=input_ids , attention_mask=attention_mask )[0]
embs = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
return self.LinearTransformation(embs ), embs
| 95 | 0 |
def solution( length: int = 50 ) -> int:
ways_number = [1] * (length + 1)
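# ways_number[n] counts the fillings of a row of length n with blocks of length >= 3,
# each pair of blocks separated by at least one empty unit (cf. Project Euler problem 114).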
for row_length in range(3 , length + 1):
for block_length in range(3 , row_length + 1):
for block_start in range(row_length - block_length):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(f'{solution() = }')
| 111 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders( accelerator: Accelerator , batch_size: int = 16 ):
    """simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased" )
    datasets = load_dataset("glue" , "mrpc" )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=["idx", "sentence1", "sentence2"] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label" , "labels" )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding="longest" , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors="pt" , )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE )
    return train_dataloader, eval_dataloader
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function( config , args ):
    """simple docstring"""
    if os.environ.get("TESTING_MOCKED_DATALOADERS" , None ) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"] )
    seed = int(config["seed"] )
    batch_size = int(config["batch_size"] )
    metric = evaluate.load("glue" , "mrpc" )
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
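    # Note: the effective batch size is unchanged, since
    # MAX_GPU_BATCH_SIZE * gradient_accumulation_steps equals the originally requested batch_size.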
    set_seed(seed )
    train_dataloader , eval_dataloader = get_dataloaders(accelerator , batch_size )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=True )
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device )
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters() , lr=lr )
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader ) * num_epochs) // gradient_accumulation_steps , )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # Now we train the model
    for epoch in range(num_epochs ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            outputs = model(**batch )
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            predictions , references = accelerator.gather((predictions, batch["labels"]) )
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader ) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset ) - samples_seen]
                    references = references[: len(eval_dataloader.dataset ) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f'''epoch {epoch}:''' , eval_metric )
def main( ):
    """simple docstring"""
    parser = argparse.ArgumentParser(description="Simple example of training script." )
    parser.add_argument(
        "--mixed_precision" , type=str , default=None , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU." , )
    parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config , args )
if __name__ == "__main__":
main()
| 95 | 0 |
from collections.abc import Generator
from math import sin
def to_little_endian( string_aa: bytes ) -> bytes:
    '''Rearrange a 32-char bit string from big- to little-endian byte order.'''
    if len(string_aa ) != 32:
        raise ValueError('Input must be of length 32' )
    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_aa[8 * i : 8 * i + 8]
    return little_endian
def reformat_hex( i: int ) -> bytes:
    '''Convert a non-negative int to its little-endian hex representation.'''
    if i < 0:
        raise ValueError('Input must be non-negative' )
    hex_rep = format(i , '08x' )[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('utf-8' )
    return little_endian_hex
def preprocess( message: bytes ) -> bytes:
    '''Pad the message to a bit string whose length is a multiple of 512.'''
    bit_string = b""
    for char in message:
        bit_string += format(char , '08b' ).encode('utf-8' )
    start_len = format(len(bit_string ) , '064b' ).encode('utf-8' )
    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string ) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
    return bit_string
def get_block_words( bit_string: bytes ) -> Generator[list[int], None, None]:
    '''Split the bit string into 512-char blocks of sixteen 32-bit words.'''
    if len(bit_string ) % 512 != 0:
        raise ValueError('Input must have length that\'s a multiple of 512' )
    for pos in range(0, len(bit_string ), 512 ):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32 ):
            block_words.append(int(to_little_endian(block[i : i + 32] ), 2 ) )
        yield block_words
def not_aa( i: int ) -> int:
    '''Bitwise NOT over 32 bits.'''
    if i < 0:
        raise ValueError('Input must be non-negative' )
    i_str = format(i, '032b' )
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2 )
def sum_aa( a: int, b: int ) -> int:
    '''Addition modulo 2**32.'''
    return (a + b) % 2**32
def left_rotate_aa( i: int, shift: int ) -> int:
    '''Left-rotate a 32-bit value by `shift` bits.'''
    if i < 0:
        raise ValueError('Input must be non-negative' )
    if shift < 0:
        raise ValueError('Shift must be non-negative' )
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me( message: bytes ) -> bytes:
    '''Return the MD5 digest of `message` as a 32-char hex bytestring.'''
    bit_string = preprocess(message )
    added_consts = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
# Starting states
    aa = 0x67_45_23_01
    ba = 0xef_cd_ab_89
    ca = 0x98_ba_dc_fe
    da = 0x10_32_54_76
    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]
# Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string ):
        a = aa
        b = ba
        c = ca
        d = da
        # Hash current chunk
        for i in range(64 ):
            if i <= 15:
                # f = (b & c) | (not_aa(b) & d)     # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_aa(d) & c)     # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_aa(d ))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_aa(b, left_rotate_aa(f, shift_amounts[i] ) )
        # Add hashed chunk to running total
        aa = sum_aa(aa, a )
        ba = sum_aa(ba, b )
        ca = sum_aa(ca, c )
        da = sum_aa(da, d )
    digest = reformat_hex(aa ) + reformat_hex(ba ) + reformat_hex(ca ) + reformat_hex(da )
    return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
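    # Illustrative check (added) against the well-known MD5 test vector for the empty
    # message; the name md5_me is my reading of the obfuscated original.
    print(md5_me(b""))  # expected: b'd41d8cd98f00b204e9800998ecf8427e'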
| 138 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class __lowerCAmelCase ( unittest.TestCase):
def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=1_8 , min_resolution=3_0 , max_resolution=4_0_0 , do_resize=True , size=None , do_center_crop=True , crop_size=None , do_flip_channel_order=True , ) -> Optional[Any]:
'''simple docstring'''
size = size if size is not None else {"shortest_edge": 2_0}
crop_size = crop_size if crop_size is not None else {"height": 1_8, "width": 1_8}
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.do_center_crop = do_center_crop
self.crop_size = crop_size
self.do_flip_channel_order = do_flip_channel_order
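# Resizing by "shortest_edge" keeps the aspect ratio; the later center crop yields the fixed crop_size.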
def prepare_image_processor_dict( self ) -> Optional[int]:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class __lowerCAmelCase ( UpperCamelCase__ , unittest.TestCase):
image_processing_class = MobileViTImageProcessor if is_vision_available() else None
def setUp( self ) -> Optional[Any]:
'''simple docstring'''
self.image_processor_tester = MobileViTImageProcessingTester(self )
@property
def image_processor_dict( self ) -> List[str]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties( self ) -> List[str]:
'''simple docstring'''
image_processor = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(image_processor , "do_resize" ) )
self.assertTrue(hasattr(image_processor , "size" ) )
self.assertTrue(hasattr(image_processor , "do_center_crop" ) )
self.assertTrue(hasattr(image_processor , "center_crop" ) )
self.assertTrue(hasattr(image_processor , "do_flip_channel_order" ) )
def test_image_processor_from_dict_with_kwargs( self ) -> Optional[int]:
'''simple docstring'''
image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 2_0} )
self.assertEqual(image_processor.crop_size , {"height": 1_8, "width": 1_8} )
image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {"shortest_edge": 4_2} )
self.assertEqual(image_processor.crop_size , {"height": 8_4, "width": 8_4} )
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def test_call_pil( self ) -> Tuple:
'''simple docstring'''
image_processing = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
for image in image_inputs:
self.assertIsInstance(image , Image.Image )
# Test not batched input
encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def test_call_numpy( self ) -> Any:
'''simple docstring'''
image_processing = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
for image in image_inputs:
self.assertIsInstance(image , np.ndarray )
# Test not batched input
encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def test_call_pytorch( self ) -> Optional[Any]:
'''simple docstring'''
image_processing = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
for image in image_inputs:
self.assertIsInstance(image , torch.Tensor )
# Test not batched input
encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 95 | 0 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowerCamelCase : Dict = logging.get_logger(__name__)
_lowerCamelCase : Optional[Any] = {"""vocab_file""": """spiece.model"""}
_lowerCamelCase : Tuple = {
"""vocab_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""",
}
}
_lowerCamelCase : int = {
"""albert-base-v1""": 512,
"""albert-large-v1""": 512,
"""albert-xlarge-v1""": 512,
"""albert-xxlarge-v1""": 512,
"""albert-base-v2""": 512,
"""albert-large-v2""": 512,
"""albert-xlarge-v2""": 512,
"""albert-xxlarge-v2""": 512,
}
_lowerCamelCase : List[Any] = """▁"""
class lowercase ( PreTrainedTokenizer):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , vocab_file , do_lower_case=True , remove_space=True , keep_accents=False , bos_token="[CLS]" , eos_token="[SEP]" , unk_token="<unk>" , sep_token="[SEP]" , pad_token="<pad>" , cls_token="[CLS]" , mask_token="[MASK]" , sp_model_kwargs = None , **kwargs , ):
"""simple docstring"""
mask_token = (
AddedToken(mask_token , lstrip=True , rstrip=False , normalized=False )
if isinstance(mask_token , str )
else mask_token
)
self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
self.do_lower_case = do_lower_case
self.remove_space = remove_space
self.keep_accents = keep_accents
self.vocab_file = vocab_file
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(vocab_file )
@property
def vocab_size( self ):
"""simple docstring"""
return len(self.sp_model )
def get_vocab( self ):
"""simple docstring"""
vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
"""simple docstring"""
state = self.__dict__.copy()
state['''sp_model'''] = None
return state
def __setstate__( self , d ):
"""simple docstring"""
self.__dict__ = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
self.sp_model_kwargs = {}
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def preprocess_text( self , inputs ):
"""simple docstring"""
if self.remove_space:
outputs = " ".join(inputs.strip().split() )
else:
outputs = inputs
outputs = outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' )
if not self.keep_accents:
outputs = unicodedata.normalize('''NFKD''' , outputs )
outputs = "".join([c for c in outputs if not unicodedata.combining(c )] )
if self.do_lower_case:
outputs = outputs.lower()
return outputs
def _tokenize( self , text ):
"""simple docstring"""
text = self.preprocess_text(text )
pieces = self.sp_model.encode(text , out_type=str )
new_pieces = []
for piece in pieces:
if len(piece ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE , '''''' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
cur_pieces = cur_pieces[1:]
else:
cur_pieces[0] = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(cur_pieces )
else:
new_pieces.append(piece )
return new_pieces
def _convert_token_to_id( self , token ):
"""simple docstring"""
return self.sp_model.PieceToId(token )
def _convert_id_to_token( self , index ):
"""simple docstring"""
return self.sp_model.IdToPiece(index )
def convert_tokens_to_string( self , tokens ):
"""simple docstring"""
current_sub_tokens = []
out_string = ""
prev_is_special = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(current_sub_tokens ) + token
prev_is_special = True
current_sub_tokens = []
else:
current_sub_tokens.append(token )
prev_is_special = False
out_string += self.sp_model.decode(current_sub_tokens )
return out_string.strip()
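# An ALBERT sequence pair is built as: [CLS] A [SEP] B [SEP]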
def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
"""simple docstring"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return cls + token_ids_0 + sep
return cls + token_ids_0 + sep + token_ids_1 + sep
def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
if token_ids_1 is not None:
return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
return [1] + ([0] * len(token_ids_0 )) + [1]
def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
"""simple docstring"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep ) * [0]
return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
def save_vocabulary( self , save_directory , filename_prefix = None ):
"""simple docstring"""
if not os.path.isdir(save_directory ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
out_vocab_file = os.path.join(
save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , out_vocab_file )
elif not os.path.isfile(self.vocab_file ):
with open(out_vocab_file , '''wb''' ) as fi:
content_spiece_model = self.sp_model.serialized_model_proto()
fi.write(content_spiece_model )
return (out_vocab_file,)
| 167 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class __lowerCAmelCase ( unittest.TestCase):
def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=1_8 , min_resolution=3_0 , max_resolution=4_0_0 , do_resize=True , size=None , do_center_crop=True , crop_size=None , ) -> Optional[int]:
'''simple docstring'''
size = size if size is not None else {"shortest_edge": 2_0}
crop_size = crop_size if crop_size is not None else {"height": 1_8, "width": 1_8}
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.do_center_crop = do_center_crop
self.crop_size = crop_size
def prepare_image_processor_dict( self ) -> str:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class __lowerCAmelCase ( UpperCamelCase__ , unittest.TestCase):
image_processing_class = MobileNetVaImageProcessor if is_vision_available() else None
def setUp( self ) -> Tuple:
'''simple docstring'''
self.image_processor_tester = MobileNetVaImageProcessingTester(self )
@property
def image_processor_dict( self ) -> List[str]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties( self ) -> Any:
'''simple docstring'''
image_processor = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(image_processor , "do_resize" ) )
self.assertTrue(hasattr(image_processor , "size" ) )
self.assertTrue(hasattr(image_processor , "do_center_crop" ) )
self.assertTrue(hasattr(image_processor , "crop_size" ) )
def test_image_processor_from_dict_with_kwargs( self ) -> str:
'''simple docstring'''
image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 2_0} )
self.assertEqual(image_processor.crop_size , {"height": 1_8, "width": 1_8} )
image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {"shortest_edge": 4_2} )
self.assertEqual(image_processor.crop_size , {"height": 8_4, "width": 8_4} )
def _lowercase ( self ) -> Any:
'''simple docstring'''
pass
def test_call_pil( self ) -> Optional[int]:
'''simple docstring'''
image_processing = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
for image in image_inputs:
self.assertIsInstance(image , Image.Image )
# Test not batched input
encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
a__ : Dict =image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def _lowercase ( self ) -> int:
'''simple docstring'''
a__ : str =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a__ : str =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , np.ndarray )
# Test not batched input
a__ : List[str] =image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
a__ : Union[str, Any] =image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
a__ : Any =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a__ : int =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
# Test not batched input
a__ : Optional[Any] =image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
a__ : str =image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
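# Hedged usage sketch outside the test harness (checkpoint name assumed from
# the Hub; any MobileNetV1 image-processor checkpoint should behave the same):
#
#     from PIL import Image
#     processor = MobileNetV1ImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
#     pixel_values = processor(Image.new("RGB", (224, 224)), return_tensors="pt").pixel_values
#     # pixel_values.shape -> (1, 3, crop_size["height"], crop_size["width"])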
| 95 | 0 |
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    """A Bezier curve defined by a list of 2D control points."""

    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]
        plt.plot(to_plot_x, to_plot_y, color="blue", label="Curve of Degree " + str(self.degree))
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
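    # Quick numeric check (illustrative, not from the original file): the
    # degree-1 curve through (1, 2) and (3, 5) evaluated at t=0.5 is the midpoint.
    assert BezierCurve([(1.0, 2.0), (3.0, 5.0)]).bezier_curve_function(0.5) == (2.0, 3.5)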
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
    BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve()  # degree 3
 | 262 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_convbert""": ["""CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvBertConfig""", """ConvBertOnnxConfig"""],
"""tokenization_convbert""": ["""ConvBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convbert"] = [
"""CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConvBertForMaskedLM""",
"""ConvBertForMultipleChoice""",
"""ConvBertForQuestionAnswering""",
"""ConvBertForSequenceClassification""",
"""ConvBertForTokenClassification""",
"""ConvBertLayer""",
"""ConvBertModel""",
"""ConvBertPreTrainedModel""",
"""load_tf_weights_in_convbert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
"""TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFConvBertForMaskedLM""",
"""TFConvBertForMultipleChoice""",
"""TFConvBertForQuestionAnswering""",
"""TFConvBertForSequenceClassification""",
"""TFConvBertForTokenClassification""",
"""TFConvBertLayer""",
"""TFConvBertModel""",
"""TFConvBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
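# Net effect (sketch of the intended behavior, assuming the usual transformers
# layout): `from ...convbert import ConvBertModel` resolves lazily, so the heavy
# torch/TF imports only run when the corresponding symbol is first accessed.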
| 95 | 0 |
import functools
from typing import Any
def word_break(string: str, words: list[str]) -> bool:
    """Return True if `string` can be segmented into a sequence of words from `words`."""
    if not isinstance(string, str) or len(string) == 0:
        raise ValueError("the string should be not empty string")

    if not isinstance(words, list) or not all(
        isinstance(item, str) and len(item) > 0 for item in words
    ):
        raise ValueError("the words should be a list of non-empty strings")

    # Build trie
    trie: dict = {}
    word_keeper_key = "WORD_KEEPER"

    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        trie_node[word_keeper_key] = True

    len_string = len(string)

    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int) -> bool:
        if index == len_string:
            return True

        trie_node = trie
        for i in range(index, len_string):
            trie_node = trie_node.get(string[i], None)

            if trie_node is None:
                return False

            if trie_node.get(word_keeper_key, False) and is_breakable(i + 1):
                return True

        return False

    return is_breakable(0)
if __name__ == "__main__":
import doctest
doctest.testmod()
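    # Illustrative checks (assumed examples, not from the original file):
    assert word_break("applepenapple", ["apple", "pen"]) is True
    assert word_break("catsandog", ["cats", "dog", "sand", "and", "cat"]) is False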
| 92 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""caidas/swin2sr-classicalsr-x2-64""": (
"""https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"""
),
}
class Swin2SRConfig(PretrainedConfig):
    model_type = "swin2sr"

    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
| 95 | 0 |
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
logger = logging.get_logger(__name__)


class SegformerFeatureExtractor(SegformerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 178 |
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class __lowerCAmelCase :
pass
| 95 | 0 |
"""simple docstring"""
def is_arithmetic_series(series: list) -> bool:
    """Check whether a list of numbers forms an arithmetic series."""
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series: list) -> float:
    """Return the arithmetic mean of a list of numbers."""
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)
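# Illustrative examples (assumed, not from the original file):
#     is_arithmetic_series([2, 4, 6])  -> True   (common difference 2)
#     is_arithmetic_series([2, 4, 7])  -> False
#     arithmetic_mean([2, 4, 6])       -> 4.0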
if __name__ == "__main__":
import doctest
    doctest.testmod()
 | 320 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
class TextSummarizationTool(PipelineTool):
    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
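# Hedged usage sketch (assumes the transformers agents runtime; the default
# checkpoint above is downloaded on first use):
#
#     tool = TextSummarizationTool()
#     summary = tool("Long meeting transcript goes here ...")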
| 95 | 0 |
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
    from transformers import Speech2TextFeatureExtractor
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random float32 tensor as nested Python lists."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2_000,
        feature_size=24,
        num_mel_bins=24,
        padding_value=0.0,
        sampling_rate=16_000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.num_mel_bins = num_mel_bins
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "num_mel_bins": self.num_mel_bins,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Speech2TextFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = Speech2TextFeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding=True, return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_cepstral_mean_and_variance_normalization(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, padding=padding, max_length=max_length, return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])
    def test_cepstral_mean_and_variance_normalization_np(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, max_length=max_length, padding=padding, return_tensors="np", return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])
    def test_cepstral_mean_and_variance_normalization_trunc_max_length(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="max_length",
            max_length=4,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1])
        self._check_zero_mean_unit_variance(input_features[2])
    def test_cepstral_mean_and_variance_normalization_trunc_longest(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="longest",
            max_length=4,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 4, 24))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="longest",
            max_length=16,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertEqual(input_features.shape, (3, 6, 24))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = np.array([
            -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
            -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
            -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
        ])
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 584, 24))
        self.assertTrue(np.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))
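# Hedged usage sketch outside the test harness (checkpoint name assumed from
# the Hub):
#
#     fe = Speech2TextFeatureExtractor.from_pretrained("facebook/s2t-small-librispeech-asr")
#     feats = fe(raw_waveform, sampling_rate=16_000, return_tensors="pt").input_features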
| 15 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

_model_names = [
"""small""",
"""small-base""",
"""medium""",
"""medium-base""",
"""intermediate""",
"""intermediate-base""",
"""large""",
"""large-base""",
"""xlarge""",
"""xlarge-base""",
]
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt""",
"""funnel-transformer/small-base""": """https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt""",
"""funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt""",
"""funnel-transformer/medium-base""": (
"""https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"""
),
"""funnel-transformer/intermediate""": (
"""https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"""
),
"""funnel-transformer/intermediate-base""": (
"""https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"""
),
"""funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt""",
"""funnel-transformer/large-base""": """https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt""",
"""funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt""",
"""funnel-transformer/xlarge-base""": (
"""https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json""",
"""funnel-transformer/small-base""": (
"""https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json""",
"""funnel-transformer/medium-base""": (
"""https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/intermediate""": (
"""https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"""
),
"""funnel-transformer/intermediate-base""": (
"""https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json""",
"""funnel-transformer/large-base""": (
"""https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json""",
"""funnel-transformer/xlarge-base""": (
"""https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {f"funnel-transformer/{name}": 512 for name in _model_names}
PRETRAINED_INIT_CONFIGURATION = {f"funnel-transformer/{name}": {"do_lower_case": True} for name in _model_names}
class FunnelTokenizerFast(PreTrainedTokenizerFast):
    r"""Construct a "fast" Funnel Transformer tokenizer, backed by HuggingFace's *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    cls_token_type_id: int = 2

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        bos_token="<s>",
        eos_token="</s>",
        clean_text=True,
        tokenize_chinese_chars=True,
        strip_accents=None,
        wordpieces_prefix="##",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            bos_token=bos_token,
            eos_token=eos_token,
            clean_text=clean_text,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            wordpieces_prefix=wordpieces_prefix,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0]
        return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
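# Hedged usage sketch (checkpoint name taken from the maps above):
#
#     tok = FunnelTokenizerFast.from_pretrained("funnel-transformer/small")
#     enc = tok("Hello world")
#     # enc["token_type_ids"][0] == cls_token_type_id == 2 for the leading [CLS]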
| 95 | 0 |
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    # Prefix text to help Transformer-XL and XLNet with short prompts.
    XL_PREFIX = """
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
"""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING
        )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}
    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        prefix=None,
        handle_long_generation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework
            )
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]

        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    " [None, 'hole']"
                )
            preprocess_params["handle_long_generation"] = handle_long_generation

        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs

        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params
    def _parse_and_tokenize(self, *args, **kwargs):
        # Transfo-XL needs an extra space before punctuation symbols
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})
        return super()._parse_and_tokenize(*args, **kwargs)

    def __call__(self, text_inputs, **kwargs):
        return super().__call__(text_inputs, **kwargs)
    def preprocess(self, prompt_text, prefix="", handle_long_generation=None, **generate_kwargs):
        inputs = self.tokenizer(
            prefix + prompt_text, padding=False, add_special_tokens=False, return_tensors=self.framework
        )
        inputs["prompt_text"] = prompt_text

        if handle_long_generation == "hole":
            cur_len = inputs["input_ids"].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                new_tokens = generate_kwargs["max_new_tokens"]
            else:
                new_tokens = generate_kwargs.get("max_length", self.model.config.max_length) - cur_len
                if new_tokens < 0:
                    raise ValueError("We cannot infer how many new tokens are expected")
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                keep_length = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        "We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
                        " models max length"
                    )
                inputs["input_ids"] = inputs["input_ids"][:, -keep_length:]
                if "attention_mask" in inputs:
                    inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:]
        return inputs
    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")

        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length", 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length

        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence,
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                )
                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0],
                            skip_special_tokens=True,
                            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                        )
                    )
                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]
                record = {"generated_text": all_text}
            records.append(record)
        return records
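# Hedged usage sketch of the high-level entry point built on this class
# (model name is an assumption; any causal LM checkpoint works):
#
#     from transformers import pipeline
#     generator = pipeline("text-generation", model="gpt2")
#     generator("Once upon a time", max_new_tokens=20, return_full_text=False)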
| 169 |
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    """A dataset reader that reads from a Spark DataFrame."""

    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
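# Hedged usage sketch (assumes a live SparkSession; recent `datasets` releases
# also expose this reader through `Dataset.from_spark`):
#
#     from pyspark.sql import SparkSession
#     spark = SparkSession.builder.master("local[*]").getOrCreate()
#     df = spark.createDataFrame([{"text": "hello"}, {"text": "world"}])
#     ds = SparkDatasetReader(df, cache_dir="/tmp/hf_cache").read()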
| 95 | 0 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 512,
"""facebook/dpr-ctx_encoder-multiset-base""": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-question_encoder-single-nq-base""": 512,
"""facebook/dpr-question_encoder-multiset-base""": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-reader-single-nq-base""": 512,
"""facebook/dpr-reader-multiset-base""": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    r"""Construct a "fast" DPRContextEncoder tokenizer, backed by HuggingFace's *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer
class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    r"""Construct a "fast" DPRQuestionEncoder tokenizer, backed by HuggingFace's *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])

CUSTOM_DPR_READER_DOCSTRING = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRSpanPrediction] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits,
        end_logits,
        max_answer_length: int,
        top_spans: int,
    ):
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    r"""Construct a "fast" DPRReader tokenizer, backed by HuggingFace's *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
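# Hedged usage sketch (checkpoint name taken from the maps above; the reader
# model call itself is elided):
#
#     tok = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
#     enc = tok(questions="What is love?", titles="Haddaway", texts="...", return_tensors="pt")
#     # feed `enc` to a DPRReader model, then:
#     # spans = tok.decode_best_spans(enc, reader_output)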
| 156 |
from math import pi
def arc_length(angle: int, radius: int) -> float:
    """Length of a circular arc given its central angle (degrees) and radius."""
    return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
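    # Sanity check (illustrative): a 90-degree arc of radius 10 is a quarter of
    # a circle of circumference 20*pi, i.e. 5*pi ~= 15.71.
    assert abs(arc_length(90, 10) - 5 * pi) < 1e-9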
| 95 | 0 |
"""simple docstring"""
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('''9.1.0'''):
    PIL_INTERPOLATION = {
"""linear""": PIL.Image.Resampling.BILINEAR,
"""bilinear""": PIL.Image.Resampling.BILINEAR,
"""bicubic""": PIL.Image.Resampling.BICUBIC,
"""lanczos""": PIL.Image.Resampling.LANCZOS,
"""nearest""": PIL.Image.Resampling.NEAREST,
}
else:
    PIL_INTERPOLATION = {
"""linear""": PIL.Image.LINEAR,
"""bilinear""": PIL.Image.BILINEAR,
"""bicubic""": PIL.Image.BICUBIC,
"""lanczos""": PIL.Image.LANCZOS,
"""nearest""": PIL.Image.NEAREST,
}
def pt_to_pil(images):
    """Convert a torch image batch in [-1, 1] to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    """Convert a numpy image batch in [0, 1] to a list of PIL images."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
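# Hedged usage sketch (assumes a decoded diffusion sample scaled to [-1, 1]):
#
#     import torch
#     sample = torch.randn(1, 3, 64, 64).clamp(-1, 1)
#     pil_images = pt_to_pil(sample)  # list with one 64x64 RGB PIL.Image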
| 268 |
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
GLUE_TASKS_NUM_LABELS = {
"""cola""": 2,
"""mnli""": 3,
"""mrpc""": 2,
"""sst-2""": 2,
"""sts-b""": 1,
"""qqp""": 2,
"""qnli""": 2,
"""rte""": 2,
"""wnli""": 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch ( tf_checkpoint_path : str , xlnet_config_file : str , pytorch_dump_folder_path : str , finetuning_task : str = None ):
"""simple docstring"""
config = XLNetConfig.from_json_file(xlnet_config_file )
finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(f'''Building PyTorch XLNetForSequenceClassification model from configuration: {config}''' )
config.finetuning_task = finetuning_task
config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
model = XLNetForSequenceClassification(config )
elif "squad" in finetuning_task:
config.finetuning_task = finetuning_task
model = XLNetForQuestionAnswering(config )
else:
model = XLNetLMHeadModel(config )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(model , config , tf_checkpoint_path )
# Save pytorch-model
pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME )
pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path , CONFIG_NAME )
print(f'''Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path )}''' )
torch.save(model.state_dict() , pytorch_weights_dump_path )
print(f'''Save configuration file to {os.path.abspath(pytorch_config_dump_path )}''' )
with open(pytorch_config_dump_path , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--xlnet_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained XLNet model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the folder to store the PyTorch model or dataset/vocab.""",
)
parser.add_argument(
"""--finetuning_task""",
default=None,
type=str,
help="""Name of a task on which the XLNet TensorFlow model was fine-tuned""",
)
args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
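# Example invocation (added for illustration; the checkpoint paths are placeholders,
# not part of the original script):
# python convert_xlnet_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./xlnet_cased_L-12_H-768_A-12/xlnet_model.ckpt \
#     --xlnet_config_file ./xlnet_cased_L-12_H-768_A-12/xlnet_config.json \
#     --pytorch_dump_folder_path ./xlnet_pytorch \
#     --finetuning_task sts-b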
| 95 | 0 |
from __future__ import annotations
import math
__version__ = """2020.9.26"""
__author__ = """xcodz-dot, cclaus, dhruvmanila"""
def convert_to_ad ( x: float , y: float , z: float , scale: float , distance: float ) -> tuple[float, float]:
"""Convert a 3D point to its 2D perspective projection."""
if not all(isinstance(val , (float, int)) for val in locals().values()):
msg = F'''Input values must either be float or int: {list(locals().values())}'''
raise TypeError(msg)
projected_x = ((x * distance) / (z + distance)) * scale
projected_y = ((y * distance) / (z + distance)) * scale
return projected_x, projected_y
def rotate ( x: float , y: float , z: float , axis: str , angle: float ) -> tuple[float, float, float]:
"""Rotate a 3D point around the given axis ('x', 'y' or 'z') by the given angle."""
if not isinstance(axis , str):
raise TypeError("""Axis must be a str""")
input_variables = locals()
del input_variables["axis"]
if not all(isinstance(val , (float, int)) for val in input_variables.values()):
msg = (
"Input values except axis must either be float or int: "
F'''{list(input_variables.values())}'''
)
raise TypeError(msg)
angle = (angle % 360) / 450 * 180 / math.pi
if axis == "z":
new_x = x * math.cos(angle) - y * math.sin(angle)
new_y = y * math.cos(angle) + x * math.sin(angle)
new_z = z
elif axis == "x":
new_y = y * math.cos(angle) - z * math.sin(angle)
new_z = z * math.cos(angle) + y * math.sin(angle)
new_x = x
elif axis == "y":
new_x = x * math.cos(angle) - z * math.sin(angle)
new_z = z * math.cos(angle) + x * math.sin(angle)
new_y = y
else:
raise ValueError("""not a valid axis, choose one of 'x', 'y', 'z'""")
return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }')
print(f'{rotate(1.0, 2.0, 3.0, "y", 90.0) = }')
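# Worked example (added for illustration): for rotate(1.0, 2.0, 3.0, "y", 90.0) the module
# first rescales the angle as (90 % 360) / 450 * 180 / math.pi ≈ 11.46 radians, so the
# result is not the familiar quarter-turn; the doctest values reflect this scaling.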
| 111 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/canine-s""": """https://huggingface.co/google/canine-s/resolve/main/config.json""",
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class CanineConfig ( PretrainedConfig ):
model_type = """canine"""
def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=16384 , type_vocab_size=16 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , bos_token_id=0xE000 , eos_token_id=0xE001 , downsampling_rate=4 , upsampling_kernel_size=4 , num_hash_functions=8 , num_hash_buckets=16384 , local_transformer_stride=128 , **kwargs , ) -> None:
'''simple docstring'''
super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.type_vocab_size = type_vocab_size
self.layer_norm_eps = layer_norm_eps
# Character config:
self.downsampling_rate = downsampling_rate
self.upsampling_kernel_size = upsampling_kernel_size
self.num_hash_functions = num_hash_functions
self.num_hash_buckets = num_hash_buckets
self.local_transformer_stride = local_transformer_stride
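# Minimal usage sketch (added for illustration; runnable only inside transformers, since
# PretrainedConfig comes from a relative import):
# config = CanineConfig(num_hidden_layers=6)
# assert config.model_type == "canine" and config.num_hash_buckets == 16384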
| 95 | 0 |
import enum
import os
from hashlib import sha256
from typing import Optional
from .. import config
from .logging import get_logger
logger = get_logger(__name__)
class VerificationMode ( enum.Enum ):
ALL_CHECKS = """all_checks"""
BASIC_CHECKS = """basic_checks"""
NO_CHECKS = """no_checks"""
class ChecksumVerificationException ( Exception ):
pass
class UnexpectedDownloadedFile ( ChecksumVerificationException ):
pass
class ExpectedMoreDownloadedFiles ( ChecksumVerificationException ):
pass
class NonMatchingChecksumError ( ChecksumVerificationException ):
pass
def verify_checksums ( expected_checksums , recorded_checksums , verification_name=None ) -> None:
'''simple docstring'''
if expected_checksums is None:
logger.info('Unable to verify checksums.' )
return
if len(set(expected_checksums ) - set(recorded_checksums ) ) > 0:
raise ExpectedMoreDownloadedFiles(str(set(expected_checksums ) - set(recorded_checksums ) ) )
if len(set(recorded_checksums ) - set(expected_checksums ) ) > 0:
raise UnexpectedDownloadedFile(str(set(recorded_checksums ) - set(expected_checksums ) ) )
bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
for_verification_name = " for " + verification_name if verification_name is not None else ""
if len(bad_urls ) > 0:
raise NonMatchingChecksumError(
f"Checksums didn't match{for_verification_name}:\n"
f"{bad_urls}\n"
'Set `verification_mode=\'no_checks\'` to skip checksums verification and ignore this error' )
logger.info('All the checksums matched successfully' + for_verification_name )
class SplitsVerificationException ( Exception ):
pass
class UnexpectedSplits ( SplitsVerificationException ):
pass
class ExpectedMoreSplits ( SplitsVerificationException ):
pass
class NonMatchingSplitsSizesError ( SplitsVerificationException ):
pass
def verify_splits ( expected_splits , recorded_splits ) -> None:
'''simple docstring'''
if expected_splits is None:
logger.info('Unable to verify splits sizes.' )
return
if len(set(expected_splits ) - set(recorded_splits ) ) > 0:
raise ExpectedMoreSplits(str(set(expected_splits ) - set(recorded_splits ) ) )
if len(set(recorded_splits ) - set(expected_splits ) ) > 0:
raise UnexpectedSplits(str(set(recorded_splits ) - set(expected_splits ) ) )
bad_splits = [
{"expected": expected_splits[name], "recorded": recorded_splits[name]}
for name in expected_splits
if expected_splits[name].num_examples != recorded_splits[name].num_examples
]
if len(bad_splits ) > 0:
raise NonMatchingSplitsSizesError(str(bad_splits ) )
logger.info('All the splits matched successfully.' )
def get_size_checksum_dict ( path , record_checksum = True ) -> dict:
'''simple docstring'''
if record_checksum:
m = sha256()
with open(path , 'rb' ) as f:
for chunk in iter(lambda: f.read(1 << 20 ) , b'' ):
m.update(chunk )
checksum = m.hexdigest()
else:
checksum = None
return {"num_bytes": os.path.getsize(path ), "checksum": checksum}
def is_small_dataset ( dataset_size ) -> bool:
'''simple docstring'''
if dataset_size and config.IN_MEMORY_MAX_SIZE:
return dataset_size < config.IN_MEMORY_MAX_SIZE
else:
return False
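# Minimal usage sketch (added for illustration; URLs and checksum values are made up):
# recorded = {"https://example.com/a.txt": {"num_bytes": 3, "checksum": "abc"}}
# verify_checksums(dict(recorded), recorded)                       # passes and logs success
# verify_checksums({"https://example.com/b.txt": {}}, recorded)    # raises ExpectedMoreDownloadedFiles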
| 138 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
UpperCAmelCase : int = False
class VersatileDiffusionPipelineFastTests ( unittest.TestCase):
pass
@nightly
@require_torch_gpu
class VersatileDiffusionPipelineIntegrationTests ( unittest.TestCase):
def tearDown ( self ) -> None:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_from_save_pretrained ( self ) -> None:
'''simple docstring'''
pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.float16 )
pipe.to(torch_device )
pipe.set_progress_bar_config(disable=None )
init_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
generator = torch.manual_seed(0 )
image = pipe.dual_guided(
prompt="first prompt" , image=init_image , text_to_image_strength=0.75 , generator=generator , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(tmpdirname )
pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname , torch_dtype=torch.float16 )
pipe.to(torch_device )
pipe.set_progress_bar_config(disable=None )
generator = generator.manual_seed(0 )
new_image = pipe.dual_guided(
prompt="first prompt" , image=init_image , text_to_image_strength=0.75 , generator=generator , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def test_inference_dual_guided_then_text_to_image_then_image_variation ( self ) -> None:
'''simple docstring'''
pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.float16 )
pipe.to(torch_device )
pipe.set_progress_bar_config(disable=None )
prompt = "cyberpunk 2077"
init_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
generator = torch.manual_seed(0 )
image = pipe.dual_guided(
prompt=prompt , image=init_image , text_to_image_strength=0.75 , generator=generator , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" , ).images
image_slice = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
prompt = "A painting of a squirrel eating a burger "
generator = torch.manual_seed(0 )
image = pipe.text_to_image(
prompt=prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images
image_slice = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
image = pipe.image_variation(init_image , generator=generator , output_type="numpy" ).images
image_slice = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 95 | 0 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class WavaVecaProcessorWithLMTest ( unittest.TestCase):
def setUp ( self ):
"""simple docstring"""
vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
self.add_kwargs_tokens_map = {
"unk_token": "<unk>",
"bos_token": "<s>",
"eos_token": "</s>",
}
feature_extractor_map = {
"feature_size": 1,
"padding_value": 0.0,
"sampling_rate": 16000,
"return_attention_mask": False,
"do_normalize": True,
}
self.tmpdirname = tempfile.mkdtemp()
self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
self.feature_extraction_file = os.path.join(self.tmpdirname , FEATURE_EXTRACTOR_NAME )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(vocab_tokens ) + '''\n''' )
with open(self.feature_extraction_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(feature_extractor_map ) + '''\n''' )
# load decoder from hub
self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"
def get_tokenizer ( self , **kwargs_init ):
"""simple docstring"""
kwargs = self.add_kwargs_tokens_map.copy()
kwargs.update(kwargs_init )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **kwargs )
def get_feature_extractor ( self , **kwargs ):
"""simple docstring"""
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **kwargs )
def get_decoder ( self , **kwargs ):
"""simple docstring"""
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **kwargs )
def tearDown ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def a_ ( self : List[Any] ):
"""simple docstring"""
A_ : Any = self.get_tokenizer()
A_ : str = self.get_feature_extractor()
A_ : Tuple = self.get_decoder()
A_ : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ , decoder=lowerCAmelCase__ )
processor.save_pretrained(self.tmpdirname )
A_ : List[Any] = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCAmelCase__ )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , lowerCAmelCase__ )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , lowerCAmelCase__ )
def a_ ( self : Tuple ):
"""simple docstring"""
A_ : Optional[int] = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
A_ : Tuple = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def a_ ( self : Any ):
"""simple docstring"""
A_ : Optional[Any] = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['''xx'''] )
with self.assertRaisesRegex(lowerCAmelCase__ , '''include''' ):
WavaVecaProcessorWithLM(
tokenizer=lowerCAmelCase__ , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def a_ ( self : Optional[int] ):
"""simple docstring"""
A_ : List[Any] = self.get_feature_extractor()
A_ : List[Any] = self.get_tokenizer()
A_ : List[str] = self.get_decoder()
A_ : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ , decoder=lowerCAmelCase__ )
A_ : Optional[int] = floats_list((3, 10_00) )
A_ : int = feature_extractor(lowerCAmelCase__ , return_tensors='''np''' )
A_ : Union[str, Any] = processor(lowerCAmelCase__ , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def a_ ( self : Union[str, Any] ):
"""simple docstring"""
A_ : str = self.get_feature_extractor()
A_ : int = self.get_tokenizer()
A_ : Any = self.get_decoder()
A_ : Tuple = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ , decoder=lowerCAmelCase__ )
A_ : List[str] = "This is a test string"
A_ : Optional[int] = processor(text=lowerCAmelCase__ )
A_ : Any = tokenizer(lowerCAmelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def a_ ( self : str , _lowerCamelCase : Any=(2, 10, 16) , _lowerCamelCase : int=77 ):
"""simple docstring"""
np.random.seed(lowerCAmelCase__ )
return np.random.rand(*lowerCAmelCase__ )
def a_ ( self : List[Any] ):
"""simple docstring"""
A_ : Optional[int] = self.get_feature_extractor()
A_ : Optional[Any] = self.get_tokenizer()
A_ : Union[str, Any] = self.get_decoder()
A_ : Tuple = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ , decoder=lowerCAmelCase__ )
A_ : int = self._get_dummy_logits(shape=(10, 16) , seed=13 )
A_ : int = processor.decode(lowerCAmelCase__ )
A_ : Optional[Any] = decoder.decode_beams(lowerCAmelCase__ )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('''</s> <s> </s>''' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
def a_ ( self : Optional[Any] , _lowerCamelCase : Tuple ):
"""simple docstring"""
A_ : Dict = self.get_feature_extractor()
A_ : Union[str, Any] = self.get_tokenizer()
A_ : Union[str, Any] = self.get_decoder()
A_ : Optional[int] = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ , decoder=lowerCAmelCase__ )
A_ : List[Any] = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
A_ : List[Any] = processor.batch_decode(lowerCAmelCase__ )
else:
with get_context(lowerCAmelCase__ ).Pool() as pool:
A_ : Tuple = processor.batch_decode(lowerCAmelCase__ , lowerCAmelCase__ )
A_ : Optional[int] = list(lowerCAmelCase__ )
with get_context('''fork''' ).Pool() as p:
A_ : Any = decoder.decode_beams_batch(lowerCAmelCase__ , lowerCAmelCase__ )
texts_decoder , logit_scores_decoder , lm_scores_decoder = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(lowerCAmelCase__ , decoded_processor.text )
self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] , decoded_processor.text )
self.assertListEqual(lowerCAmelCase__ , decoded_processor.logit_score )
self.assertListEqual(lowerCAmelCase__ , decoded_processor.lm_score )
def a_ ( self : Tuple ):
"""simple docstring"""
A_ : Optional[Any] = self.get_feature_extractor()
A_ : Optional[Any] = self.get_tokenizer()
A_ : Union[str, Any] = self.get_decoder()
A_ : List[str] = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ , decoder=lowerCAmelCase__ )
A_ : Optional[Any] = self._get_dummy_logits()
A_ : List[Any] = 15
A_ : List[Any] = -20.0
A_ : str = -4.0
A_ : Union[str, Any] = processor.batch_decode(
lowerCAmelCase__ , beam_width=lowerCAmelCase__ , beam_prune_logp=lowerCAmelCase__ , token_min_logp=lowerCAmelCase__ , )
A_ : List[str] = decoded_processor_out.text
A_ : List[Any] = list(lowerCAmelCase__ )
with get_context('''fork''' ).Pool() as pool:
A_ : Dict = decoder.decode_beams_batch(
lowerCAmelCase__ , lowerCAmelCase__ , beam_width=lowerCAmelCase__ , beam_prune_logp=lowerCAmelCase__ , token_min_logp=lowerCAmelCase__ , )
A_ : str = [d[0][0] for d in decoded_decoder_out]
A_ : List[Any] = [d[0][2] for d in decoded_decoder_out]
A_ : List[Any] = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , lowerCAmelCase__ )
self.assertTrue(np.array_equal(lowerCAmelCase__ , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , lowerCAmelCase__ , atol=1E-3 ) )
self.assertTrue(np.array_equal(lowerCAmelCase__ , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , lowerCAmelCase__ , atol=1E-3 ) )
def a_ ( self : Tuple ):
"""simple docstring"""
A_ : Optional[int] = self.get_feature_extractor()
A_ : List[Any] = self.get_tokenizer()
A_ : Optional[int] = self.get_decoder()
A_ : Dict = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ , decoder=lowerCAmelCase__ )
A_ : List[Any] = self._get_dummy_logits()
A_ : Dict = 2.0
A_ : int = 5.0
A_ : str = -20.0
A_ : Tuple = True
A_ : Tuple = processor.batch_decode(
lowerCAmelCase__ , alpha=lowerCAmelCase__ , beta=lowerCAmelCase__ , unk_score_offset=lowerCAmelCase__ , lm_score_boundary=lowerCAmelCase__ , )
A_ : Union[str, Any] = decoded_processor_out.text
A_ : Tuple = list(lowerCAmelCase__ )
decoder.reset_params(
alpha=lowerCAmelCase__ , beta=lowerCAmelCase__ , unk_score_offset=lowerCAmelCase__ , lm_score_boundary=lowerCAmelCase__ , )
with get_context('''fork''' ).Pool() as pool:
A_ : str = decoder.decode_beams_batch(
lowerCAmelCase__ , lowerCAmelCase__ , )
A_ : Optional[Any] = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , lowerCAmelCase__ )
A_ : Dict = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , lowerCAmelCase__ )
def a_ ( self : str ):
"""simple docstring"""
A_ : Any = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
A_ : Dict = processor.decoder.model_container[processor.decoder._model_key]
A_ : Dict = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
A_ : Union[str, Any] = os.listdir(lowerCAmelCase__ )
A_ : Tuple = ["alphabet.json", "language_model"]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def a_ ( self : Dict ):
"""simple docstring"""
A_ : List[str] = snapshot_download('''hf-internal-testing/processor_with_lm''' )
A_ : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained(lowerCAmelCase__ )
A_ : List[Any] = processor.decoder.model_container[processor.decoder._model_key]
A_ : str = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
A_ : List[str] = os.listdir(lowerCAmelCase__ )
A_ : Optional[Any] = os.listdir(lowerCAmelCase__ )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def a_ ( self : Dict ):
"""simple docstring"""
A_ : Dict = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
A_ : str = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
A_ : str = floats_list((3, 10_00) )
A_ : int = processor_wavaveca(lowerCAmelCase__ , return_tensors='''np''' )
A_ : List[Any] = processor_auto(lowerCAmelCase__ , return_tensors='''np''' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
A_ : int = self._get_dummy_logits()
A_ : Tuple = processor_wavaveca.batch_decode(lowerCAmelCase__ )
A_ : int = processor_auto.batch_decode(lowerCAmelCase__ )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def a_ ( self : Any ):
"""simple docstring"""
A_ : List[str] = self.get_feature_extractor()
A_ : List[Any] = self.get_tokenizer()
A_ : Dict = self.get_decoder()
A_ : Tuple = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ , decoder=lowerCAmelCase__ )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , )
@staticmethod
def a_ ( _lowerCamelCase : int , _lowerCamelCase : str ):
"""simple docstring"""
A_ : List[Any] = [d[key] for d in offsets]
return retrieved_list
def a_ ( self : Any ):
"""simple docstring"""
A_ : Any = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
A_ : Tuple = self._get_dummy_logits()[0]
A_ : Union[str, Any] = processor.decode(lowerCAmelCase__ , output_word_offsets=lowerCAmelCase__ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) )
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] )
def a_ ( self : Union[str, Any] ):
"""simple docstring"""
A_ : Dict = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
A_ : List[str] = self._get_dummy_logits()
A_ : Union[str, Any] = processor.batch_decode(lowerCAmelCase__ , output_word_offsets=lowerCAmelCase__ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) )
self.assertListEqual(
[''' '''.join(self.get_from_offsets(lowerCAmelCase__ , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def a_ ( self : Optional[int] ):
"""simple docstring"""
import torch
A_ : Optional[Any] = load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=lowerCAmelCase__ )
A_ : int = ds.cast_column('''audio''' , datasets.Audio(sampling_rate=1_60_00 ) )
A_ : List[str] = iter(lowerCAmelCase__ )
A_ : Optional[int] = next(lowerCAmelCase__ )
A_ : Optional[Any] = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
A_ : Optional[int] = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
A_ : Any = processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values
with torch.no_grad():
A_ : Tuple = model(lowerCAmelCase__ ).logits.cpu().numpy()
A_ : Optional[int] = processor.decode(logits[0] , output_word_offsets=lowerCAmelCase__ )
A_ : int = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
A_ : int = [
{
"start_time": d["start_offset"] * time_offset,
"end_time": d["end_offset"] * time_offset,
"word": d["word"],
}
for d in output["word_offsets"]
]
A_ : Tuple = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"
# output words
self.assertEqual(''' '''.join(self.get_from_offsets(lowerCAmelCase__ , '''word''' ) ) , lowerCAmelCase__ )
self.assertEqual(''' '''.join(self.get_from_offsets(lowerCAmelCase__ , '''word''' ) ) , output.text )
# output times
A_ : Dict = torch.tensor(self.get_from_offsets(lowerCAmelCase__ , '''start_time''' ) )
A_ : str = torch.tensor(self.get_from_offsets(lowerCAmelCase__ , '''end_time''' ) )
# fmt: off
A_ : Dict = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
A_ : int = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=0.01 ) )
self.assertTrue(torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=0.01 ) )
| 167 |
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class BaseStreamer :
def put ( self , value ) -> None:
'''simple docstring'''
raise NotImplementedError()
def end ( self ) -> None:
'''simple docstring'''
raise NotImplementedError()
class TextStreamer ( BaseStreamer ):
def __init__( self , tokenizer , skip_prompt = False , **decode_kwargs ) -> None:
'''simple docstring'''
self.tokenizer = tokenizer
self.skip_prompt = skip_prompt
self.decode_kwargs = decode_kwargs
# variables used in the streaming process
self.token_cache = []
self.print_len = 0
self.next_tokens_are_prompt = True
def put ( self , value ) -> None:
'''simple docstring'''
if len(value.shape ) > 1 and value.shape[0] > 1:
raise ValueError("TextStreamer only supports batch size 1" )
elif len(value.shape ) > 1:
value = value[0]
if self.skip_prompt and self.next_tokens_are_prompt:
self.next_tokens_are_prompt = False
return
# Add the new token to the cache and decodes the entire thing.
self.token_cache.extend(value.tolist() )
text = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
# After the symbol for a new line, we flush the cache.
if text.endswith("\n" ):
printable_text = text[self.print_len :]
self.token_cache = []
self.print_len = 0
# If the last token is a CJK character, we print the characters.
elif len(text ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
printable_text = text[self.print_len :]
self.print_len += len(printable_text )
# Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
# which may change with the subsequent token -- there are probably smarter ways to do this!)
else:
printable_text = text[self.print_len : text.rfind(" " ) + 1]
self.print_len += len(printable_text )
self.on_finalized_text(printable_text )
def end ( self ) -> None:
'''simple docstring'''
if len(self.token_cache ) > 0:
text = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
printable_text = text[self.print_len :]
self.token_cache = []
self.print_len = 0
else:
printable_text = ""
self.next_tokens_are_prompt = True
self.on_finalized_text(printable_text , stream_end=True )
def on_finalized_text ( self , text , stream_end = False ) -> None:
'''simple docstring'''
print(text , flush=True , end="" if not stream_end else None )
def _is_chinese_char ( self , cp ) -> bool:
'''simple docstring'''
if (
(cp >= 0X4E_00 and cp <= 0X9F_FF)
or (cp >= 0X34_00 and cp <= 0X4D_BF) #
or (cp >= 0X2_00_00 and cp <= 0X2_A6_DF) #
or (cp >= 0X2_A7_00 and cp <= 0X2_B7_3F) #
or (cp >= 0X2_B7_40 and cp <= 0X2_B8_1F) #
or (cp >= 0X2_B8_20 and cp <= 0X2_CE_AF) #
or (cp >= 0XF9_00 and cp <= 0XFA_FF)
or (cp >= 0X2_F8_00 and cp <= 0X2_FA_1F) #
): #
return True
return False
class TextIteratorStreamer ( TextStreamer ):
def __init__( self , tokenizer , skip_prompt = False , timeout = None , **decode_kwargs ) -> None:
'''simple docstring'''
super().__init__(tokenizer , skip_prompt , **decode_kwargs )
self.text_queue = Queue()
self.stop_signal = None
self.timeout = timeout
def on_finalized_text ( self , text , stream_end = False ) -> None:
'''simple docstring'''
self.text_queue.put(text , timeout=self.timeout )
if stream_end:
self.text_queue.put(self.stop_signal , timeout=self.timeout )
def __iter__( self ):
'''simple docstring'''
return self
def __next__( self ):
'''simple docstring'''
value = self.text_queue.get(timeout=self.timeout )
if value == self.stop_signal:
raise StopIteration()
else:
return value
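# Typical usage sketch (added for illustration; `model`, `tokenizer` and `inputs` are
# assumptions standing in for any generate-capable model and its tokenized prompt):
# from threading import Thread
# streamer = TextIteratorStreamer(tokenizer , skip_prompt=True , timeout=10.0 )
# thread = Thread(target=model.generate , kwargs=dict(**inputs , streamer=streamer , max_new_tokens=20 ) )
# thread.start()
# for new_text in streamer:  # iterates until end() queues the stop signal
#     print(new_text , end="" )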
| 95 | 0 |
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class FalconModelTester:
'''simple docstring'''
def __init__( self , parent , batch_size=3 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ) -> None:
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
def prepare_config_and_inputs ( self ):
input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length] )
token_type_ids = None
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
choice_labels = ids_tensor([self.batch_size] , self.num_choices )
config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def get_config ( self ) -> FalconConfig:
return FalconConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=True , )
def create_and_check_model ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> None:
model = FalconModel(config=config )
model.to(torch_device )
model.eval()
result = model(input_ids , attention_mask=input_mask )
result = model(input_ids )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> Any:
lowerCAmelCase_ : List[Any] = True
lowerCAmelCase_ : Any = FalconModel(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
lowerCAmelCase_ : int = model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , encoder_attention_mask=lowerCAmelCase__ , )
lowerCAmelCase_ : int = model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , )
lowerCAmelCase_ : Any = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> Union[str, Any]:
lowerCAmelCase_ : Any = FalconForCausalLM(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
lowerCAmelCase_ : Tuple = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> Dict:
lowerCAmelCase_ : str = True
lowerCAmelCase_ : Union[str, Any] = True
lowerCAmelCase_ : str = FalconForCausalLM(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
# first forward pass
lowerCAmelCase_ : int = model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , encoder_attention_mask=lowerCAmelCase__ , use_cache=lowerCAmelCase__ , )
lowerCAmelCase_ : str = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowerCAmelCase_ : Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase_ : List[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
lowerCAmelCase_ : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase_ : Tuple = torch.cat([input_mask, next_mask] , dim=-1 )
lowerCAmelCase_ : str = model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , encoder_attention_mask=lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , )["hidden_states"][0]
lowerCAmelCase_ : Tuple = model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , encoder_attention_mask=lowerCAmelCase__ , past_key_values=lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , )["hidden_states"][0]
# select random slice
lowerCAmelCase_ : int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase_ : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCAmelCase_ : Tuple = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1e-3 ) )
def prepare_config_and_inputs_for_common ( self ):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class snake_case__( UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = (
(
FalconModel,
FalconForCausalLM,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconForQuestionAnswering,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE__ : Any = (FalconForCausalLM,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE__ : int = (
{
"""feature-extraction""": FalconModel,
"""text-classification""": FalconForSequenceClassification,
"""text-generation""": FalconForCausalLM,
"""question-answering""": FalconForQuestionAnswering,
"""token-classification""": FalconForTokenClassification,
"""zero-shot""": FalconForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ : Dict = False
SCREAMING_SNAKE_CASE__ : int = False
def setUp ( self ) -> None:
self.model_tester = FalconModelTester(self )
self.config_tester = ConfigTester(self , config_class=FalconConfig , hidden_size=37 )
def test_config ( self ) -> None:
self.config_tester.run_common_tests()
def test_model ( self ) -> None:
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs )
def test_position_embedding_types ( self ) -> None:
config, *inputs = self.model_tester.prepare_config_and_inputs()
for alibi in [True, False]:
config.alibi = alibi
self.model_tester.create_and_check_model(config , *inputs )
def lowercase_ ( self ) -> Optional[Any]:
lowerCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ : List[str] = 3
lowerCAmelCase_ : List[Any] = input_dict["input_ids"]
lowerCAmelCase_ : Tuple = input_ids.ne(1 ).to(lowerCAmelCase__ )
lowerCAmelCase_ : Optional[int] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase_ : Tuple = FalconForSequenceClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
lowerCAmelCase_ : Optional[Any] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowercase_ ( self ) -> int:
lowerCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ : Optional[Any] = 3
lowerCAmelCase_ : Dict = "single_label_classification"
lowerCAmelCase_ : Optional[int] = input_dict["input_ids"]
lowerCAmelCase_ : Any = input_ids.ne(1 ).to(lowerCAmelCase__ )
lowerCAmelCase_ : Optional[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase_ : Dict = FalconForSequenceClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
lowerCAmelCase_ : Optional[int] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowercase_ ( self ) -> List[str]:
lowerCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ : str = input_dict["input_ids"]
lowerCAmelCase_ : Any = FalconForCausalLM(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
lowerCAmelCase_ : Union[str, Any] = model(lowerCAmelCase__ , use_cache=lowerCAmelCase__ )
lowerCAmelCase_ : Union[str, Any] = input_ids.shape[0]
lowerCAmelCase_ : Union[str, Any] = model._convert_to_rw_cache(result.past_key_values )
lowerCAmelCase_ : int = model._convert_cache_to_standard_format(lowerCAmelCase__ , lowerCAmelCase__ )
for layer in range(len(lowerCAmelCase__ ) ):
for tensor_idx in range(2 ):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )
def lowercase_ ( self ) -> int:
lowerCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ : List[str] = 3
lowerCAmelCase_ : Union[str, Any] = "multi_label_classification"
lowerCAmelCase_ : Optional[int] = input_dict["input_ids"]
lowerCAmelCase_ : List[Any] = input_ids.ne(1 ).to(lowerCAmelCase__ )
lowerCAmelCase_ : Tuple = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowerCAmelCase_ : Tuple = FalconForSequenceClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
lowerCAmelCase_ : Tuple = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowercase_ ( self ) -> Optional[int]:
for model_class in self.all_generative_model_classes:
lowerCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
# If it doesn't support cache, pass the test
if not hasattr(lowerCAmelCase__ , '''use_cache''' ):
return
lowerCAmelCase_ : int = model_class(lowerCAmelCase__ ).to(lowerCAmelCase__ )
if "use_cache" not in inputs:
lowerCAmelCase_ : Optional[int] = True
lowerCAmelCase_ : Dict = model(**lowerCAmelCase__ )
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
lowerCAmelCase_ : int = (
getattr(lowerCAmelCase__ , '''decoder_layers''' , lowerCAmelCase__ )
or getattr(lowerCAmelCase__ , '''num_decoder_layers''' , lowerCAmelCase__ )
or config.num_hidden_layers
)
lowerCAmelCase_ : str = getattr(lowerCAmelCase__ , '''num_kv_heads''' , config.num_attention_heads )
lowerCAmelCase_ : Tuple = getattr(lowerCAmelCase__ , '''d_model''' , config.hidden_size )
lowerCAmelCase_ : Any = embed_dim // num_attention_heads
lowerCAmelCase_ : List[Any] = outputs["past_key_values"]
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
lowerCAmelCase_ : Tuple = inputs["input_ids"].shape
for i in range(lowerCAmelCase__ ):
if config.new_decoder_architecture:
lowerCAmelCase_ : Dict = config.num_attention_heads
elif config.multi_query:
lowerCAmelCase_ : List[str] = 1
self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
self.assertEqual(
past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
@require_torch
class snake_case__( unittest.TestCase ):
'''simple docstring'''
@slow
def lowercase_ ( self ) -> Dict:
lowerCAmelCase_ : str = AutoTokenizer.from_pretrained('''Rocketknight1/falcon-rw-1b''' )
lowerCAmelCase_ : str = FalconForCausalLM.from_pretrained('''Rocketknight1/falcon-rw-1b''' )
model.eval()
model.to(lowerCAmelCase__ )
lowerCAmelCase_ : int = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(lowerCAmelCase__ )
lowerCAmelCase_ : str = (
"My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday."
)
lowerCAmelCase_ : Tuple = model.generate(**lowerCAmelCase__ , do_sample=lowerCAmelCase__ , max_new_tokens=1_9 )
lowerCAmelCase_ : Tuple = tokenizer.batch_decode(lowerCAmelCase__ )[0]
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def lowercase_ ( self ) -> List[Any]:
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
lowerCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained(lowerCAmelCase__ )
lowerCAmelCase_ : Any = FalconForCausalLM.from_pretrained(lowerCAmelCase__ )
model.eval()
model.to(lowerCAmelCase__ )
lowerCAmelCase_ : Optional[int] = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(lowerCAmelCase__ )
# We just test that these run without errors - the models are randomly initialized
# and so the actual text outputs will be garbage
model.generate(**lowerCAmelCase__ , do_sample=lowerCAmelCase__ , max_new_tokens=4 )
model.generate(**lowerCAmelCase__ , do_sample=lowerCAmelCase__ , max_new_tokens=4 )
model.generate(**lowerCAmelCase__ , num_beams=2 , max_new_tokens=4 )
@slow
def lowercase_ ( self ) -> Optional[Any]:
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
lowerCAmelCase_ : str = AutoTokenizer.from_pretrained(lowerCAmelCase__ )
lowerCAmelCase_ : int = FalconForCausalLM.from_pretrained(lowerCAmelCase__ )
model.eval()
model.to(device=lowerCAmelCase__ )
lowerCAmelCase_ : Optional[int] = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(lowerCAmelCase__ )
# Test results are the same with and without cache
lowerCAmelCase_ : Optional[int] = model.generate(**lowerCAmelCase__ , do_sample=lowerCAmelCase__ , max_new_tokens=2_0 , use_cache=lowerCAmelCase__ )
lowerCAmelCase_ : Any = model.generate(**lowerCAmelCase__ , do_sample=lowerCAmelCase__ , max_new_tokens=2_0 , use_cache=lowerCAmelCase__ )
self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 ) | 262 |
def solution ( length : int = 50 ) -> int:
"""simple docstring"""
ways_number = [1] * (length + 1)
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(F"""{solution() = }""")
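# Sanity notes (added for illustration): the problem statement's example gives
# solution(7) == 17, and the default solution(50) is expected to print 16475640049
# (Project Euler problem 114).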
| 95 | 0 |
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = """Hello, World!"""
SAMPLE_LANGUAGE = """en_XX"""
def convert_xmod_checkpoint_to_pytorch ( xmod_checkpoint_path : str , pytorch_dump_folder_path : str , classification_head : bool ):
data_dir = Path("data_bin" )
xmod = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(xmod_checkpoint_path ).parent ) , checkpoint_file=Path(xmod_checkpoint_path ).name , _name="xmod_base" , arch="xmod_base" , task="multilingual_masked_lm" , data_name_or_path=str(data_dir ) , bpe="sentencepiece" , sentencepiece_model=str(Path(xmod_checkpoint_path ).parent / "sentencepiece.bpe.model" ) , src_dict=str(data_dir / "dict.txt" ) , )
xmod.eval() # disable dropout
print(xmod )
xmod_sent_encoder = xmod.model.encoder.sentence_encoder
config = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , "bottleneck" , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
print("Our X-MOD config:" , config )
model = XmodForSequenceClassification(config ) if classification_head else XmodForMaskedLM(config )
model.eval()
# Now let's copy all the weights.
# Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c xmod doesn't use them.

    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]

        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        ):
            raise AssertionError("Dimensions of self-attention weights do not match.")

        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias

        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError("Dimensions of self-attention output weights do not match.")
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias

        # intermediate (fc1 in fairseq)
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match.")
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias

        # output (fc2 in fairseq)
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match.")
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias

        if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
            raise AssertionError("Lists of language adapters do not match.")
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.dense1.weight = from_adapter.fc1.weight
            to_adapter.dense1.bias = from_adapter.fc1.bias
            to_adapter.dense2.weight = from_adapter.fc2.weight
            to_adapter.dense2.bias = from_adapter.fc2.bias

        # end of layer

    if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias

    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--xmod_checkpoint_path", default=None, type=str, required=True, help="Path to the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--classification_head", action="store_true", help="Whether to convert a final classification head."
    )
    args = parser.parse_args()
    convert_xmod_checkpoint_to_pytorch(
        args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
)
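# Illustrative invocation (paths and the script filename are placeholders, not
# part of the original source): the checkpoint directory must contain
# sentencepiece.bpe.model, and a fairseq data_bin/dict.txt is expected.
#
#   python convert_xmod_checkpoint.py \
#       --xmod_checkpoint_path /path/to/xmod.base/model.pt \
#       --pytorch_dump_folder_path /path/to/converted-xmod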
| 92 |
from __future__ import annotations
def bucket_sort(my_list: list) -> list:
    """Bucket sort with one bucket per unit-wide value range."""
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]

    for i in my_list:
        buckets[int(i - min_value)].append(i)

    return [v for bucket in buckets for v in sorted(bucket)]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
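    # Floats bin into unit-wide buckets as well (illustrative extra check, not
    # part of the original assertions):
    assert bucket_sort([0.4, 1.2, 0.1, 3.3]) == [0.1, 0.4, 1.2, 3.3]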
| 95 | 0 |
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))
class SegformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[16, 32, 64, 128],
        downsampling_rates=[1, 4, 8, 16],
        num_attention_heads=[1, 2, 4, 8],
        is_training=True,
        use_labels=True,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SegformerConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            num_encoder_blocks=self.num_encoder_blocks,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width)
        )

    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        self.parent.assertGreater(result.loss, 0.0)

    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SegformerModel,
            "image-classification": SegformerForImageClassification,
            "image-segmentation": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False

    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)

    def test_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)

    @unittest.skip("SegFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions

            expected_num_attentions = sum(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )

            # verify the last attentions (last block, last layer)
            expected_seq_len = (self.model_tester.image_size // 32) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:]),
                [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len],
            )

            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 1, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = self.model_tester.num_encoder_blocks
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_image_segmentation_ade(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_segmentation_city(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
        ).to(torch_device)

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1))

    @slow
    def test_post_processing_semantic_segmentation(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, expected_shape)
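# Minimal inference sketch outside the unittest harness (illustrative; assumes
# Hub access, model id taken from the integration tests above):
#
#   processor = SegformerImageProcessor.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
#   model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
#   inputs = processor(images=prepare_img(), return_tensors="pt")
#   logits = model(**inputs).logits  # shape (1, num_labels, height / 4, width / 4)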
| 178 |
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Apply the logistic sigmoid 1 / (1 + exp(-x)) elementwise."""
    return 1 / (1 + np.exp(-vector))
if __name__ == "__main__":
import doctest
doctest.testmod()
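    # Illustrative call (not part of the original doctests):
    print(sigmoid(np.array([-1.0, 0.0, 1.0])))  # ~ [0.2689 0.5 0.7311]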
| 95 | 0 |
"""simple docstring"""
def A_ ( _lowerCAmelCase : list, _lowerCAmelCase : int = 0 ):
"""simple docstring"""
_a = length or len(_lowerCAmelCase )
_a = False
for i in range(length - 1 ):
if list_data[i] > list_data[i + 1]:
_a = list_data[i + 1], list_data[i]
_a = True
return list_data if not swapped else bubble_sort(_lowerCAmelCase, length - 1 )
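# Illustrative call (not in the original module): bubble_sort([0, 5, 2, 3, 2])
# returns [0, 2, 2, 3, 5]; the input list is also sorted in place.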
if __name__ == "__main__":
import doctest
doctest.testmod() | 320 |
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
def _error(example_no, data_set="train"):
    """Difference between the predicted and actual output for one example."""
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)


def _hypothesis_value(data_input_tuple):
    """Dot product of the inputs with parameters 1..n, plus the bias parameter_vector[0]."""
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val
def output(example_no, data_set):
    """Actual output for the given example, from train or test data."""
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    """Hypothesis value for the given example, from train or test data."""
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None
def summation_of_cost_derivative(index, end=m):
    """Sum of (error * input) over examples; index -1 sums the plain errors for the bias term."""
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    """Derivative of the mean squared-error cost w.r.t. parameter index + 1."""
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value
def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))
if __name__ == "__main__":
run_gradient_descent()
print("""\nTesting gradient descent for a linear hypothesis function.\n""")
test_gradient_descent()
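    # One worked step (illustrative): with parameter_vector = [2, 4, 1, 5], the
    # hypothesis for input (5, 2, 3) is 2 + 4*5 + 1*2 + 5*3 = 39, so the error on
    # that training example is 39 - 15 = 24; run_gradient_descent then moves each
    # parameter against its averaged cost derivative, scaled by LEARNING_RATE.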
| 95 | 0 |
from math import ceil, sqrt
def solution(limit: int = 1000000) -> int:
    """Count the hollow square laminae that use at most `limit` tiles (Project Euler 173)."""
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        # hole width must have the same parity as the outer width
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1

        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1

    return answer
if __name__ == "__main__":
    print(f"{solution() = }")
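    # Cross-check for a small limit (illustrative): a lamina with outer width o and
    # hole width h (same parity, h >= 1, o - h >= 2) uses o*o - h*h tiles, and
    #   sum(1 for o in range(3, 30) for h in range(o - 2, 0, -2) if o * o - h * h <= 100)
    # agrees with solution(100) == 41.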
| 15 |
def pancake_sort(arr):
    """Sort by flipping the largest unsorted element to the front, then flipping it into place."""
    cur = len(arr)
    while cur > 1:
        # Find the maximum number in arr
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse whole list
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(pancake_sort(unsorted))
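    # Quick sanity check (illustrative, not part of the original script):
    assert pancake_sort(list(unsorted)) == sorted(unsorted)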
| 95 | 0 |
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset(Dataset):
    """Wrapper around the token sequences used by the distillation trainer."""

    def __init__(self, params, data):
        self.params = params

        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        """Some sanity checks."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))
    def remove_long_sequences(self):
        """Sequences that are too long are split by chunks of max_model_input_size."""
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f"Splitting {sum(indices)} too long sequences.")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)
    def remove_empty_sequences(self):
        """Too short sequences are simply removed. This could be tuned."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")

    def remove_unknown_sequences(self):
        """Remove sequences with a (too) high level of unknown tokens."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")

    def print_statistics(self):
        """Print some statistics on the corpus. Only the master process logs."""
        if not self.params.is_master:
            return
        logger.info(f"{len(self)} sequences")
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
    def batch_sequences(self, batch):
        """Do the padding and transform into torch.tensor."""
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
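# Minimal usage sketch (illustrative; `params` here is a stand-in namespace, not
# the trainer's real argument object):
#
#   from types import SimpleNamespace
#   from torch.utils.data import DataLoader
#
#   params = SimpleNamespace(
#       mlm=True,
#       max_model_input_size=512,
#       special_tok_ids={"cls_token": 101, "sep_token": 102, "pad_token": 0, "unk_token": 100},
#       is_master=True,
#   )
#   dataset = LmSeqsDataset(params, data)  # data: list of integer numpy arrays
#   loader = DataLoader(dataset, batch_size=8, collate_fn=dataset.batch_sequences)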
| 169 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class CLIPSegProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_processor_with_visual_prompt(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
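# Minimal end-to-end sketch mirroring the tests above (illustrative; the
# checkpoint id is an assumption, any CLIPSeg checkpoint should work):
#
#   from transformers import CLIPSegProcessor
#   processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#   inputs = processor(text=["a cat"], images=image, return_tensors="pt")
#   # -> dict with input_ids, attention_mask, pixel_values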
| 95 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : List[Any] = logging.get_logger(__name__)
def get_yolos_config(yolos_name: str) -> YolosConfig:
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]

    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(name):
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")

    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_yolos_checkpoint(yolos_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]]
        )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]]
        )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]]
        )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]]
        )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]]
        )
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")

    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--yolos_name",
default="yolos_s_200_pre",
type=str,
help=(
"Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
" 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
),
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
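# Illustrative invocation (checkpoint path and script filename are placeholders):
#
#   python convert_yolos_to_pytorch.py --yolos_name yolos_s_200_pre \
#       --checkpoint_path /path/to/yolos_s_200_pre.pth \
#       --pytorch_dump_folder_path /path/to/yolos-small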
| 156 |
def is_arithmetic_series(series: list) -> bool:
    """Return True when all consecutive differences in the series are equal."""
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series: list) -> float:
    """Return the arithmetic mean: the sum divided by the number of terms."""
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)
if __name__ == "__main__":
import doctest
doctest.testmod()
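    # Illustrative calls (not in the original doctests):
    # is_arithmetic_series([2, 4, 6]) -> True, arithmetic_mean([2, 4, 6]) -> 4.0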
| 95 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
    "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
    "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
    "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
    "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
    "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
    "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
    "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}
class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000, embedding_size=128, hidden_size=4096,
        num_hidden_layers=12, num_hidden_groups=1, num_attention_heads=64,
        intermediate_size=16384, inner_group_num=1, hidden_act="gelu_new",
        hidden_dropout_prob=0, attention_probs_dropout_prob=0,
        max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02,
        layer_norm_eps=1e-12, classifier_dropout_prob=0.1,
        position_embedding_type="absolute", pad_token_id=0, bos_token_id=2,
        eos_token_id=3, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
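# Illustrative use of the ONNX axis mapping (not part of the original module):
#
#   onnx_config = AlbertOnnxConfig(AlbertConfig(), task="default")
#   print(onnx_config.inputs)
#   # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}), ...])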
| 268 |
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        # mean-pool token embeddings, masking out padding positions
        embs2 = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs2), embs
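# Minimal usage sketch (illustrative; the tokenizer checkpoint is an assumption,
# any XLM-R tokenizer matching the model weights works):
#
#   from transformers import AutoTokenizer
#   tok = AutoTokenizer.from_pretrained("xlm-roberta-large")
#   batch = tok(["a photo of a cat"], return_tensors="pt", padding=True)
#   text_embs, token_embs = model(batch["input_ids"], batch["attention_mask"])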
| 95 | 0 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
__UpperCAmelCase : int = False
class VersatileDiffusionDualGuidedPipelineFastTests(unittest.TestCase):
    pass
@nightly
@require_torch_gpu
class VersatileDiffusionDualGuidedPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_remove_unused_weights_save_load(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        second_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )

        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt",
            image=second_prompt,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt",
            image=second_prompt,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"
    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt,
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
| 111 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Creates a set of `DataLoader`s for the glue dataset,
    using "bert-base-cased" as the tokenizer.

    Args:
        accelerator (`Accelerator`):
            An `Accelerator` object
        batch_size (`int`, *optional*):
            The batch size for the train and validation DataLoaders.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"]
        )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )
# Instantiate dataloaders.
a__ : Any =DataLoader(
tokenized_datasets["train"] , shuffle=SCREAMING_SNAKE_CASE , collate_fn=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE )
a__ : int =DataLoader(
tokenized_datasets["validation"] , shuffle=SCREAMING_SNAKE_CASE , collate_fn=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch["labels"]))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(predictions=predictions, references=references)

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
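# Sketch (added for illustration, not part of the original script): the manual
# truncation above can be replaced by `Accelerator.gather_for_metrics`, which
# drops the duplicated samples that distributed samplers pad onto the last
# batch automatically. Assumes the same model/dataloader/metric objects as above.
def evaluate_with_gather_for_metrics(accelerator, model, eval_dataloader, metric):
    model.eval()
    for batch in eval_dataloader:
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # gather_for_metrics truncates the final batch for us
        predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
        metric.add_batch(predictions=predictions, references=references)
    return metric.compute()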
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose "
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 "
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
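# Usage sketch (the file name below is an assumption, not from this script):
# launch through the Accelerate CLI so the same code covers CPU, multi-GPU and TPU:
#   accelerate config
#   accelerate launch multi_process_metrics.py --mixed_precision fp16
# or run a single process for debugging:
#   python multi_process_metrics.py --cpu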
| 95 | 0 |
from __future__ import annotations
def bucket_sort(my_list: list) -> list:
    '''Sort a list of numbers by scattering them into per-integer-offset buckets.'''
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]
    for i in my_list:
        buckets[int(i - min_value)].append(i)
    return [v for bucket in buckets for v in sorted(bucket)]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
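# Illustrative extra checks (added): buckets are keyed by the integer offset
# from the minimum, so floats and duplicates also land in the right order.
assert bucket_sort([0.4, 1.2, 0.1, 0.5]) == [0.1, 0.4, 0.5, 1.2]
assert bucket_sort([5, 5, 5]) == [5, 5, 5]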
| 138 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class __lowerCAmelCase ( unittest.TestCase):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=7 , lowerCAmelCase__=3 , lowerCAmelCase__=1_8 , lowerCAmelCase__=3_0 , lowerCAmelCase__=4_0_0 , lowerCAmelCase__=True , lowerCAmelCase__=None , lowerCAmelCase__=True , lowerCAmelCase__=None , lowerCAmelCase__=True , ) -> Optional[Any]:
'''simple docstring'''
a__ : Union[str, Any] =size if size is not None else {"shortest_edge": 2_0}
a__ : List[str] =crop_size if crop_size is not None else {"height": 1_8, "width": 1_8}
a__ : Tuple =parent
a__ : Union[str, Any] =batch_size
a__ : List[str] =num_channels
a__ : List[Any] =image_size
a__ : str =min_resolution
a__ : Optional[int] =max_resolution
a__ : Tuple =do_resize
a__ : Union[str, Any] =size
a__ : List[Any] =do_center_crop
a__ : List[str] =crop_size
a__ : Optional[int] =do_flip_channel_order
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class __lowerCAmelCase ( UpperCamelCase__ , unittest.TestCase):
_lowercase : int = MobileViTImageProcessor if is_vision_available() else None
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
a__ : Tuple =MobileViTImageProcessingTester(self )
@property
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
a__ : str =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_resize" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "size" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_center_crop" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "center_crop" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_flip_channel_order" ) )
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
a__ : List[Any] =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 2_0} )
self.assertEqual(image_processor.crop_size , {"height": 1_8, "width": 1_8} )
a__ : Union[str, Any] =self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {"shortest_edge": 4_2} )
self.assertEqual(image_processor.crop_size , {"height": 8_4, "width": 8_4} )
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
a__ : List[str] =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a__ : Any =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image )
# Test not batched input
a__ : Tuple =image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
a__ : List[Any] =image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def _lowercase ( self ) -> Any:
'''simple docstring'''
a__ : Optional[Any] =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a__ : List[Any] =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , np.ndarray )
# Test not batched input
a__ : Tuple =image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
a__ : int =image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
a__ : int =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a__ : Tuple =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
# Test not batched input
a__ : List[str] =image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
a__ : List[str] =image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 95 | 0 |
"""simple docstring"""
import warnings
from ..trainer import Trainer
from ..utils import logging
_lowerCamelCase : Optional[int] = logging.get_logger(__name__)
class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
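# Migration sketch (illustrative, not from the original file): since the
# subclass above only forwards its arguments, existing code keeps working by
# swapping in the plain Trainer. The TrainingArguments values are placeholders.
# from transformers import Trainer, TrainingArguments
# trainer = Trainer(model=model, args=TrainingArguments(output_dir="out"), train_dataset=train_ds)
# trainer.train()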
| 167 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class __lowerCAmelCase ( unittest.TestCase):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=7 , lowerCAmelCase__=3 , lowerCAmelCase__=1_8 , lowerCAmelCase__=3_0 , lowerCAmelCase__=4_0_0 , lowerCAmelCase__=True , lowerCAmelCase__=None , lowerCAmelCase__=True , lowerCAmelCase__=None , ) -> Optional[int]:
'''simple docstring'''
a__ : str =size if size is not None else {"shortest_edge": 2_0}
a__ : Union[str, Any] =crop_size if crop_size is not None else {"height": 1_8, "width": 1_8}
a__ : Tuple =parent
a__ : Optional[int] =batch_size
a__ : Any =num_channels
a__ : List[str] =image_size
a__ : Dict =min_resolution
a__ : List[Any] =max_resolution
a__ : Dict =do_resize
a__ : Union[str, Any] =size
a__ : str =do_center_crop
a__ : List[str] =crop_size
def _lowercase ( self ) -> str:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class __lowerCAmelCase ( UpperCamelCase__ , unittest.TestCase):
_lowercase : Optional[Any] = MobileNetVaImageProcessor if is_vision_available() else None
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
a__ : Optional[int] =MobileNetVaImageProcessingTester(self )
@property
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase ( self ) -> Any:
'''simple docstring'''
a__ : List[str] =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_resize" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "size" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_center_crop" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "crop_size" ) )
def _lowercase ( self ) -> str:
'''simple docstring'''
a__ : Any =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 2_0} )
self.assertEqual(image_processor.crop_size , {"height": 1_8, "width": 1_8} )
a__ : Dict =self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {"shortest_edge": 4_2} )
self.assertEqual(image_processor.crop_size , {"height": 8_4, "width": 8_4} )
def _lowercase ( self ) -> Any:
'''simple docstring'''
pass
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
a__ : Dict =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a__ : Optional[Any] =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image )
# Test not batched input
a__ : List[Any] =image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
a__ : Dict =image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def _lowercase ( self ) -> int:
'''simple docstring'''
a__ : str =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a__ : str =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , np.ndarray )
# Test not batched input
a__ : List[str] =image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
a__ : Union[str, Any] =image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
a__ : Any =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a__ : int =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
# Test not batched input
a__ : Optional[Any] =image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
a__ : str =image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 95 | 0 |
import unittest

import torch

from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow


torch.backends.cuda.matmul.allow_tf32 = False


class TrainingTests(unittest.TestCase):
    def get_model_optimizer(self, resolution=32):
        set_seed(0)
        model = UNet2DModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer

    @slow
    def test_training_step_equality(self):
        device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps

        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1000, (4,)).long().to(device) for _ in range(4)]

        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
| 262 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
    "tokenization_convbert": ["ConvBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convbert"] = [
        "CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvBertForMaskedLM",
        "ConvBertForMultipleChoice",
        "ConvBertForQuestionAnswering",
        "ConvBertForSequenceClassification",
        "ConvBertForTokenClassification",
        "ConvBertLayer",
        "ConvBertModel",
        "ConvBertPreTrainedModel",
        "load_tf_weights_in_convbert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convbert"] = [
        "TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFConvBertForMaskedLM",
        "TFConvBertForMultipleChoice",
        "TFConvBertForQuestionAnswering",
        "TFConvBertForSequenceClassification",
        "TFConvBertForTokenClassification",
        "TFConvBertLayer",
        "TFConvBertModel",
        "TFConvBertPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 95 | 0 |
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] for the input sample x[n]."""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")
    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    phase = np.angle(np.fft.fft(outputs))
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(phase, -2 * pi))
    plt.show()
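# Illustrative concrete filter (an assumption, not part of the original module):
# a one-pole IIR low-pass satisfying the FilterType protocol above, so both
# plotting helpers can be exercised end to end.
class OnePoleLowpass:
    def __init__(self, alpha: float = 0.1) -> None:
        self.alpha = alpha
        self.prev = 0.0

    def process(self, sample: float) -> float:
        # y[n] = alpha * x[n] + (1 - alpha) * y[n - 1]
        self.prev = self.alpha * sample + (1 - self.alpha) * self.prev
        return self.prev


if __name__ == "__main__":
    show_frequency_response(OnePoleLowpass(), 48_000)
    show_phase_response(OnePoleLowpass(), 48_000)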
| 92 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase : Dict = logging.get_logger(__name__)
UpperCAmelCase : Tuple = {
"""caidas/swin2sr-classicalsr-x2-64""": (
"""https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"""
),
}
class Swin2SRConfig(PretrainedConfig):
    model_type = "swin2sr"
    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
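# Usage sketch (illustrative): the defaults correspond to the classical x2
# super-resolution setup; any field can be overridden as with other configs.
# config = Swin2SRConfig(upscale=4)
# assert config.num_layers == len(config.depths)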
| 95 | 0 |
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    '''Recursive linear search that closes in from both ends; returns -1 if absent.'''
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
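# Illustrative calls (added): the search advances one index from each end per
# recursion, so the recursion depth is at most len(list_data) // 2 + 1.
assert search([1, 2, 3, 4, 5], 4) == 3
assert search([1, 2, 3, 4, 5], 6) == -1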
| 178 |
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class __lowerCAmelCase :
pass
| 95 | 0 |
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
    "files",
    [
        ["full:README.md", "dataset_infos.json"],
        ["empty:README.md", "dataset_infos.json"],
        ["dataset_infos.json"],
        ["full:README.md"],
    ],
)
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n  dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42


@pytest.mark.parametrize(
    "dataset_info",
    [
        DatasetInfo(),
        DatasetInfo(
            description="foo",
            features=Features({"a": Value("int32")}),
            builder_name="builder",
            config_name="config",
            version="1.0.0",
            splits=[{"name": "train"}],
            download_size=42,
        ),
    ],
)
def test_dataset_info_dump_and_reload(tmp_path, dataset_info: DatasetInfo):
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))


def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description="foo",
        citation="bar",
        homepage="https://foo.bar",
        license="CC0",
        features=Features({"a": Value("int32")}),
        post_processed={},
        supervised_keys=(),
        task_templates=[],
        builder_name="builder",
        config_name="config",
        version="1.0.0",
        splits=[{"name": "train", "num_examples": 42}],
        download_checksums={},
        download_size=1337,
        post_processing_size=442,
        dataset_size=1234,
        size_in_bytes=1337 + 442 + 1234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded


def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}


@pytest.mark.parametrize(
    "dataset_infos_dict",
    [
        DatasetInfosDict(),
        DatasetInfosDict({"default": DatasetInfo()}),
        DatasetInfosDict({"my_config_name": DatasetInfo()}),
        DatasetInfosDict(
            {
                "default": DatasetInfo(
                    description="foo",
                    features=Features({"a": Value("int32")}),
                    builder_name="builder",
                    config_name="config",
                    version="1.0.0",
                    splits=[{"name": "train"}],
                    download_size=42,
                )
            }
        ),
        DatasetInfosDict(
            {
                "v1": DatasetInfo(dataset_size=42),
                "v2": DatasetInfo(dataset_size=1337),
            }
        ),
    ],
)
def test_dataset_infos_dict_dump_and_reload(tmp_path, dataset_infos_dict: DatasetInfosDict):
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)
    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, "README.md"))
| 320 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool


class TextSummarizationTool(PipelineTool):
    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
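# Usage sketch (illustrative; the model loads lazily on the first call, and the
# dialogue below is a made-up example):
# summarizer = TextSummarizationTool()
# print(summarizer("Alice: lunch at noon? Bob: sure, the usual place."))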
| 95 | 0 |
def text_justification(word: str, max_width: int) -> list:
    '''Greedily split `word` into fully justified lines of exactly `max_width` characters.'''
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line: list = []
    width = 0
    for inner_word in words:
        if width + len(inner_word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without spaces)
            # len(inner_word) = length of current word
            # len(line) = number of spaces to insert between words
            line.append(inner_word)
            width += len(inner_word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [inner_word], len(inner_word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
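# Illustrative run (added): every returned line is exactly `max_width` wide.
# text_justification("This is an example of text justification.", 16)
# -> ['This    is    an', 'example  of text', 'justification.  ']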
| 15 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
UpperCAmelCase : int = logging.get_logger(__name__)
UpperCAmelCase : Optional[Any] = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
UpperCAmelCase : List[Any] = [
"""small""",
"""small-base""",
"""medium""",
"""medium-base""",
"""intermediate""",
"""intermediate-base""",
"""large""",
"""large-base""",
"""xlarge""",
"""xlarge-base""",
]
UpperCAmelCase : Optional[int] = {
"""vocab_file""": {
"""funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt""",
"""funnel-transformer/small-base""": """https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt""",
"""funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt""",
"""funnel-transformer/medium-base""": (
"""https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"""
),
"""funnel-transformer/intermediate""": (
"""https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"""
),
"""funnel-transformer/intermediate-base""": (
"""https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"""
),
"""funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt""",
"""funnel-transformer/large-base""": """https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt""",
"""funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt""",
"""funnel-transformer/xlarge-base""": (
"""https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json""",
"""funnel-transformer/small-base""": (
"""https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json""",
"""funnel-transformer/medium-base""": (
"""https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/intermediate""": (
"""https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"""
),
"""funnel-transformer/intermediate-base""": (
"""https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json""",
"""funnel-transformer/large-base""": (
"""https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json""",
"""funnel-transformer/xlarge-base""": (
"""https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"""
),
},
}
UpperCAmelCase : Optional[int] = {F"""funnel-transformer/{name}""": 512 for name in _model_names}
UpperCAmelCase : Optional[int] = {F"""funnel-transformer/{name}""": {"""do_lower_case""": True} for name in _model_names}
class __lowerCAmelCase ( UpperCamelCase__):
_lowercase : str = VOCAB_FILES_NAMES
_lowercase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
_lowercase : Dict = PRETRAINED_INIT_CONFIGURATION
_lowercase : Union[str, Any] = FunnelTokenizer
_lowercase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase : int = 2
def __init__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=True , lowerCAmelCase__="<unk>" , lowerCAmelCase__="<sep>" , lowerCAmelCase__="<pad>" , lowerCAmelCase__="<cls>" , lowerCAmelCase__="<mask>" , lowerCAmelCase__="<s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=None , lowerCAmelCase__="##" , **lowerCAmelCase__ , ) -> Optional[int]:
'''simple docstring'''
super().__init__(
lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , clean_text=lowerCAmelCase__ , tokenize_chinese_chars=lowerCAmelCase__ , strip_accents=lowerCAmelCase__ , wordpieces_prefix=lowerCAmelCase__ , **lowerCAmelCase__ , )
a__ : Optional[Any] =json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , lowerCAmelCase__ ) != do_lower_case
or normalizer_state.get("strip_accents" , lowerCAmelCase__ ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , lowerCAmelCase__ ) != tokenize_chinese_chars
):
a__ : List[str] =getattr(lowerCAmelCase__ , normalizer_state.pop("type" ) )
a__ : Union[str, Any] =do_lower_case
a__ : Any =strip_accents
a__ : Optional[Any] =tokenize_chinese_chars
a__ : Dict =normalizer_class(**lowerCAmelCase__ )
a__ : Any =do_lower_case
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__=None ) -> str:
'''simple docstring'''
a__ : Dict =[self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
'''simple docstring'''
a__ : Optional[int] =[self.sep_token_id]
a__ : Union[str, Any] =[self.cls_token_id]
if token_ids_a is None:
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0]
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple[str]:
'''simple docstring'''
a__ : Tuple =self._tokenizer.model.save(lowerCAmelCase__ , name=lowerCAmelCase__ )
return tuple(lowerCAmelCase__ )
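# Usage sketch (illustrative; requires network access to fetch the checkpoint).
# Funnel marks the [CLS] position with token type id 2, which is what the
# `cls_token_type_id = 2` class attribute above encodes:
# tokenizer = FunnelTokenizerFast.from_pretrained("funnel-transformer/small")
# encoding = tokenizer("Hello", "world")
# assert encoding["token_type_ids"][0] == 2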
| 95 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
_lowerCAmelCase : Optional[Any] = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
_lowerCAmelCase : Union[str, Any] = {
"""vocab_file""": {
"""google/electra-small-generator""": (
"""https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"""
),
"""google/electra-base-generator""": """https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt""",
"""google/electra-large-generator""": (
"""https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"""
),
"""google/electra-small-discriminator""": (
"""https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"""
),
"""google/electra-base-discriminator""": (
"""https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"""
),
"""google/electra-large-discriminator""": (
"""https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""google/electra-small-generator""": (
"""https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"""
),
"""google/electra-base-generator""": (
"""https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"""
),
"""google/electra-large-generator""": (
"""https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"""
),
"""google/electra-small-discriminator""": (
"""https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"""
),
"""google/electra-base-discriminator""": (
"""https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"""
),
"""google/electra-large-discriminator""": (
"""https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"""
),
},
}
_lowerCAmelCase : List[str] = {
"""google/electra-small-generator""": 5_1_2,
"""google/electra-base-generator""": 5_1_2,
"""google/electra-large-generator""": 5_1_2,
"""google/electra-small-discriminator""": 5_1_2,
"""google/electra-base-discriminator""": 5_1_2,
"""google/electra-large-discriminator""": 5_1_2,
}
_lowerCAmelCase : Tuple = {
"""google/electra-small-generator""": {"""do_lower_case""": True},
"""google/electra-base-generator""": {"""do_lower_case""": True},
"""google/electra-large-generator""": {"""do_lower_case""": True},
"""google/electra-small-discriminator""": {"""do_lower_case""": True},
"""google/electra-base-discriminator""": {"""do_lower_case""": True},
"""google/electra-large-discriminator""": {"""do_lower_case""": True},
}
class _UpperCamelCase ( UpperCamelCase__ ):
UpperCAmelCase_ = VOCAB_FILES_NAMES
UpperCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ = PRETRAINED_INIT_CONFIGURATION
UpperCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase_ = ElectraTokenizer
def __init__( self :Tuple , lowerCamelCase :int=None , lowerCamelCase :List[str]=None , lowerCamelCase :List[str]=True , lowerCamelCase :str="[UNK]" , lowerCamelCase :Any="[SEP]" , lowerCamelCase :Any="[PAD]" , lowerCamelCase :Optional[Any]="[CLS]" , lowerCamelCase :List[Any]="[MASK]" , lowerCamelCase :Tuple=True , lowerCamelCase :Dict=None , **lowerCamelCase :Dict , ) -> List[str]:
super().__init__(
lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , tokenize_chinese_chars=lowerCAmelCase__ , strip_accents=lowerCAmelCase__ , **lowerCAmelCase__ , )
UpperCAmelCase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , lowerCAmelCase__ ) != do_lower_case
or normalizer_state.get("strip_accents" , lowerCAmelCase__ ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , lowerCAmelCase__ ) != tokenize_chinese_chars
):
UpperCAmelCase__ = getattr(lowerCAmelCase__ , normalizer_state.pop("type" ) )
UpperCAmelCase__ = do_lower_case
UpperCAmelCase__ = strip_accents
UpperCAmelCase__ = tokenize_chinese_chars
UpperCAmelCase__ = normalizer_class(**lowerCAmelCase__ )
UpperCAmelCase__ = do_lower_case
def UpperCAmelCase_ ( self :Any , lowerCamelCase :int , lowerCamelCase :int=None ) -> Tuple:
UpperCAmelCase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCAmelCase_ ( self :Any , lowerCamelCase :List[Any] , lowerCamelCase :Tuple = None ) -> List[int]:
UpperCAmelCase__ = [self.sep_token_id]
UpperCAmelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase_ ( self :List[Any] , lowerCamelCase :int , lowerCamelCase :List[str] = None ) -> Tuple[str]:
UpperCAmelCase__ = self._tokenizer.model.save(lowerCAmelCase__ , name=lowerCAmelCase__ )
return tuple(lowerCAmelCase__ )
| 169 |
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    """A dataset reader that reads from a pyspark DataFrame."""

    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, **kwargs
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df, features=features, cache_dir=cache_dir, working_dir=working_dir, **kwargs
        )

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
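# Usage sketch (illustrative; assumes an active SparkSession bound to `spark`):
# df = spark.range(100).withColumnRenamed("id", "value")
# ds = SparkDatasetReader(df, streaming=False).read()
# assert ds.num_rows == 100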
| 95 | 0 |
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig):
    model_type = "new-model"


if is_tf_available():

    class TFNewModel(TFBertModel):
        config_class = NewModelConfig
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def snake_case_ ( self : Optional[Any] ):
__lowercase : str = "bert-base-cased"
__lowercase : Any = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
__lowercase : Optional[Any] = TFAutoModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def snake_case_ ( self : Union[str, Any] ):
__lowercase : List[Any] = "bert-base-cased"
__lowercase : Dict = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
__lowercase : Tuple = TFAutoModelForPreTraining.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def snake_case_ ( self : Optional[int] ):
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase : Any = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
__lowercase : Optional[int] = TFAutoModelForCausalLM.from_pretrained(lowerCAmelCase__ )
__lowercase : Union[str, Any] = TFAutoModelForCausalLM.from_pretrained(lowerCAmelCase__ , output_loading_info=lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def snake_case_ ( self : Dict ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase : str = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
__lowercase : List[str] = TFAutoModelWithLMHead.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def snake_case_ ( self : Tuple ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase : Optional[int] = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
__lowercase : Optional[int] = TFAutoModelForMaskedLM.from_pretrained(lowerCAmelCase__ )
__lowercase : Dict = TFAutoModelForMaskedLM.from_pretrained(lowerCAmelCase__ , output_loading_info=lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def snake_case_ ( self : List[str] ):
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase : int = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
__lowercase : Any = TFAutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase__ )
__lowercase : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase__ , output_loading_info=lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def snake_case_ ( self : str ):
for model_name in ["bert-base-uncased"]:
__lowercase : Any = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
__lowercase : List[Any] = TFAutoModelForSequenceClassification.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def snake_case_ ( self : Union[str, Any] ):
for model_name in ["bert-base-uncased"]:
__lowercase : Any = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
__lowercase : Dict = TFAutoModelForQuestionAnswering.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
@slow
@require_tensorflow_probability
def snake_case_ ( self : Union[str, Any] ):
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
__lowercase : Any = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
__lowercase : Union[str, Any] = TFAutoModelForTableQuestionAnswering.from_pretrained(lowerCAmelCase__ )
__lowercase : List[str] = TFAutoModelForTableQuestionAnswering.from_pretrained(
lowerCAmelCase__ , output_loading_info=lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
def snake_case_ ( self : Any ):
__lowercase : Optional[int] = TFAutoModelWithLMHead.from_pretrained(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(model.num_parameters() , 1_4410 )
self.assertEqual(model.num_parameters(only_trainable=lowerCAmelCase__ ) , 1_4410 )
def snake_case_ ( self : Optional[Any] ):
__lowercase : int = TFAutoModelWithLMHead.from_pretrained(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(model.num_parameters() , 1_4410 )
self.assertEqual(model.num_parameters(only_trainable=lowerCAmelCase__ ) , 1_4410 )
def snake_case_ ( self : Tuple ):
__lowercase : str = TFAutoModel.from_pretrained('''sgugger/funnel-random-tiny''' )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
__lowercase : Union[str, Any] = copy.deepcopy(model.config )
__lowercase : Union[str, Any] = ["FunnelBaseModel"]
__lowercase : Dict = TFAutoModel.from_config(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(lowerCAmelCase__ )
__lowercase : List[str] = TFAutoModel.from_pretrained(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
def snake_case_ ( self : Union[str, Any] ):
try:
AutoConfig.register('''new-model''' , lowerCAmelCase__ )
__lowercase : Tuple = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(lowerCAmelCase__ ):
auto_class.register(lowerCAmelCase__ , lowerCAmelCase__ )
auto_class.register(lowerCAmelCase__ , lowerCAmelCase__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCAmelCase__ ):
auto_class.register(lowerCAmelCase__ , lowerCAmelCase__ )
# Now that the config is registered, it can be used as any other config with the auto-API
__lowercase : List[str] = BertModelTester(self ).get_config()
__lowercase : Dict = NewModelConfig(**tiny_config.to_dict() )
__lowercase : Optional[int] = auto_class.from_config(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(lowerCAmelCase__ )
__lowercase : int = auto_class.from_pretrained(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def snake_case_ ( self : Optional[Any] ):
with self.assertRaisesRegex(
lowerCAmelCase__ , '''bert-base is not a local folder and is not a valid model identifier''' ):
__lowercase : Dict = TFAutoModel.from_pretrained('''bert-base''' )
def snake_case_ ( self : str ):
with self.assertRaisesRegex(
lowerCAmelCase__ , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
__lowercase : int = TFAutoModel.from_pretrained(lowerCAmelCase__ , revision='''aaaaaa''' )
def snake_case_ ( self : Tuple ):
with self.assertRaisesRegex(
lowerCAmelCase__ , '''hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin''' , ):
__lowercase : Optional[int] = TFAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' )
def snake_case_ ( self : Optional[Any] ):
with self.assertRaisesRegex(lowerCAmelCase__ , '''Use `from_pt=True` to load this model''' ):
__lowercase : Optional[Any] = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' )
def snake_case_ ( self : Dict ):
__lowercase : Any = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
with RequestCounter() as counter:
__lowercase : List[str] = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
__lowercase : Any = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' )
with RequestCounter() as counter:
__lowercase : Dict = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
| 156 |
from math import pi
def arc_length ( angle : float , radius : float ) -> float:
    """Return the length of the arc spanned by ``angle`` degrees on a circle of radius ``radius``."""
    return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
    print(arc_length(90, 10))
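    # Sanity check: 2 * pi * 10 * (90 / 360) = 5 * pi ≈ 15.7079632679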
| 95 | 0 |
"""simple docstring"""
def validate_initial_digits ( credit_card_number : str ) -> bool:
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6") )
def luhn_validation ( credit_card_number : str ) -> bool:
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number ) - 2
    for i in range(half_len , -1 , -2 ):
        # double the value of every second digit
        digit = int(cc_number[i] )
        digit *= 2
        # If doubling a digit results in a two digit number,
        # i.e. greater than 9 (e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6)
        # to get a single digit number. For 10 <= digit <= 18 that digit sum
        # equals digit % 10 + 1 (e.g., 12 -> 2 + 1 = 3).
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit ) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining (non-doubled) digits
    for i in range(len(cc_number ) - 1 , -1 , -2 ):
        total += int(cc_number[i] )
    return total % 10 == 0
def validate_credit_card_number ( credit_card_number : str ) -> bool:
    error_message = f"""{credit_card_number} is an invalid credit card number because"""
    if not credit_card_number.isdigit():
        print(f"""{error_message} it has nonnumerical characters.""" )
        return False
    if not 13 <= len(credit_card_number ) <= 16:
        print(f"""{error_message} of its length.""" )
        return False
    if not validate_initial_digits(credit_card_number ):
        print(f"""{error_message} of its first two digits.""" )
        return False
    if not luhn_validation(credit_card_number ):
        print(f"""{error_message} it fails the Luhn check.""" )
        return False
    print(f"""{credit_card_number} is a valid credit card number.""" )
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number('''4111111111111111''')
validate_credit_card_number('''32323''')
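    # Expected output of the two calls above:
    #   4111111111111111 is a valid credit card number.
    #   32323 is an invalid credit card number because of its length.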
| 268 |
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
UpperCAmelCase : int = {
"""cola""": 2,
"""mnli""": 3,
"""mrpc""": 2,
"""sst-2""": 2,
"""sts-b""": 1,
"""qqp""": 2,
"""qnli""": 2,
"""rte""": 2,
"""wnli""": 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Union[str, Any]=None ):
"""simple docstring"""
a__ : Optional[int] =XLNetConfig.from_json_file(SCREAMING_SNAKE_CASE )
a__ : Dict =finetuning_task.lower() if finetuning_task is not None else ""
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(f'''Building PyTorch XLNetForSequenceClassification model from configuration: {config}''' )
a__ : List[str] =finetuning_task
a__ : Tuple =GLUE_TASKS_NUM_LABELS[finetuning_task]
a__ : List[Any] =XLNetForSequenceClassification(SCREAMING_SNAKE_CASE )
elif "squad" in finetuning_task:
a__ : Optional[int] =finetuning_task
a__ : Dict =XLNetForQuestionAnswering(SCREAMING_SNAKE_CASE )
else:
a__ : List[Any] =XLNetLMHeadModel(SCREAMING_SNAKE_CASE )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Save pytorch-model
a__ : Dict =os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
a__ : Dict =os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
print(f'''Save PyTorch model to {os.path.abspath(SCREAMING_SNAKE_CASE )}''' )
torch.save(model.state_dict() , SCREAMING_SNAKE_CASE )
print(f'''Save configuration file to {os.path.abspath(SCREAMING_SNAKE_CASE )}''' )
with open(SCREAMING_SNAKE_CASE , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
UpperCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--xlnet_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained XLNet model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the folder to store the PyTorch model or dataset/vocab.""",
)
parser.add_argument(
"""--finetuning_task""",
default=None,
type=str,
help="""Name of a task on which the XLNet TensorFlow model was fine-tuned""",
)
UpperCAmelCase : int = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| 95 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase : Dict = logging.get_logger(__name__)
__UpperCAmelCase : Tuple = {
"""caidas/swin2sr-classicalsr-x2-64""": (
"""https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"""
),
}
class __snake_case ( UpperCamelCase__ ):
'''simple docstring'''
lowerCAmelCase__ = """swin2sr"""
lowerCAmelCase__ = {
"""hidden_size""": """embed_dim""",
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self : Any , A : Optional[Any]=64 , A : Any=1 , A : List[str]=3 , A : List[str]=180 , A : List[Any]=[6, 6, 6, 6, 6, 6] , A : List[str]=[6, 6, 6, 6, 6, 6] , A : Tuple=8 , A : Optional[int]=2.0 , A : Tuple=True , A : List[str]=0.0 , A : Optional[int]=0.0 , A : List[Any]=0.1 , A : Any="gelu" , A : Dict=False , A : List[str]=0.02 , A : Any=1E-5 , A : Any=2 , A : Union[str, Any]=1.0 , A : Tuple="1conv" , A : Optional[Any]="pixelshuffle" , **A : Optional[int] , ):
super().__init__(**lowerCAmelCase__ )
__snake_case: Optional[Any] = image_size
__snake_case: Dict = patch_size
__snake_case: Tuple = num_channels
__snake_case: Union[str, Any] = embed_dim
__snake_case: Optional[Any] = depths
__snake_case: List[str] = len(lowerCAmelCase__ )
__snake_case: Any = num_heads
__snake_case: Any = window_size
__snake_case: str = mlp_ratio
__snake_case: List[str] = qkv_bias
__snake_case: Dict = hidden_dropout_prob
__snake_case: List[str] = attention_probs_dropout_prob
__snake_case: Dict = drop_path_rate
__snake_case: Optional[Any] = hidden_act
__snake_case: Union[str, Any] = use_absolute_embeddings
__snake_case: Optional[Any] = layer_norm_eps
__snake_case: List[Any] = initializer_range
__snake_case: int = upscale
__snake_case: Optional[int] = img_range
__snake_case: Any = resi_connection
__snake_case: Optional[Any] = upsampler
| 111 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase : Optional[int] = {
"""google/canine-s""": """https://huggingface.co/google/canine-s/resolve/main/config.json""",
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class __lowerCAmelCase ( UpperCamelCase__):
_lowercase : List[Any] = """canine"""
def __init__( self , lowerCAmelCase__=7_6_8 , lowerCAmelCase__=1_2 , lowerCAmelCase__=1_2 , lowerCAmelCase__=3_0_7_2 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=1_6_3_8_4 , lowerCAmelCase__=1_6 , lowerCAmelCase__=0.02 , lowerCAmelCase__=1E-12 , lowerCAmelCase__=0 , lowerCAmelCase__=0XE0_00 , lowerCAmelCase__=0XE0_01 , lowerCAmelCase__=4 , lowerCAmelCase__=4 , lowerCAmelCase__=8 , lowerCAmelCase__=1_6_3_8_4 , lowerCAmelCase__=1_2_8 , **lowerCAmelCase__ , ) -> Dict:
'''simple docstring'''
super().__init__(pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
a__ : Optional[int] =max_position_embeddings
a__ : str =hidden_size
a__ : Optional[Any] =num_hidden_layers
a__ : Tuple =num_attention_heads
a__ : Optional[Any] =intermediate_size
a__ : Optional[int] =hidden_act
a__ : List[Any] =hidden_dropout_prob
a__ : Union[str, Any] =attention_probs_dropout_prob
a__ : Optional[Any] =initializer_range
a__ : Union[str, Any] =type_vocab_size
a__ : Optional[int] =layer_norm_eps
# Character config:
a__ : int =downsampling_rate
a__ : Optional[Any] =upsampling_kernel_size
a__ : Union[str, Any] =num_hash_functions
a__ : Any =num_hash_buckets
a__ : int =local_transformer_stride
| 95 | 0 |
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
__A : Union[str, Any] = logging.get_logger(__name__)
class __A ( UpperCamelCase__ ):
def __init__( self : List[str] , **UpperCAmelCase_ : Dict ):
requires_backends(self , ['bs4'] )
super().__init__(**lowerCAmelCase__ )
def lowercase__ ( self : Tuple , UpperCAmelCase_ : Union[str, Any] ):
lowerCAmelCase : List[str] = []
lowerCAmelCase : Union[str, Any] = []
lowerCAmelCase : List[str] = element if element.name else element.parent
for parent in child.parents: # type: bs4.element.Tag
lowerCAmelCase : Dict = parent.find_all(child.name , recursive=lowerCAmelCase__ )
xpath_tags.append(child.name )
xpath_subscripts.append(
0 if 1 == len(lowerCAmelCase__ ) else next(i for i, s in enumerate(lowerCAmelCase__ , 1 ) if s is child ) )
lowerCAmelCase : List[str] = parent
xpath_tags.reverse()
xpath_subscripts.reverse()
return xpath_tags, xpath_subscripts
def lowercase__ ( self : int , UpperCAmelCase_ : str ):
lowerCAmelCase : Tuple = BeautifulSoup(lowerCAmelCase__ , 'html.parser' )
lowerCAmelCase : Optional[Any] = []
lowerCAmelCase : Optional[int] = []
lowerCAmelCase : Tuple = []
for element in html_code.descendants:
if type(lowerCAmelCase__ ) == bsa.element.NavigableString:
if type(element.parent ) != bsa.element.Tag:
continue
lowerCAmelCase : Any = html.unescape(lowerCAmelCase__ ).strip()
if not text_in_this_tag:
continue
all_doc_strings.append(lowerCAmelCase__ )
lowerCAmelCase : Any = self.xpath_soup(lowerCAmelCase__ )
stringaxtag_seq.append(lowerCAmelCase__ )
stringaxsubs_seq.append(lowerCAmelCase__ )
if len(lowerCAmelCase__ ) != len(lowerCAmelCase__ ):
raise ValueError('Number of doc strings and xtags does not correspond' )
if len(lowerCAmelCase__ ) != len(lowerCAmelCase__ ):
raise ValueError('Number of doc strings and xsubs does not correspond' )
return all_doc_strings, stringaxtag_seq, stringaxsubs_seq
def lowercase__ ( self : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : str ):
lowerCAmelCase : List[str] = ""
for tagname, subs in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
xpath += f"/{tagname}"
if subs != 0:
xpath += f"[{subs}]"
return xpath
def __call__( self : Dict , UpperCAmelCase_ : Tuple ):
lowerCAmelCase : List[Any] = False
# Check that strings has a valid type
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
lowerCAmelCase : Dict = True
elif isinstance(lowerCAmelCase__ , (list, tuple) ):
if len(lowerCAmelCase__ ) == 0 or isinstance(html_strings[0] , lowerCAmelCase__ ):
lowerCAmelCase : Optional[Any] = True
if not valid_strings:
raise ValueError(
'HTML strings must of type `str`, `List[str]` (batch of examples), '
f"but is of type {type(lowerCAmelCase__ )}." )
lowerCAmelCase : Optional[Any] = bool(isinstance(lowerCAmelCase__ , (list, tuple) ) and (isinstance(html_strings[0] , lowerCAmelCase__ )) )
if not is_batched:
lowerCAmelCase : Tuple = [html_strings]
# Get nodes + xpaths
lowerCAmelCase : Tuple = []
lowerCAmelCase : int = []
for html_string in html_strings:
lowerCAmelCase : Optional[Any] = self.get_three_from_single(lowerCAmelCase__ )
nodes.append(lowerCAmelCase__ )
lowerCAmelCase : Dict = []
for node, tag_list, sub_list in zip(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
lowerCAmelCase : Optional[int] = self.construct_xpath(lowerCAmelCase__ , lowerCAmelCase__ )
xpath_strings.append(lowerCAmelCase__ )
xpaths.append(lowerCAmelCase__ )
# return as Dict
lowerCAmelCase : Optional[Any] = {"nodes": nodes, "xpaths": xpaths}
lowerCAmelCase : int = BatchFeature(data=lowerCAmelCase__ , tensor_type=lowerCAmelCase__ )
return encoded_inputs
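# Usage sketch (assuming this is the MarkupLM feature extractor from transformers;
# the HTML string is illustrative and the xpath shown is indicative of the format):
#
#   feature_extractor = MarkupLMFeatureExtractor()
#   encoding = feature_extractor("<html><body><p>Hello world</p></body></html>")
#   encoding["nodes"]   # [["Hello world"]]
#   encoding["xpaths"]  # e.g. [["/html/body/p"]]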
| 138 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
UpperCAmelCase : int = False
class __lowerCAmelCase ( unittest.TestCase):
pass
@nightly
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase):
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self ) -> Any:
'''simple docstring'''
a__ : str =VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
a__ : int =load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
a__ : Optional[Any] =torch.manual_seed(0 )
a__ : Optional[Any] =pipe.dual_guided(
prompt="first prompt" , image=lowerCAmelCase__ , text_to_image_strength=0.75 , generator=lowerCAmelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCAmelCase__ )
a__ : str =VersatileDiffusionPipeline.from_pretrained(lowerCAmelCase__ , torch_dtype=torch.floataa )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
a__ : Optional[Any] =generator.manual_seed(0 )
a__ : Tuple =pipe.dual_guided(
prompt="first prompt" , image=lowerCAmelCase__ , text_to_image_strength=0.75 , generator=lowerCAmelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def _lowercase ( self ) -> Any:
'''simple docstring'''
a__ : str =VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
a__ : Optional[Any] ="cyberpunk 2077"
a__ : int =load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
a__ : Union[str, Any] =torch.manual_seed(0 )
a__ : Tuple =pipe.dual_guided(
prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , text_to_image_strength=0.75 , generator=lowerCAmelCase__ , guidance_scale=7.5 , num_inference_steps=5_0 , output_type="numpy" , ).images
a__ : int =image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
a__ : Any =np.array([0.14_48, 0.16_19, 0.17_41, 0.10_86, 0.11_47, 0.11_28, 0.11_99, 0.11_65, 0.10_01] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
a__ : str ="A painting of a squirrel eating a burger "
a__ : Optional[int] =torch.manual_seed(0 )
a__ : str =pipe.text_to_image(
prompt=lowerCAmelCase__ , generator=lowerCAmelCase__ , guidance_scale=7.5 , num_inference_steps=5_0 , output_type="numpy" ).images
a__ : Any =image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
a__ : Optional[int] =np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
a__ : Optional[Any] =pipe.image_variation(lowerCAmelCase__ , generator=lowerCAmelCase__ , output_type="numpy" ).images
a__ : Union[str, Any] =image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
a__ : Any =np.array([0.30_76, 0.31_23, 0.32_84, 0.37_82, 0.37_70, 0.38_94, 0.42_97, 0.43_31, 0.44_56] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 95 | 0 |
"""simple docstring"""
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class lowercase :
pass
| 167 |
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class __lowerCAmelCase :
def _lowercase ( self , lowerCAmelCase__ ) -> Optional[Any]:
'''simple docstring'''
raise NotImplementedError()
def _lowercase ( self ) -> int:
'''simple docstring'''
raise NotImplementedError()
class __lowerCAmelCase ( UpperCamelCase__):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ = False , **lowerCAmelCase__ ) -> int:
'''simple docstring'''
a__ : str =tokenizer
a__ : List[str] =skip_prompt
a__ : List[Any] =decode_kwargs
# variables used in the streaming process
a__ : Dict =[]
a__ : int =0
a__ : str =True
def _lowercase ( self , lowerCAmelCase__ ) -> Union[str, Any]:
'''simple docstring'''
if len(value.shape ) > 1 and value.shape[0] > 1:
raise ValueError("TextStreamer only supports batch size 1" )
elif len(value.shape ) > 1:
a__ : Any =value[0]
if self.skip_prompt and self.next_tokens_are_prompt:
a__ : Dict =False
return
# Add the new token to the cache and decodes the entire thing.
self.token_cache.extend(value.tolist() )
a__ : Union[str, Any] =self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
# After the symbol for a new line, we flush the cache.
if text.endswith("\n" ):
a__ : List[Any] =text[self.print_len :]
a__ : List[str] =[]
a__ : Optional[int] =0
# If the last token is a CJK character, we print the characters.
elif len(lowerCAmelCase__ ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
a__ : List[str] =text[self.print_len :]
self.print_len += len(lowerCAmelCase__ )
# Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
# which may change with the subsequent token -- there are probably smarter ways to do this!)
else:
a__ : str =text[self.print_len : text.rfind(" " ) + 1]
self.print_len += len(lowerCAmelCase__ )
self.on_finalized_text(lowerCAmelCase__ )
def _lowercase ( self ) -> Any:
'''simple docstring'''
if len(self.token_cache ) > 0:
a__ : Union[str, Any] =self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
a__ : List[Any] =text[self.print_len :]
a__ : List[str] =[]
a__ : Optional[int] =0
else:
a__ : Union[str, Any] =""
a__ : Any =True
self.on_finalized_text(lowerCAmelCase__ , stream_end=lowerCAmelCase__ )
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ = False ) -> Optional[Any]:
'''simple docstring'''
print(lowerCAmelCase__ , flush=lowerCAmelCase__ , end="" if not stream_end else None )
    def _is_chinese_char ( self , cp ) -> bool:
        '''Check whether ``cp`` is the codepoint of a CJK character.'''
        if (
            (cp >= 0X4E_00 and cp <= 0X9F_FF)  # CJK Unified Ideographs
            or (cp >= 0X34_00 and cp <= 0X4D_BF)  # CJK Unified Ideographs Extension A
            or (cp >= 0X2_00_00 and cp <= 0X2_A6_DF)  # CJK Unified Ideographs Extension B
            or (cp >= 0X2_A7_00 and cp <= 0X2_B7_3F)  # CJK Unified Ideographs Extension C
            or (cp >= 0X2_B7_40 and cp <= 0X2_B8_1F)  # CJK Unified Ideographs Extension D
            or (cp >= 0X2_B8_20 and cp <= 0X2_CE_AF)  # CJK Unified Ideographs Extension E
            or (cp >= 0XF9_00 and cp <= 0XFA_FF)  # CJK Compatibility Ideographs
            or (cp >= 0X2_F8_00 and cp <= 0X2_FA_1F)  # CJK Compatibility Ideographs Supplement
        ):
            return True
        return False
class __lowerCAmelCase ( UpperCamelCase__):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ = False , lowerCAmelCase__ = None , **lowerCAmelCase__ ) -> Optional[Any]:
'''simple docstring'''
super().__init__(lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ )
a__ : str =Queue()
a__ : Optional[Any] =None
a__ : Any =timeout
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ = False ) -> List[str]:
'''simple docstring'''
self.text_queue.put(lowerCAmelCase__ , timeout=self.timeout )
if stream_end:
self.text_queue.put(self.stop_signal , timeout=self.timeout )
def __iter__( self ) -> Dict:
'''simple docstring'''
return self
def _lowercase ( self ) -> int:
'''simple docstring'''
a__ : int =self.text_queue.get(timeout=self.timeout )
if value == self.stop_signal:
raise StopIteration()
else:
return value
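# Minimal usage sketch for the iterator streamer above (an illustration, not part
# of the original file): ``generate`` runs in a background thread and pushes
# decoded text chunks into the queue while the caller iterates over them.
#
#   from threading import Thread
#   from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
#
#   tokenizer = AutoTokenizer.from_pretrained("gpt2")
#   model = AutoModelForCausalLM.from_pretrained("gpt2")
#   inputs = tokenizer(["An increasing sequence: one,"], return_tensors="pt")
#   streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
#   Thread(target=model.generate, kwargs={**inputs, "streamer": streamer, "max_new_tokens": 20}).start()
#   for new_text in streamer:
#       print(new_text, end="")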
| 95 | 0 |
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
_UpperCAmelCase : List[str] =True
except ImportError:
_UpperCAmelCase : Optional[Any] =False
try:
from torch.hub import _get_torch_home
_UpperCAmelCase : Optional[int] =_get_torch_home()
except ImportError:
_UpperCAmelCase : Optional[Any] =os.path.expanduser(
os.getenv("""TORCH_HOME""", os.path.join(os.getenv("""XDG_CACHE_HOME""", """~/.cache"""), """torch"""))
)
_UpperCAmelCase : Union[str, Any] =os.path.join(torch_cache_home, """transformers""")
_UpperCAmelCase : Optional[Any] ="""https://cdn.huggingface.co"""
_UpperCAmelCase : int ="""https://s3.amazonaws.com/models.huggingface.co/bert"""
_UpperCAmelCase : Optional[int] ="""/""".join(str(Path(__file__).resolve()).split("""/""")[:-1])
_UpperCAmelCase : Union[str, Any] =os.path.join(PATH, """config.yaml""")
_UpperCAmelCase : str =os.path.join(PATH, """attributes.txt""")
_UpperCAmelCase : int =os.path.join(PATH, """objects.txt""")
_UpperCAmelCase : int =os.getenv("""PYTORCH_PRETRAINED_BERT_CACHE""", default_cache_path)
_UpperCAmelCase : List[str] =os.getenv("""PYTORCH_TRANSFORMERS_CACHE""", PYTORCH_PRETRAINED_BERT_CACHE)
_UpperCAmelCase : Dict =os.getenv("""TRANSFORMERS_CACHE""", PYTORCH_TRANSFORMERS_CACHE)
_UpperCAmelCase : Union[str, Any] ="""pytorch_model.bin"""
_UpperCAmelCase : List[Any] ="""config.yaml"""
def lowerCAmelCase ( lowerCAmelCase_=OBJECTS , lowerCAmelCase_=ATTRIBUTES )-> str:
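    # Parse the Visual Genome class and attribute vocabularies: one comma-separated
    # record per line, of which only the first field is kept (lower-cased, stripped).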
lowerCAmelCase_ : List[str] = []
with open(lowerCAmelCase_ ) as f:
for object in f.readlines():
vg_classes.append(object.split(''',''' )[0].lower().strip() )
lowerCAmelCase_ : Optional[Any] = []
with open(lowerCAmelCase_ ) as f:
for object in f.readlines():
vg_attrs.append(object.split(''',''' )[0].lower().strip() )
return vg_classes, vg_attrs
def lowerCAmelCase ( lowerCAmelCase_ )-> List[str]:
lowerCAmelCase_ : Union[str, Any] = OrderedDict()
with open(lowerCAmelCase_ , '''rb''' ) as f:
lowerCAmelCase_ : List[Any] = pkl.load(lowerCAmelCase_ )["model"]
for k in copy.deepcopy(list(ckp.keys() ) ):
lowerCAmelCase_ : Optional[int] = ckp.pop(lowerCAmelCase_ )
if isinstance(lowerCAmelCase_ , np.ndarray ):
lowerCAmelCase_ : int = torch.tensor(lowerCAmelCase_ )
else:
            assert isinstance(lowerCAmelCase_ , torch.Tensor ), type(lowerCAmelCase_ )
lowerCAmelCase_ : Optional[int] = v
return r
class snake_case__:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = {}
def __init__( self , __lowercase , __lowercase = "root" , __lowercase=0 ) -> Tuple:
lowerCAmelCase_ : Optional[Any] = name
lowerCAmelCase_ : Optional[Any] = level
lowerCAmelCase_ : Optional[Any] = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
lowerCAmelCase_ : int = copy.deepcopy(lowerCAmelCase__ )
lowerCAmelCase_ : Optional[int] = copy.deepcopy(lowerCAmelCase__ )
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
lowerCAmelCase_ : Dict = Config(lowerCAmelCase__ , name=lowerCAmelCase__ , level=level + 1 )
lowerCAmelCase_ : int = v
setattr(self , lowerCAmelCase__ , lowerCAmelCase__ )
lowerCAmelCase_ : int = d
def __repr__( self ) -> List[Any]:
return str(list((self._pointer.keys()) ) )
def __setattr__( self , __lowercase , __lowercase ) -> str:
lowerCAmelCase_ : List[str] = val
lowerCAmelCase_ : Dict = val
lowerCAmelCase_ : int = key.split('''.''' )
lowerCAmelCase_ : Optional[Any] = len(lowerCAmelCase__ ) - 1
lowerCAmelCase_ : str = self._pointer
if len(lowerCAmelCase__ ) > 1:
for i, l in enumerate(lowerCAmelCase__ ):
if hasattr(self , lowerCAmelCase__ ) and isinstance(getattr(self , lowerCAmelCase__ ) , lowerCAmelCase__ ):
setattr(getattr(self , lowerCAmelCase__ ) , '''.'''.join(levels[i:] ) , lowerCAmelCase__ )
if l == last_level:
lowerCAmelCase_ : str = val
else:
lowerCAmelCase_ : Union[str, Any] = pointer[l]
def lowercase_ ( self ) -> int:
return self._pointer
def lowercase_ ( self , __lowercase , __lowercase ) -> Union[str, Any]:
with open(f"""{file_name}""" , '''w''' ) as stream:
dump(lowerCAmelCase__ , lowerCAmelCase__ )
def lowercase_ ( self , __lowercase , __lowercase ) -> int:
with open(f"""{file_name}""" , '''w''' ) as stream:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
@staticmethod
def lowercase_ ( __lowercase ) -> Optional[Any]:
with open(lowerCAmelCase__ ) as stream:
lowerCAmelCase_ : Tuple = load(lowerCAmelCase__ , Loader=lowerCAmelCase__ )
return data
def __str__( self ) -> Tuple:
lowerCAmelCase_ : str = " "
if self._name != "root":
lowerCAmelCase_ : Optional[int] = f"""{t * (self._level-1)}{self._name}:\n"""
else:
lowerCAmelCase_ : Union[str, Any] = ""
lowerCAmelCase_ : List[str] = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
r += f"""{t * (self._level)}{v}\n"""
self._level += 1
else:
r += f"""{t * (self._level)}{k}: {v} ({type(lowerCAmelCase__ ).__name__})\n"""
lowerCAmelCase_ : Optional[int] = level
return r[:-1]
@classmethod
def lowercase_ ( cls , __lowercase , **__lowercase ) -> int:
lowerCAmelCase_ : Dict = cls.get_config_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
return cls(lowerCAmelCase__ )
@classmethod
def lowercase_ ( cls , __lowercase , **__lowercase ) -> List[Any]:
lowerCAmelCase_ : Union[str, Any] = kwargs.pop('''cache_dir''' , lowerCAmelCase__ )
lowerCAmelCase_ : List[str] = kwargs.pop('''force_download''' , lowerCAmelCase__ )
lowerCAmelCase_ : int = kwargs.pop('''resume_download''' , lowerCAmelCase__ )
lowerCAmelCase_ : Optional[Any] = kwargs.pop('''proxies''' , lowerCAmelCase__ )
lowerCAmelCase_ : Any = kwargs.pop('''local_files_only''' , lowerCAmelCase__ )
if os.path.isdir(lowerCAmelCase__ ):
lowerCAmelCase_ : Any = os.path.join(lowerCAmelCase__ , lowerCAmelCase__ )
elif os.path.isfile(lowerCAmelCase__ ) or is_remote_url(lowerCAmelCase__ ):
lowerCAmelCase_ : Dict = pretrained_model_name_or_path
else:
lowerCAmelCase_ : int = hf_bucket_url(lowerCAmelCase__ , filename=lowerCAmelCase__ , use_cdn=lowerCAmelCase__ )
try:
# Load from URL or cache if already cached
lowerCAmelCase_ : Optional[Any] = cached_path(
lowerCAmelCase__ , cache_dir=lowerCAmelCase__ , force_download=lowerCAmelCase__ , proxies=lowerCAmelCase__ , resume_download=lowerCAmelCase__ , local_files_only=lowerCAmelCase__ , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
lowerCAmelCase_ : Tuple = Config.load_yaml(lowerCAmelCase__ )
except EnvironmentError:
lowerCAmelCase_ : Union[str, Any] = "Can't load config for"
raise EnvironmentError(lowerCAmelCase__ )
if resolved_config_file == config_file:
print('''loading configuration file from path''' )
else:
print('''loading configuration file cache''' )
return Config.load_yaml(lowerCAmelCase__ ), kwargs
def lowerCAmelCase ( lowerCAmelCase_ )-> Union[str, Any]:
lowerCAmelCase_ : List[Any] = torch.load('''dump.pt''' , map_location=in_tensor.device )
lowerCAmelCase_ : Any = in_tensor.numpy()
lowerCAmelCase_ : Any = out_tensor.numpy()[0]
print(na.shape , na[0, 0, :5] )
print(na.shape , na[0, 0, :5] )
assert np.allclose(lowerCAmelCase_ , lowerCAmelCase_ , rtol=0.01 , atol=0.1 ), (
f"""{sum([1 for x in np.isclose(lowerCAmelCase_ , lowerCAmelCase_ , rtol=0.01 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %"""
" element-wise mismatch"
)
raise Exception('''tensors are all good''' )
# Hugging face functions below
def lowerCAmelCase ( lowerCAmelCase_ )-> Optional[int]:
lowerCAmelCase_ : Dict = urlparse(lowerCAmelCase_ )
return parsed.scheme in ("http", "https")
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=True )-> Union[str, Any]:
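    # e.g. ("bert-base-uncased", "pytorch_model.bin") -> "https://cdn.huggingface.co/bert-base-uncased-pytorch_model.bin"
    # (legacy flat layout, used when the model id contains no "/"); namespaced ids
    # map to "{endpoint}/{model_id}/{filename}" instead.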
lowerCAmelCase_ : Optional[int] = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
lowerCAmelCase_ : int = "/" not in model_id
if legacy_format:
return f"""{endpoint}/{model_id}-{filename}"""
else:
return f"""{endpoint}/{model_id}/{filename}"""
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=0 , lowerCAmelCase_=None , )-> List[str]:
lowerCAmelCase_ : List[str] = "python/{}".format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
ua += "; " + "; ".join('''{}/{}'''.format(lowerCAmelCase_ , lowerCAmelCase_ ) for k, v in user_agent.items() )
elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
ua += "; " + user_agent
lowerCAmelCase_ : List[str] = {"user-agent": ua}
if resume_size > 0:
lowerCAmelCase_ : str = "bytes=%d-" % (resume_size,)
lowerCAmelCase_ : int = requests.get(lowerCAmelCase_ , stream=lowerCAmelCase_ , proxies=lowerCAmelCase_ , headers=lowerCAmelCase_ )
if response.status_code == 416: # Range not satisfiable
return
lowerCAmelCase_ : Optional[int] = response.headers.get('''Content-Length''' )
lowerCAmelCase_ : Dict = resume_size + int(lowerCAmelCase_ ) if content_length is not None else None
lowerCAmelCase_ : Any = tqdm(
unit='''B''' , unit_scale=lowerCAmelCase_ , total=lowerCAmelCase_ , initial=lowerCAmelCase_ , desc='''Downloading''' , )
for chunk in response.iter_content(chunk_size=1_024 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(lowerCAmelCase_ ) )
temp_file.write(lowerCAmelCase_ )
progress.close()
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=False , lowerCAmelCase_=None , lowerCAmelCase_=10 , lowerCAmelCase_=False , lowerCAmelCase_=None , lowerCAmelCase_=False , )-> Any:
if cache_dir is None:
lowerCAmelCase_ : int = TRANSFORMERS_CACHE
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
lowerCAmelCase_ : Tuple = str(lowerCAmelCase_ )
os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ )
lowerCAmelCase_ : List[Any] = None
if not local_files_only:
try:
lowerCAmelCase_ : Union[str, Any] = requests.head(lowerCAmelCase_ , allow_redirects=lowerCAmelCase_ , proxies=lowerCAmelCase_ , timeout=lowerCAmelCase_ )
if response.status_code == 200:
lowerCAmelCase_ : Tuple = response.headers.get('''ETag''' )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
lowerCAmelCase_ : List[Any] = url_to_filename(lowerCAmelCase_ , lowerCAmelCase_ )
# get cache path to put the file
lowerCAmelCase_ : str = os.path.join(lowerCAmelCase_ , lowerCAmelCase_ )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(lowerCAmelCase_ ):
return cache_path
else:
lowerCAmelCase_ : Dict = [
file
for file in fnmatch.filter(os.listdir(lowerCAmelCase_ ) , filename + '''.*''' )
if not file.endswith('''.json''' ) and not file.endswith('''.lock''' )
]
if len(lowerCAmelCase_ ) > 0:
return os.path.join(lowerCAmelCase_ , matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
'''Cannot find the requested files in the cached path and outgoing traffic has been'''
''' disabled. To enable model look-ups and downloads online, set \'local_files_only\''''
''' to False.''' )
return None
# From now on, etag is not None.
if os.path.exists(lowerCAmelCase_ ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
lowerCAmelCase_ : Dict = cache_path + ".lock"
with FileLock(lowerCAmelCase_ ):
# If the download just completed while the lock was activated.
if os.path.exists(lowerCAmelCase_ ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
lowerCAmelCase_ : Any = cache_path + ".incomplete"
@contextmanager
def _resumable_file_manager():
with open(lowerCAmelCase_ , '''a+b''' ) as f:
yield f
lowerCAmelCase_ : Optional[int] = _resumable_file_manager
if os.path.exists(lowerCAmelCase_ ):
lowerCAmelCase_ : Dict = os.stat(lowerCAmelCase_ ).st_size
else:
lowerCAmelCase_ : Dict = 0
else:
lowerCAmelCase_ : Dict = partial(tempfile.NamedTemporaryFile , dir=lowerCAmelCase_ , delete=lowerCAmelCase_ )
lowerCAmelCase_ : Union[str, Any] = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
'''%s not found in cache or force_download set to True, downloading to %s''' , lowerCAmelCase_ , temp_file.name , )
http_get(
lowerCAmelCase_ , lowerCAmelCase_ , proxies=lowerCAmelCase_ , resume_size=lowerCAmelCase_ , user_agent=lowerCAmelCase_ , )
os.replace(temp_file.name , lowerCAmelCase_ )
lowerCAmelCase_ : str = {"url": url, "etag": etag}
lowerCAmelCase_ : List[Any] = cache_path + ".json"
with open(lowerCAmelCase_ , '''w''' ) as meta_file:
json.dump(lowerCAmelCase_ , lowerCAmelCase_ )
return cache_path
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_=None )-> List[Any]:
lowerCAmelCase_ : Dict = url.encode('''utf-8''' )
lowerCAmelCase_ : Dict = shaaaa(lowerCAmelCase_ )
lowerCAmelCase_ : int = url_hash.hexdigest()
if etag:
lowerCAmelCase_ : Optional[int] = etag.encode('''utf-8''' )
lowerCAmelCase_ : List[str] = shaaaa(lowerCAmelCase_ )
filename += "." + etag_hash.hexdigest()
if url.endswith('''.h5''' ):
filename += ".h5"
return filename
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=False , lowerCAmelCase_=None , lowerCAmelCase_=False , lowerCAmelCase_=None , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=False , )-> List[Any]:
if cache_dir is None:
lowerCAmelCase_ : Union[str, Any] = TRANSFORMERS_CACHE
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
lowerCAmelCase_ : int = str(lowerCAmelCase_ )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
lowerCAmelCase_ : Dict = str(lowerCAmelCase_ )
if is_remote_url(lowerCAmelCase_ ):
# URL, so get it from the cache (downloading if necessary)
lowerCAmelCase_ : List[Any] = get_from_cache(
lowerCAmelCase_ , cache_dir=lowerCAmelCase_ , force_download=lowerCAmelCase_ , proxies=lowerCAmelCase_ , resume_download=lowerCAmelCase_ , user_agent=lowerCAmelCase_ , local_files_only=lowerCAmelCase_ , )
elif os.path.exists(lowerCAmelCase_ ):
# File, and it exists.
lowerCAmelCase_ : Tuple = url_or_filename
elif urlparse(lowerCAmelCase_ ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError('''file {} not found'''.format(lowerCAmelCase_ ) )
else:
# Something unknown
raise ValueError('''unable to parse {} as a URL or as a local path'''.format(lowerCAmelCase_ ) )
if extract_compressed_file:
if not is_zipfile(lowerCAmelCase_ ) and not tarfile.is_tarfile(lowerCAmelCase_ ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
lowerCAmelCase_ : List[Any] = os.path.split(lowerCAmelCase_ )
lowerCAmelCase_ : int = output_file.replace('''.''' , '''-''' ) + "-extracted"
lowerCAmelCase_ : str = os.path.join(lowerCAmelCase_ , lowerCAmelCase_ )
if os.path.isdir(lowerCAmelCase_ ) and os.listdir(lowerCAmelCase_ ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
lowerCAmelCase_ : List[Any] = output_path + ".lock"
with FileLock(lowerCAmelCase_ ):
shutil.rmtree(lowerCAmelCase_ , ignore_errors=lowerCAmelCase_ )
os.makedirs(lowerCAmelCase_ )
if is_zipfile(lowerCAmelCase_ ):
with ZipFile(lowerCAmelCase_ , '''r''' ) as zip_file:
zip_file.extractall(lowerCAmelCase_ )
zip_file.close()
elif tarfile.is_tarfile(lowerCAmelCase_ ):
lowerCAmelCase_ : Optional[Any] = tarfile.open(lowerCAmelCase_ )
tar_file.extractall(lowerCAmelCase_ )
tar_file.close()
else:
raise EnvironmentError('''Archive format of {} could not be identified'''.format(lowerCAmelCase_ ) )
return output_path_extracted
return output_path
def get_data ( query , delim="," )-> Optional[Any]:
    assert isinstance(query , str )
    if os.path.isfile(query ):
        with open(query ) as f:
            data = eval(f.read() )
    else:
        req = requests.get(query )
        try:
            data = req.json()
        except Exception:
            data = req.content.decode()
        assert data is not None, "could not connect"
        try:
            data = eval(data )
        except Exception:
            data = data.split('''\n''' )
        req.close()
    return data
def lowerCAmelCase ( lowerCAmelCase_ )-> List[str]:
lowerCAmelCase_ : Optional[int] = requests.get(lowerCAmelCase_ )
lowerCAmelCase_ : Optional[int] = np.array(Image.open(BytesIO(response.content ) ) )
return img
def lowerCAmelCase ( lowerCAmelCase_ )-> int:
lowerCAmelCase_ : Optional[Any] = url.split('''/''' )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(lowerCAmelCase_ )
with open(lowerCAmelCase_ , '''rb''' ) as stream:
lowerCAmelCase_ : Dict = pkl.load(lowerCAmelCase_ )
lowerCAmelCase_ : List[str] = weights.pop('''model''' )
lowerCAmelCase_ : Union[str, Any] = {}
for k, v in model.items():
lowerCAmelCase_ : Optional[int] = torch.from_numpy(lowerCAmelCase_ )
if "running_var" in k:
lowerCAmelCase_ : str = torch.tensor([0] )
lowerCAmelCase_ : Optional[Any] = k.replace('''running_var''' , '''num_batches_tracked''' )
lowerCAmelCase_ : int = zero
return new
def lowerCAmelCase ( )-> Union[str, Any]:
print(f"""{os.path.abspath(os.path.join(lowerCAmelCase_ , os.pardir ) )}/demo.ipynb""" )
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_="RGB" )-> Dict:
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
if os.path.isfile(lowerCAmelCase_ ):
lowerCAmelCase_ : Optional[Any] = cva.imread(lowerCAmelCase_ )
else:
lowerCAmelCase_ : Union[str, Any] = get_image_from_url(lowerCAmelCase_ )
assert img is not None, f"""could not connect to: {im}"""
lowerCAmelCase_ : Optional[Any] = cva.cvtColor(lowerCAmelCase_ , cva.COLOR_BGR2RGB )
if input_format == "RGB":
lowerCAmelCase_ : str = img[:, :, ::-1]
return img
def chunk ( images , batch=1 ):
    return (images[i : i + batch] for i in range(0 , len(images ) , batch ))
| 262 |
def _A ( SCREAMING_SNAKE_CASE : int = 50 ):
"""simple docstring"""
a__ : Any =[1] * (length + 1)
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(F"""{solution() = }""")
| 95 | 0 |
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
UpperCamelCase__ = random.Random()
if is_torch_available():
import torch
def _a ( SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Any=1.0 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=None , SCREAMING_SNAKE_CASE_ : List[str]=None ):
if rng is None:
__lowerCAmelCase = global_rng
__lowerCAmelCase = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class a__ ( unittest.TestCase ):
def __init__( self , _A , _A=7 , _A=4_0_0 , _A=2_0_0_0 , _A=1 , _A=0.0 , _A=1_6_0_0_0 , _A=True , _A=True , ):
"""simple docstring"""
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = min_seq_length
__lowerCAmelCase = max_seq_length
__lowerCAmelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__lowerCAmelCase = feature_size
__lowerCAmelCase = padding_value
__lowerCAmelCase = sampling_rate
__lowerCAmelCase = return_attention_mask
__lowerCAmelCase = do_normalize
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def __SCREAMING_SNAKE_CASE( self , _A=False , _A=False ):
"""simple docstring"""
def _flatten(_A ):
return list(itertools.chain(*lowerCAmelCase__ ) )
if equal_length:
__lowerCAmelCase = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
__lowerCAmelCase = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__lowerCAmelCase = [np.asarray(lowerCAmelCase__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class a__ ( UpperCamelCase__ , unittest.TestCase ):
_a : int = ASTFeatureExtractor
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = ASTFeatureExtractionTester(self )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__lowerCAmelCase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
__lowerCAmelCase = [np.asarray(lowerCAmelCase__ ) for speech_input in speech_inputs]
# Test not batched input
__lowerCAmelCase = feat_extract(speech_inputs[0] , return_tensors="np" ).input_values
__lowerCAmelCase = feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) )
# Test batched
__lowerCAmelCase = feat_extract(lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors="np" ).input_values
__lowerCAmelCase = feat_extract(lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
__lowerCAmelCase = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
__lowerCAmelCase = np.asarray(lowerCAmelCase__ )
__lowerCAmelCase = feat_extract(lowerCAmelCase__ , return_tensors="np" ).input_values
__lowerCAmelCase = feat_extract(lowerCAmelCase__ , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) )
@require_torch
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
import torch
__lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowerCAmelCase = np.random.rand(1_0_0 ).astype(np.floataa )
__lowerCAmelCase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
__lowerCAmelCase = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
__lowerCAmelCase = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def __SCREAMING_SNAKE_CASE( self , _A ):
"""simple docstring"""
from datasets import load_dataset
__lowerCAmelCase = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
__lowerCAmelCase = ds.sort("id" ).select(range(lowerCAmelCase__ ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
@require_torch
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
        # fmt: off
        __lowerCAmelCase = torch.tensor(
[-0.98_94, -1.27_76, -0.90_66, -1.27_76, -0.93_49, -1.26_09, -1.03_86, -1.27_76,
-1.15_61, -1.27_76, -1.20_52, -1.27_23, -1.21_90, -1.21_32, -1.27_76, -1.11_33,
-1.19_53, -1.13_43, -1.15_84, -1.22_03, -1.17_70, -1.24_74, -1.23_81, -1.19_36,
-0.92_70, -0.83_17, -0.80_49, -0.77_06, -0.75_65, -0.78_69] )
# fmt: on
__lowerCAmelCase = self._load_datasamples(1 )
__lowerCAmelCase = ASTFeatureExtractor()
__lowerCAmelCase = feature_extractor(lowerCAmelCase__ , return_tensors="pt" ).input_values
        self.assertEqual(input_values.shape , (1, 1_0_2_4, 1_2_8) )
self.assertTrue(torch.allclose(input_values[0, 0, :3_0] , lowerCAmelCase__ , atol=1E-4 ) )
| 92 |
from __future__ import annotations
def bucket_sort ( my_list : list ) -> list:
    """Distribute values into buckets, sort each bucket, and concatenate the results."""
    if len(my_list ) == 0:
        return []
    min_value , max_value = min(my_list ), max(my_list )
    bucket_count = int(max_value - min_value ) + 1
    buckets : list[list] = [[] for _ in range(bucket_count )]
    for i in my_list:
        buckets[int(i - min_value )].append(i )
    return [v for bucket in buckets for v in sorted(bucket )]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
| 95 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase = logging.get_logger(__name__)
lowercase = {
"""uclanlp/visualbert-vqa""": """https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json""",
"""uclanlp/visualbert-vqa-pre""": """https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json""",
"""uclanlp/visualbert-vqa-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"""
),
"""uclanlp/visualbert-vcr""": """https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json""",
"""uclanlp/visualbert-vcr-pre""": """https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json""",
"""uclanlp/visualbert-vcr-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"""
),
"""uclanlp/visualbert-nlvr2""": """https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json""",
"""uclanlp/visualbert-nlvr2-pre""": """https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json""",
"""uclanlp/visualbert-nlvr2-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"""
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class UpperCamelCase_ ( UpperCamelCase__ ):
'''simple docstring'''
lowerCAmelCase = """visual_bert"""
def __init__( self , a=3_05_22 , a=7_68 , a=5_12 , a=12 , a=12 , a=30_72 , a="gelu" , a=0.1 , a=0.1 , a=5_12 , a=2 , a=0.02 , a=1E-12 , a=False , a=True , a=1 , a=0 , a=2 , **a , ) -> Dict:
super().__init__(pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
snake_case_ = vocab_size
snake_case_ = max_position_embeddings
snake_case_ = hidden_size
snake_case_ = visual_embedding_dim
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = initializer_range
snake_case_ = type_vocab_size
snake_case_ = layer_norm_eps
snake_case_ = bypass_transformer
snake_case_ = special_visual_initialize
| 178 |
import numpy as np
def sigmoid ( vector : np.ndarray ) -> np.ndarray:
    """Apply the logistic sigmoid 1 / (1 + e^-x) element-wise."""
    return 1 / (1 + np.exp(-vector ))
if __name__ == "__main__":
import doctest
doctest.testmod()
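    # e.g. sigmoid(np.array([-1.0, 0.0, 1.0])) ≈ array([0.26894142, 0.5, 0.73105858])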
| 95 | 0 |
"""simple docstring"""
import os
from distutils.util import strtobool
def get_int_from_env ( env_keys , default ):
    """Return the first non-negative integer found among the ``env_keys`` environment variables, else ``default``."""
    for e in env_keys:
        val = int(os.environ.get(e , -1 ) )
        if val >= 0:
            return val
    return default
def parse_flag_from_env ( key , default=False ):
    """Parse a boolean flag from the environment."""
    value = os.environ.get(key , str(default ) )
    return strtobool(value ) == 1  # As its name indicates `strtobool` actually returns an int...
def parse_choice_from_env ( key , default="no" ):
    value = os.environ.get(key , str(default ) )
    return value
| 320 |
import numpy
# List of input, output pairs
UpperCAmelCase : str = (
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
UpperCAmelCase : Optional[int] = (((515, 22, 13), 555), ((61, 35, 49), 150))
UpperCAmelCase : str = [2, 4, 1, 5]
UpperCAmelCase : List[str] = len(train_data)
UpperCAmelCase : Dict = 0.0_0_9
def _A ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Tuple="train" ):
"""simple docstring"""
return calculate_hypothesis_value(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) - output(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def _A ( SCREAMING_SNAKE_CASE : Tuple ):
"""simple docstring"""
a__ : Tuple =0
for i in range(len(SCREAMING_SNAKE_CASE ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def _A ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def _A ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Tuple ):
"""simple docstring"""
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def _A ( SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : int=m ):
"""simple docstring"""
a__ : Any =0
for i in range(SCREAMING_SNAKE_CASE ):
if index == -1:
summation_value += _error(SCREAMING_SNAKE_CASE )
else:
summation_value += _error(SCREAMING_SNAKE_CASE ) * train_data[i][0][index]
return summation_value
def _A ( SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
a__ : Any =summation_of_cost_derivative(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) / m
return cost_derivative_value
def _A ( ):
"""simple docstring"""
global parameter_vector
# Tune these values to set a tolerance value for predicted output
a__ : Dict =0.0_0_0_0_0_2
a__ : Union[str, Any] =0
a__ : Any =0
while True:
j += 1
a__ : Any =[0, 0, 0, 0]
for i in range(0 , len(SCREAMING_SNAKE_CASE ) ):
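            # Batch gradient descent update for each parameter:
            # theta_i <- theta_i - LEARNING_RATE * d(cost)/d(theta_i)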
a__ : Tuple =get_cost_derivative(i - 1 )
a__ : List[Any] =(
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE , rtol=SCREAMING_SNAKE_CASE , ):
break
a__ : Optional[Any] =temp_parameter_vector
print(("Number of iterations:", j) )
def _A ( ):
"""simple docstring"""
for i in range(len(SCREAMING_SNAKE_CASE ) ):
print(("Actual output value:", output(SCREAMING_SNAKE_CASE , "test" )) )
print(("Hypothesis output:", calculate_hypothesis_value(SCREAMING_SNAKE_CASE , "test" )) )
if __name__ == "__main__":
run_gradient_descent()
print("""\nTesting gradient descent for a linear hypothesis function.\n""")
test_gradient_descent()
| 95 | 0 |
import numpy
# List of input, output pairs
SCREAMING_SNAKE_CASE :str = (
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
SCREAMING_SNAKE_CASE :Optional[int] = (((515, 22, 13), 555), ((61, 35, 49), 150))
SCREAMING_SNAKE_CASE :str = [2, 4, 1, 5]
SCREAMING_SNAKE_CASE :List[str] = len(train_data)
SCREAMING_SNAKE_CASE :Dict = 0.009
def UpperCAmelCase ( a_ , a_="train" ) -> int:
"""simple docstring"""
return calculate_hypothesis_value(a_ , a_ ) - output(
a_ , a_ )
def UpperCAmelCase ( a_ ) -> str:
"""simple docstring"""
__A = 0
for i in range(len(a_ ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def UpperCAmelCase ( a_ , a_ ) -> Optional[int]:
"""simple docstring"""
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def UpperCAmelCase ( a_ , a_ ) -> Dict:
"""simple docstring"""
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def UpperCAmelCase ( a_ , a_=m ) -> int:
"""simple docstring"""
__A = 0
for i in range(a_ ):
if index == -1:
summation_value += _error(a_ )
else:
summation_value += _error(a_ ) * train_data[i][0][index]
return summation_value
def UpperCAmelCase ( a_ ) -> Optional[Any]:
"""simple docstring"""
__A = summation_of_cost_derivative(a_ , a_ ) / m
return cost_derivative_value
def UpperCAmelCase ( ) -> Optional[int]:
"""simple docstring"""
global parameter_vector
# Tune these values to set a tolerance value for predicted output
__A = 0.000_002
__A = 0
__A = 0
while True:
j += 1
__A = [0, 0, 0, 0]
for i in range(0 , len(a_ ) ):
__A = get_cost_derivative(i - 1 )
__A = (
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
a_ , a_ , atol=a_ , rtol=a_ , ):
break
__A = temp_parameter_vector
print(("Number of iterations:", j) )
def UpperCAmelCase ( ) -> str:
"""simple docstring"""
for i in range(len(a_ ) ):
print(("Actual output value:", output(a_ , "test" )) )
print(("Hypothesis output:", calculate_hypothesis_value(a_ , "test" )) )
if __name__ == "__main__":
run_gradient_descent()
print('\nTesting gradient descent for a linear hypothesis function.\n')
test_gradient_descent()
| 15 |
def pancake_sort ( arr ):
    """Sort ``arr`` by repeatedly flipping the current maximum to the front, then into its final position."""
    cur = len(arr )
    while cur > 1:
        # Find the index of the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur] ) )
        # Reverse from 0 to mi so the maximum moves to the front
        arr = arr[mi::-1] + arr[mi + 1 : len(arr )]
        # Reverse the first ``cur`` elements so the maximum lands at index cur - 1
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr )]
        cur -= 1
    return arr
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
    print(pancake_sort(unsorted))
| 95 | 0 |
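A few quick sanity checks for the pancake sort above; the sample lists are illustrative:

assert pancake_sort([3, 1, 2]) == [1, 2, 3]
assert pancake_sort([10, -7, 3, 3]) == [-7, 3, 3, 10]
assert pancake_sort([]) == []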
import argparse
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType

########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUs (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################

MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"]
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt"
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch["labels"]))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(predictions=predictions, references=references)

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose "
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 "
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
| 169 |
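The comment in the eval loop above already hints at the simpler route: Accelerator.gather_for_metrics truncates the duplicated last-batch samples itself. A sketch of the drop-in replacement, assuming the surrounding eval loop and the accelerator, model, batch, and metric objects from the script above:

# Replaces accelerator.gather plus the manual samples_seen bookkeeping.
with torch.no_grad():
    outputs = model(**batch)
predictions = outputs.logits.argmax(dim=-1)
# gather_for_metrics drops the samples that distributed samplers duplicate
# to even out the last batch, so no manual truncation is needed.
predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
metric.add_batch(predictions=predictions, references=references)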
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available

if is_vision_available():
    from PIL import Image

    from transformers import CLIPSegProcessor, ViTImageProcessor


@require_vision
class CLIPSegProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list of PIL images to pass to the processor."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor_text(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_visual_prompt(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
| 95 | 0 |
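For orientation, the processor under test pairs a ViT image processor with a CLIP tokenizer. A minimal usage sketch against the published checkpoint; the checkpoint name is assumed to be reachable and the inputs are dummies:

from PIL import Image
from transformers import CLIPSegProcessor

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
image = Image.new("RGB", (352, 352))
inputs = processor(text=["a cat", "a remote"], images=[image, image], return_tensors="pt")
print(list(inputs.keys()))  # ['input_ids', 'attention_mask', 'pixel_values'], as the test asserts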
from __future__ import annotations

import os
from typing import Any

import requests

BASE_URL = "https://api.github.com"

# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + "/user"

# https://github.com/settings/tokens
USER_TOKEN = os.environ.get("USER_TOKEN", "")


def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    """Fetch the authenticated user's GitHub profile using a personal access token."""
    headers = {
        "Authorization": f"token {auth_token}",
        "Accept": "application/vnd.github.v3+json",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()


if __name__ == "__main__":  # pragma: no cover
    if USER_TOKEN:
        for key, value in fetch_github_info(USER_TOKEN).items():
            print(f"{key}: {value}")
    else:
        raise ValueError("'USER_TOKEN' field cannot be empty.")
| 156 |
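A hedged usage sketch for the helper above; the token is a placeholder and the printed keys are standard fields of the GitHub user payload:

# Requires a real personal access token; "<token>" is a placeholder.
info = fetch_github_info("<token>")
print(info.get("login"), info.get("name"), info.get("public_repos"))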
def is_arithmetic_series(series: list) -> bool:
    """Return True if the input series is an arithmetic progression."""
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series: list) -> float:
    """Return the arithmetic mean of the series."""
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 95 | 0 |
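A few doctest-style checks for the two helpers above, with values chosen to show both outcomes:

assert is_arithmetic_series([2, 4, 6]) is True
assert is_arithmetic_series([2, 4, 7]) is False
assert is_arithmetic_series([5]) is True
assert arithmetic_mean([2, 4, 6]) == 4.0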
"""simple docstring"""
import argparse
from collections import defaultdict
def snake_case ( A__ ,A__ ,A__ ,A__ ,A__ ):
UpperCAmelCase_ : List[Any] = F"""{file}_{class_name}_{test_name}"""
done_test[_id] += 1
with open(A__ ,"r" ) as f:
UpperCAmelCase_ : Optional[int] = f.readlines()
UpperCAmelCase_ : List[str] = F"""class {class_name}("""
UpperCAmelCase_ : List[Any] = F"""{4 * ' '}def {test_name}("""
UpperCAmelCase_ : Optional[int] = F"""{8 * ' '}{correct_line.split()[0]}"""
UpperCAmelCase_ : List[Any] = F"""{16 * ' '}{correct_line.split()[0]}"""
UpperCAmelCase_ : Dict = False
UpperCAmelCase_ : Dict = False
UpperCAmelCase_ : Dict = False
UpperCAmelCase_ : List[str] = False
UpperCAmelCase_ : int = 0
UpperCAmelCase_ : str = 0
UpperCAmelCase_ : str = []
for line in lines:
if line.startswith(A__ ):
UpperCAmelCase_ : Optional[int] = True
elif in_class and line.startswith(A__ ):
UpperCAmelCase_ : Union[str, Any] = True
elif in_class and in_func and (line.startswith(A__ ) or line.startswith(A__ )):
UpperCAmelCase_ : Union[str, Any] = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
UpperCAmelCase_ : Dict = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
UpperCAmelCase_ : int = True
if in_class and in_func and in_line and insert_line:
new_lines.append(F"""{spaces * ' '}{correct_line}""" )
UpperCAmelCase_ : Tuple = False
else:
new_lines.append(A__ )
with open(A__ ,"w" ) as f:
for line in new_lines:
f.write(A__ )
def snake_case ( A__ ,A__=None ):
if fail is not None:
with open(A__ ,"r" ) as f:
UpperCAmelCase_ : str = {l.strip() for l in f.readlines()}
else:
UpperCAmelCase_ : str = None
with open(A__ ,"r" ) as f:
UpperCAmelCase_ : List[str] = f.readlines()
UpperCAmelCase_ : int = defaultdict(A__ )
for line in correct_lines:
UpperCAmelCase_ : List[Any] = line.split(";" )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(A__ ,A__ ,A__ ,A__ ,A__ )
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
parser.add_argument('''--correct_filename''', help='''filename of tests with expected result''')
parser.add_argument('''--fail_filename''', help='''filename of test failures''', type=str, default=None)
lowerCamelCase_ = parser.parse_args()
main(args.correct_filename, args.fail_filename)
| 268 |
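The line.split(";") in main implies a four-field, semicolon-separated format for --correct_filename. A hypothetical example line; the file, class, test, and slice values are illustrative:

example = "tests/test_foo.py;FooModelTest;test_inference;expected_slice = torch.tensor([0.1, 0.2])\n"
file, class_name, test_name, correct_line = example.split(";")
print(test_name)  # test_inference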
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel


class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MCLIPModel(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        # Mean-pool the token embeddings under the attention mask, then project.
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        pooled = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(pooled), embs
| 95 | 0 |
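The forward pass above mean-pools the token embeddings under the attention mask before the linear projection. A standalone sketch of just that pooling step, with illustrative shapes:

import torch

embs = torch.randn(2, 7, 1024)  # (batch, seq_len, hidden) token embeddings
attention_mask = torch.tensor([[1, 1, 1, 1, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1]])
# Zero out padded positions, then divide by the number of real tokens per row.
pooled = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
print(pooled.shape)  # torch.Size([2, 1024])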