| code (string, lengths 86 to 54.5k) | code_codestyle (int64, 0 to 371) | style_context (string, lengths 87 to 49.2k) | style_context_codestyle (int64, 0 to 349) | label (int64, 0 to 1) |
|---|---|---|---|---|
'''simple docstring'''
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
lowerCAmelCase :str = logging.get_logger(__name__)
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
A_ : Tuple = """vision-encoder-decoder"""
A_ : Any = True
def __init__( self : int , **kwargs : Optional[int] ) -> Any:
super().__init__(**kwargs )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
F'A configuration of type {self.model_type} cannot be instantiated because '
F'not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}' )
__magic_name__ : List[Any] = kwargs.pop('encoder' )
__magic_name__ : int = encoder_config.pop('model_type' )
__magic_name__ : Dict = kwargs.pop('decoder' )
__magic_name__ : Dict = decoder_config.pop('model_type' )
__magic_name__ : str = AutoConfig.for_model(_A , **_A )
__magic_name__ : str = AutoConfig.for_model(_A , **_A )
__magic_name__ : List[Any] = True
@classmethod
def __lowerCAmelCase ( cls : List[str] , _A : PretrainedConfig , _A : PretrainedConfig , **_A : str ) -> PretrainedConfig:
logger.info('Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config' )
__magic_name__ : Tuple = True
__magic_name__ : Optional[int] = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **_A )
def __lowerCAmelCase ( self : Dict ) -> Optional[Any]:
__magic_name__ : str = copy.deepcopy(self.__dict__ )
__magic_name__ : Dict = self.encoder.to_dict()
__magic_name__ : Tuple = self.decoder.to_dict()
__magic_name__ : str = self.__class__.model_type
return output
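# A readable sketch of the composition pattern the (obfuscated) __init__ above
# implements, assuming the public Hugging Face `transformers` API; the concrete
# model types ("vit", "gpt2") are illustrative assumptions, not taken from this row.
from transformers import AutoConfig

encoder_kwargs = {"model_type": "vit"}
decoder_kwargs = {"model_type": "gpt2"}
# Pop `model_type` and let AutoConfig instantiate the matching config subclass,
# mirroring the two AutoConfig.for_model calls above.
sketch_encoder = AutoConfig.for_model(encoder_kwargs.pop("model_type"), **encoder_kwargs)
sketch_decoder = AutoConfig.for_model(decoder_kwargs.pop("model_type"), **decoder_kwargs)
sketch_decoder.is_decoder = True
sketch_decoder.add_cross_attention = True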
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
A_ : List[str] = version.parse("""1.11""" )
@property
def __lowerCAmelCase ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def __lowerCAmelCase ( self : Optional[Any] ) -> float:
return 1E-4
@property
def __lowerCAmelCase ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict({'last_hidden_state': {0: 'batch', 1: 'encoder_sequence'}} )
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
@property
def __lowerCAmelCase ( self : int ) -> Mapping[str, Mapping[int, str]]:
__magic_name__ : Union[str, Any] = OrderedDict()
__magic_name__ : Tuple = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
__magic_name__ : List[Any] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
__magic_name__ : List[str] = {0: 'batch', 1: 'encoder_sequence'}
return common_inputs
def __lowerCAmelCase ( self : str , _A : "PreTrainedTokenizerBase" , _A : int = -1 , _A : int = -1 , _A : bool = False , _A : Optional["TensorType"] = None , ) -> Mapping[str, Any]:
import torch
__magic_name__ : Optional[int] = OrderedDict()
__magic_name__ : List[Any] = super().generate_dummy_inputs(
_A , batch_size=_A , seq_length=_A , is_pair=_A , framework=_A )
__magic_name__ , __magic_name__ : int = dummy_input['input_ids'].shape
__magic_name__ : str = (batch, encoder_sequence, self._config.encoder_hidden_size)
__magic_name__ : List[Any] = dummy_input.pop('input_ids' )
__magic_name__ : Optional[Any] = dummy_input.pop('attention_mask' )
__magic_name__ : Any = torch.zeros(_A )
return common_inputs
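# Readable sketch of the dummy-input construction above, assuming PyTorch; the
# sizes are illustrative. The decoder ONNX export needs decoder token ids plus a
# zero tensor standing in for the encoder's last hidden state.
import torch

batch, encoder_sequence, encoder_hidden_size = 2, 16, 768  # assumed shapes
sketch_inputs = {
    "decoder_input_ids": torch.ones(batch, encoder_sequence, dtype=torch.long),
    "decoder_attention_mask": torch.ones(batch, encoder_sequence, dtype=torch.long),
    "encoder_hidden_states": torch.zeros(batch, encoder_sequence, encoder_hidden_size),
}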
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
@property
def __lowerCAmelCase ( self : Optional[Any] ) -> None:
pass
def __lowerCAmelCase ( self : List[str] , _A : PretrainedConfig ) -> OnnxConfig:
return VisionEncoderDecoderEncoderOnnxConfig(_A )
def __lowerCAmelCase ( self : Optional[Any] , _A : PretrainedConfig , _A : PretrainedConfig , _A : str = "default" ) -> OnnxConfig:
__magic_name__ : Union[str, Any] = encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(_A , _A ) | 331 |
'''simple docstring'''
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
lowerCAmelCase :Dict = logging.getLogger(__name__)
require_version('''pytorch_lightning>=1.0.4''')
lowerCAmelCase :str = {
'''base''': AutoModel,
'''sequence-classification''': AutoModelForSequenceClassification,
'''question-answering''': AutoModelForQuestionAnswering,
'''pretraining''': AutoModelForPreTraining,
'''token-classification''': AutoModelForTokenClassification,
'''language-modeling''': AutoModelWithLMHead,
'''summarization''': AutoModelForSeq2SeqLM,
'''translation''': AutoModelForSeq2SeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
lowerCAmelCase :Any = {
'''linear''': get_linear_schedule_with_warmup,
'''cosine''': get_cosine_schedule_with_warmup,
'''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup,
'''polynomial''': get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
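# Usage sketch for the scheduler map above (illustrative, not part of the
# original script): every value is a factory with the signature
# (optimizer, num_warmup_steps, num_training_steps).
import torch

_sketch_params = [torch.nn.Parameter(torch.zeros(1))]
_sketch_opt = AdamW(_sketch_params, lr=5e-5)
_sketch_sched = arg_to_scheduler["linear"](_sketch_opt, num_warmup_steps=100, num_training_steps=1_000)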
lowerCAmelCase :Tuple = sorted(arg_to_scheduler.keys())
lowerCAmelCase :Any = '''{''' + ''', '''.join(arg_to_scheduler_choices) + '''}'''
class _lowerCamelCase ( pl.LightningModule ):
'''simple docstring'''
def __init__( self : Union[str, Any] , _A : argparse.Namespace , _A : List[Any]=None , _A : Any="base" , _A : Tuple=None , _A : Union[str, Any]=None , _A : List[Any]=None , **_A : Optional[Any] , ) -> Optional[int]:
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(_A )
__magic_name__ : List[str] = 0
__magic_name__ : Union[str, Any] = Path(self.hparams.output_dir )
__magic_name__ : str = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
__magic_name__ : Optional[Any] = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'num_labels': num_labels} if num_labels is not None else {}) , cache_dir=_A , **_A , )
else:
__magic_name__ : PretrainedConfig = config
__magic_name__ : Any = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
for p in extra_model_params:
if getattr(self.hparams , _A , _A ):
assert hasattr(self.config , _A ), F'model config doesn\'t have a `{p}` attribute'
setattr(self.config , _A , getattr(self.hparams , _A ) )
if tokenizer is None:
__magic_name__ : List[Any] = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=_A , )
else:
__magic_name__ : PreTrainedTokenizer = tokenizer
__magic_name__ : Optional[int] = MODEL_MODES[mode]
if model is None:
__magic_name__ : Tuple = self.model_type.from_pretrained(
self.hparams.model_name_or_path , from_tf=bool('.ckpt' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=_A , )
else:
__magic_name__ : str = model
def __lowerCAmelCase ( self : Optional[int] , *_A : Union[str, Any] , **_A : Union[str, Any] ) -> Tuple:
__magic_name__ : Any = self.model_type.from_pretrained(*_A , **_A )
def __lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
__magic_name__ : Optional[Any] = arg_to_scheduler[self.hparams.lr_scheduler]
__magic_name__ : str = get_schedule_func(
self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
__magic_name__ : int = {'scheduler': scheduler, 'interval': 'step', 'frequency': 1}
return scheduler
def __lowerCAmelCase ( self : str ) -> Optional[Any]:
__magic_name__ : Optional[Any] = self.model
__magic_name__ : int = ['bias', 'LayerNorm.weight']
__magic_name__ : Dict = [
{
'params': [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
], # check these named parameters
'weight_decay': self.hparams.weight_decay,
},
{
'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
'weight_decay': 0.0,
},
]
if self.hparams.adafactor:
__magic_name__ : str = Adafactor(
_A , lr=self.hparams.learning_rate , scale_parameter=_A , relative_step=_A )
else:
__magic_name__ : Tuple = AdamW(
_A , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
__magic_name__ : List[str] = optimizer
__magic_name__ : int = self.get_lr_scheduler()
return [optimizer], [scheduler]
def __lowerCAmelCase ( self : Optional[Any] , _A : Optional[int] , _A : Tuple ) -> Optional[Any]:
return self.validation_step(_A , _A )
def __lowerCAmelCase ( self : Dict , _A : List[str] ) -> Any:
return self.validation_end(_A )
def __lowerCAmelCase ( self : Union[str, Any] ) -> int:
__magic_name__ : int = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores
__magic_name__ : Dict = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
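# Worked example of the arithmetic above, with assumed hyperparameters:
#   train_batch_size = 32, accumulate_grad_batches = 2, gpus = 2
#   -> effective_batch_size = 32 * 2 * 2 = 128
#   dataset_size = 12_800, max_epochs = 3
#   -> total_steps = (12_800 / 128) * 3 = 300 optimizer steps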
def __lowerCAmelCase ( self : str , _A : Optional[int] ) -> str:
if stage == "test":
__magic_name__ : Any = len(self.test_dataloader().dataset )
else:
__magic_name__ : List[Any] = self.get_dataloader('train' , self.hparams.train_batch_size , shuffle=_A )
__magic_name__ : int = len(self.train_dataloader().dataset )
def __lowerCAmelCase ( self : List[str] , _A : str , _A : int , _A : bool = False ) -> Optional[int]:
raise NotImplementedError('You must implement this for your task' )
def __lowerCAmelCase ( self : int ) -> List[str]:
return self.train_loader
def __lowerCAmelCase ( self : Tuple ) -> int:
return self.get_dataloader('dev' , self.hparams.eval_batch_size , shuffle=_A )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
return self.get_dataloader('test' , self.hparams.eval_batch_size , shuffle=_A )
def __lowerCAmelCase ( self : Optional[Any] , _A : Any ) -> str:
return os.path.join(
self.hparams.data_dir , 'cached_{}_{}_{}'.format(
_A , list(filter(_A , self.hparams.model_name_or_path.split('/' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
@pl.utilities.rank_zero_only
def __lowerCAmelCase ( self : List[str] , _A : Dict[str, Any] ) -> None:
__magic_name__ : Dict = self.output_dir.joinpath('best_tfmr' )
__magic_name__ : List[Any] = self.step_count
self.model.save_pretrained(_A )
self.tokenizer.save_pretrained(_A )
@staticmethod
def __lowerCAmelCase ( _A : List[str] , _A : Optional[Any] ) -> Tuple:
parser.add_argument(
'--model_name_or_path' , default=_A , type=_A , required=_A , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--config_name' , default='' , type=_A , help='Pretrained config name or path if not the same as model_name' )
parser.add_argument(
'--tokenizer_name' , default=_A , type=_A , help='Pretrained tokenizer name or path if not the same as model_name' , )
parser.add_argument(
'--cache_dir' , default=str(Path(_A ).parent / 'test_run' / 'cache' ) , type=_A , help='Where do you want to store the pre-trained models downloaded from huggingface.co' , )
parser.add_argument(
'--encoder_layerdrop' , type=_A , help='Encoder layer dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--decoder_layerdrop' , type=_A , help='Decoder layer dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--dropout' , type=_A , help='Dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--attention_dropout' , type=_A , help='Attention dropout probability (Optional). Goes into model.config' , )
parser.add_argument('--learning_rate' , default=5E-5 , type=_A , help='The initial learning rate for Adam.' )
parser.add_argument(
'--lr_scheduler' , default='linear' , choices=_A , metavar=_A , type=_A , help='Learning rate scheduler' , )
parser.add_argument('--weight_decay' , default=0.0 , type=_A , help='Weight decay if we apply some.' )
parser.add_argument('--adam_epsilon' , default=1E-8 , type=_A , help='Epsilon for Adam optimizer.' )
parser.add_argument('--warmup_steps' , default=0 , type=_A , help='Linear warmup over warmup_steps.' )
parser.add_argument('--num_workers' , default=4 , type=_A , help='kwarg passed to DataLoader' )
parser.add_argument('--num_train_epochs' , dest='max_epochs' , default=3 , type=_A )
parser.add_argument('--train_batch_size' , default=32 , type=_A )
parser.add_argument('--eval_batch_size' , default=32 , type=_A )
parser.add_argument('--adafactor' , action='store_true' )
class _lowerCamelCase ( pl.Callback ):
'''simple docstring'''
def __lowerCAmelCase ( self : List[str] , _A : List[Any] , _A : List[Any] ) -> List[str]:
if (
trainer.is_global_zero and trainer.global_rank == 0
): # we initialize the retriever only on the master worker with RAY; in newer pytorch-lightning releases, accelerators are removed.
pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class _lowerCamelCase ( pl.Callback ):
'''simple docstring'''
def __lowerCAmelCase ( self : List[str] , _A : Dict , _A : str ) -> List[str]:
# print(pl_module.model.rag)
for name, param in pl_module.model.rag.named_parameters():
if param.grad is None:
print(_A )
class _lowerCamelCase ( pl.Callback ):
'''simple docstring'''
def __lowerCAmelCase ( self : Optional[int] , _A : List[Any] , _A : Dict ) -> Optional[Any]:
__magic_name__ : Dict = trainer.lr_schedulers[0]['scheduler']
__magic_name__ : int = {F'lr_group_{i}': lr for i, lr in enumerate(lr_scheduler.get_lr() )}
pl_module.logger.log_metrics(_A )
def __lowerCAmelCase ( self : Any , _A : pl.Trainer , _A : pl.LightningModule ) -> Optional[int]:
rank_zero_info('***** Validation results *****' )
__magic_name__ : str = trainer.callback_metrics
# Log results
for key in sorted(_A ):
if key not in ["log", "progress_bar"]:
rank_zero_info('{} = {}\n'.format(_A , str(metrics[key] ) ) )
def __lowerCAmelCase ( self : Union[str, Any] , _A : pl.Trainer , _A : pl.LightningModule ) -> Optional[Any]:
rank_zero_info('***** Test results *****' )
__magic_name__ : Optional[int] = trainer.callback_metrics
# Log and save results to file
__magic_name__ : Optional[Any] = os.path.join(pl_module.hparams.output_dir , 'test_results.txt' )
with open(_A , 'w' ) as writer:
for key in sorted(_A ):
if key not in ["log", "progress_bar"]:
rank_zero_info('{} = {}\n'.format(_A , str(metrics[key] ) ) )
writer.write('{} = {}\n'.format(_A , str(metrics[key] ) ) )
def lowerCamelCase ( lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
parser.add_argument(
'--output_dir' , default=str(Path(lowerCAmelCase ).parent / 'test_run' / 'model_checkpoints' ) , type=lowerCAmelCase , help='The output directory where the model predictions and checkpoints will be written.' , )
parser.add_argument(
'--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , )
parser.add_argument(
'--fp16_opt_level' , type=lowerCAmelCase , default='O2' , help=(
'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. '
'See details at https://nvidia.github.io/apex/amp.html'
) , )
parser.add_argument('--n_tpu_cores' , dest='tpu_cores' , type=lowerCAmelCase )
parser.add_argument('--max_grad_norm' , dest='gradient_clip_val' , default=1.0 , type=lowerCAmelCase , help='Max gradient norm' )
parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' )
parser.add_argument('--do_predict' , action='store_true' , help='Whether to run predictions on the test set.' )
parser.add_argument(
'--gradient_accumulation_steps' , dest='accumulate_grad_batches' , type=lowerCAmelCase , default=1 , help='Number of update steps to accumulate before performing a backward/update pass.' , )
parser.add_argument('--seed' , type=lowerCAmelCase , default=42 , help='random seed for initialization' )
parser.add_argument(
'--data_dir' , default=str(Path(lowerCAmelCase ).parent / 'test_run' / 'dummy-train-data' ) , type=lowerCAmelCase , help='The input data dir. Should contain the training files for the CoNLL-2003 NER task.' , )
def lowerCamelCase ( lowerCAmelCase : BaseTransformer , lowerCAmelCase : argparse.Namespace , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : Optional[Any]=[] , lowerCAmelCase : Union[str, Any]=None , lowerCAmelCase : Any=None , **lowerCAmelCase : Union[str, Any] , ):
"""simple docstring"""
pl.seed_everything(args.seed )
# init model
__magic_name__ : Any = Path(model.hparams.output_dir )
odir.mkdir(exist_ok=lowerCAmelCase )
# add custom checkpoints
if checkpoint_callback is None:
__magic_name__ : List[Any] = pl.callbacks.ModelCheckpoint(
filepath=args.output_dir , prefix='checkpoint' , monitor='val_loss' , mode='min' , save_top_k=1 )
if early_stopping_callback:
extra_callbacks.append(lowerCAmelCase )
if logging_callback is None:
__magic_name__ : Dict = LoggingCallback()
__magic_name__ : List[str] = {}
if args.fp16:
__magic_name__ : Dict = 16
if args.gpus > 1:
__magic_name__ : Tuple = 'auto'
__magic_name__ : int = 'ddp'
__magic_name__ : str = args.accumulate_grad_batches
__magic_name__ : str = None
__magic_name__ : List[str] = 'auto'
__magic_name__ : List[Any] = pl.Trainer.from_argparse_args(
lowerCAmelCase , weights_summary=lowerCAmelCase , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=lowerCAmelCase , val_check_interval=1 , num_sanity_val_steps=2 , **lowerCAmelCase , )
if args.do_train:
trainer.fit(lowerCAmelCase )
else:
print('RAG modeling tests with new set functions successfully executed!' )
return trainer | 331 | 1 |
'''simple docstring'''
import functools
def _snake_case ( A , A ) -> int:
# Validation
if not isinstance(A , A ) or not all(isinstance(A , A ) for day in days ):
raise ValueError('''The parameter days should be a list of integers''' )
if len(A ) != 3 or not all(isinstance(A , A ) for cost in costs ):
raise ValueError('''The parameter costs should be a list of three integers''' )
if len(A ) == 0:
return 0
if min(A ) <= 0:
raise ValueError('''All days elements should be greater than 0''' )
if max(A ) >= 366:
raise ValueError('''All days elements should be less than 366''' )
lowerCAmelCase__ = set(A )
@functools.cache
def dynamic_programming(A ) -> int:
if index > 365:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
return dynamic_programming(1 )
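# Worked example for the DP above: with days = [1, 4, 6, 7, 8, 20] and
# costs = [2, 7, 15] (1-, 7- and 30-day passes) the minimum spend is 11,
# e.g. a 7-day pass covering days 1-7 plus 1-day tickets for days 8 and 20.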
if __name__ == "__main__":
import doctest
doctest.testmod() | 228 |
'''simple docstring'''
from collections.abc import Iterable
from typing import Any
class a__ :
'''simple docstring'''
def __init__( self , lowerCamelCase_ = None ) -> List[str]:
lowerCAmelCase__ = value
lowerCAmelCase__ = None # Added in order to delete a node easier
lowerCAmelCase__ = None
lowerCAmelCase__ = None
def __repr__( self ) -> str:
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({F"""{self.value}""": (self.left, self.right)} , indent=1 )
class a__ :
'''simple docstring'''
def __init__( self , lowerCamelCase_ = None ) -> Union[str, Any]:
lowerCAmelCase__ = root
def __str__( self ) -> str:
return str(self.root )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ ) -> None:
if new_children is not None: # reset its kids
lowerCAmelCase__ = node.parent
if node.parent is not None: # reset its parent
if self.is_right(lowerCamelCase_ ): # If it is the right children
lowerCAmelCase__ = new_children
else:
lowerCAmelCase__ = new_children
else:
lowerCAmelCase__ = new_children
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> bool:
if node.parent and node.parent.right:
return node == node.parent.right
return False
def __SCREAMING_SNAKE_CASE ( self ) -> bool:
return self.root is None
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> None:
lowerCAmelCase__ = Node(lowerCamelCase_ ) # create a new Node
if self.empty(): # if Tree is empty
lowerCAmelCase__ = new_node # set its root
else: # Tree is not empty
lowerCAmelCase__ = self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
lowerCAmelCase__ = new_node # We insert the new node in a leaf
break
else:
lowerCAmelCase__ = parent_node.left
else:
if parent_node.right is None:
lowerCAmelCase__ = new_node
break
else:
lowerCAmelCase__ = parent_node.right
lowerCAmelCase__ = parent_node
def __SCREAMING_SNAKE_CASE ( self , *lowerCamelCase_ ) -> None:
for value in values:
self.__insert(lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Node | None:
if self.empty():
raise IndexError('''Warning: Tree is empty! please use another.''' )
else:
lowerCAmelCase__ = self.root
# use lazy evaluation here to avoid NoneType Attribute error
while node is not None and node.value is not value:
lowerCAmelCase__ = node.left if value < node.value else node.right
return node
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ = None ) -> Node | None:
if node is None:
if self.root is None:
return None
lowerCAmelCase__ = self.root
if not self.empty():
while node.right is not None:
lowerCAmelCase__ = node.right
return node
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ = None ) -> Node | None:
if node is None:
lowerCAmelCase__ = self.root
if self.root is None:
return None
if not self.empty():
lowerCAmelCase__ = self.root
while node.left is not None:
lowerCAmelCase__ = node.left
return node
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> None:
lowerCAmelCase__ = self.search(lowerCamelCase_ ) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(lowerCamelCase_ , lowerCamelCase_ )
elif node.left is None: # Has only right children
self.__reassign_nodes(lowerCamelCase_ , node.right )
elif node.right is None: # Has only left children
self.__reassign_nodes(lowerCamelCase_ , node.left )
else:
lowerCAmelCase__ = self.get_max(
node.left ) # Gets the max value of the left branch
self.remove(tmp_node.value ) # type: ignore
lowerCAmelCase__ = (
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Iterable:
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left )
yield from self.preorder_traverse(node.right )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_=None ) -> Any:
if traversal_function is None:
return self.preorder_traverse(self.root )
else:
return traversal_function(self.root )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ ) -> None:
if node:
self.inorder(lowerCamelCase_ , node.left )
arr.append(node.value )
self.inorder(lowerCamelCase_ , node.right )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ ) -> int:
lowerCAmelCase__ = []
self.inorder(lowerCamelCase_ , lowerCamelCase_ ) # append all values to list using inorder traversal
return arr[k - 1]
def _snake_case ( A ) -> list[Node]:
lowerCAmelCase__ = []
if curr_node is not None:
lowerCAmelCase__ = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
return node_list
def _snake_case ( ) -> None:
lowerCAmelCase__ = (8, 3, 6, 1, 10, 14, 13, 4, 7)
lowerCAmelCase__ = BinarySearchTree()
for i in testlist:
t.insert(A )
# Prints all the elements of the list in order traversal
print(A )
if t.search(6 ) is not None:
print('''The value 6 exists''' )
else:
print('''The value 6 doesn\'t exist''' )
if t.search(-1 ) is not None:
print('''The value -1 exists''' )
else:
print('''The value -1 doesn\'t exist''' )
if not t.empty():
print('''Max Value: ''' , t.get_max().value ) # type: ignore
print('''Min Value: ''' , t.get_min().value ) # type: ignore
for i in testlist:
t.remove(A )
print(A )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) | 228 | 1 |
'''simple docstring'''
def lowerCamelCase ( lowerCAmelCase : int , lowerCAmelCase : list ):
"""simple docstring"""
_enforce_args(lowerCAmelCase , lowerCAmelCase )
if n == 0:
return 0
__magic_name__ : List[str] = float('-inf' )
for i in range(1 , n + 1 ):
__magic_name__ : List[str] = max(
lowerCAmelCase , prices[i - 1] + naive_cut_rod_recursive(n - i , lowerCAmelCase ) )
return max_revue
def lowerCamelCase ( lowerCAmelCase : int , lowerCAmelCase : list ):
"""simple docstring"""
_enforce_args(lowerCAmelCase , lowerCAmelCase )
__magic_name__ : Optional[Any] = [float('-inf' ) for _ in range(n + 1 )]
return _top_down_cut_rod_recursive(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def lowerCamelCase ( lowerCAmelCase : int , lowerCAmelCase : list , lowerCAmelCase : list ):
"""simple docstring"""
if max_rev[n] >= 0:
return max_rev[n]
elif n == 0:
return 0
else:
__magic_name__ : List[str] = float('-inf' )
for i in range(1 , n + 1 ):
__magic_name__ : Any = max(
lowerCAmelCase , prices[i - 1] + _top_down_cut_rod_recursive(n - i , lowerCAmelCase , lowerCAmelCase ) , )
__magic_name__ : Optional[Any] = max_revenue
return max_rev[n]
def lowerCamelCase ( lowerCAmelCase : int , lowerCAmelCase : list ):
"""simple docstring"""
_enforce_args(lowerCAmelCase , lowerCAmelCase )
# length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
# length 0.
__magic_name__ : List[Any] = [float('-inf' ) for _ in range(n + 1 )]
__magic_name__ : Optional[Any] = 0
for i in range(1 , n + 1 ):
__magic_name__ : int = max_rev[i]
for j in range(1 , i + 1 ):
__magic_name__ : int = max(lowerCAmelCase , prices[j - 1] + max_rev[i - j] )
__magic_name__ : Optional[Any] = max_revenue_i
return max_rev[n]
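# Worked example for the implementations above: with prices = [1, 5, 8, 9]
# (revenue per piece of length 1..4) and n = 4, the optimum is 10, from two
# pieces of length 2 (5 + 5); naive_cut_rod_recursive, top_down_cut_rod and
# bottom_up_cut_rod all agree on this value.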
def lowerCamelCase ( lowerCAmelCase : int , lowerCAmelCase : list ):
"""simple docstring"""
if n < 0:
__magic_name__ : List[str] = f'n must be greater than or equal to 0. Got n = {n}'
raise ValueError(lowerCAmelCase )
if n > len(lowerCAmelCase ):
__magic_name__ : Dict = (
'Each integral piece of rod must have a corresponding price. '
f'Got n = {n} but length of prices = {len(lowerCAmelCase )}'
)
raise ValueError(lowerCAmelCase )
def lowerCamelCase ( ):
"""simple docstring"""
__magic_name__ : Union[str, Any] = [6, 10, 12, 15, 20, 23]
__magic_name__ : Union[str, Any] = len(lowerCAmelCase )
# the best revenue comes from cutting the rod into 6 pieces, each
# of length 1 resulting in a revenue of 6 * 6 = 36.
__magic_name__ : List[Any] = 36
__magic_name__ : Optional[int] = top_down_cut_rod(lowerCAmelCase , lowerCAmelCase )
__magic_name__ : List[str] = bottom_up_cut_rod(lowerCAmelCase , lowerCAmelCase )
__magic_name__ : Optional[int] = naive_cut_rod_recursive(lowerCAmelCase , lowerCAmelCase )
assert expected_max_revenue == max_rev_top_down
assert max_rev_top_down == max_rev_bottom_up
assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main() | 331 |
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
A_ : Dict = (DDPMScheduler,)
def __lowerCAmelCase ( self : Any , **_A : Dict ) -> str:
__magic_name__ : str = {
'num_train_timesteps': 1000,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'variance_type': 'fixed_small',
'clip_sample': True,
}
config.update(**_A )
return config
def __lowerCAmelCase ( self : str ) -> Union[str, Any]:
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=_A )
def __lowerCAmelCase ( self : Optional[int] ) -> int:
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=_A , beta_end=_A )
def __lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_A )
def __lowerCAmelCase ( self : Tuple ) -> List[str]:
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_A )
def __lowerCAmelCase ( self : Any ) -> Tuple:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_A )
def __lowerCAmelCase ( self : Optional[int] ) -> str:
self.check_over_configs(thresholding=_A )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_A , prediction_type=_A , sample_max_value=_A , )
def __lowerCAmelCase ( self : Tuple ) -> List[str]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_A )
def __lowerCAmelCase ( self : Optional[Any] ) -> List[str]:
for t in [0, 500, 999]:
self.check_over_forward(time_step=_A )
def __lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
__magic_name__ : Union[str, Any] = self.scheduler_classes[0]
__magic_name__ : Any = self.get_scheduler_config()
__magic_name__ : Dict = scheduler_class(**_A )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5
def __lowerCAmelCase ( self : Tuple ) -> int:
__magic_name__ : Tuple = self.scheduler_classes[0]
__magic_name__ : Union[str, Any] = self.get_scheduler_config()
__magic_name__ : str = scheduler_class(**_A )
__magic_name__ : Any = len(_A )
__magic_name__ : Union[str, Any] = self.dummy_model()
__magic_name__ : List[Any] = self.dummy_sample_deter
__magic_name__ : Optional[Any] = torch.manual_seed(0 )
for t in reversed(range(_A ) ):
# 1. predict noise residual
__magic_name__ : Tuple = model(_A , _A )
# 2. predict previous mean of sample x_t-1
__magic_name__ : Union[str, Any] = scheduler.step(_A , _A , _A , generator=_A ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__magic_name__ : Dict = pred_prev_sample
__magic_name__ : Union[str, Any] = torch.sum(torch.abs(_A ) )
__magic_name__ : Dict = torch.mean(torch.abs(_A ) )
assert abs(result_sum.item() - 258.9606 ) < 1E-2
assert abs(result_mean.item() - 0.3372 ) < 1E-3
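# Readable sketch of the denoising loop the full-loop tests above exercise,
# assuming the public diffusers API (imports at the top of this file); the zero
# residual is a stand-in for a real noise-prediction model.
sketch_scheduler = DDPMScheduler(num_train_timesteps=1000)
sketch_sample = torch.randn(1, 3, 8, 8)
for t in sketch_scheduler.timesteps:  # runs from T-1 down to 0
    residual = torch.zeros_like(sketch_sample)  # stand-in for model(sample, t)
    sketch_sample = sketch_scheduler.step(residual, t, sketch_sample).prev_sample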
def __lowerCAmelCase ( self : Tuple ) -> Optional[int]:
__magic_name__ : List[Any] = self.scheduler_classes[0]
__magic_name__ : List[str] = self.get_scheduler_config(prediction_type='v_prediction' )
__magic_name__ : Any = scheduler_class(**_A )
__magic_name__ : Any = len(_A )
__magic_name__ : Dict = self.dummy_model()
__magic_name__ : str = self.dummy_sample_deter
__magic_name__ : str = torch.manual_seed(0 )
for t in reversed(range(_A ) ):
# 1. predict noise residual
__magic_name__ : List[Any] = model(_A , _A )
# 2. predict previous mean of sample x_t-1
__magic_name__ : Tuple = scheduler.step(_A , _A , _A , generator=_A ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__magic_name__ : List[Any] = pred_prev_sample
__magic_name__ : int = torch.sum(torch.abs(_A ) )
__magic_name__ : Any = torch.mean(torch.abs(_A ) )
assert abs(result_sum.item() - 202.0296 ) < 1E-2
assert abs(result_mean.item() - 0.2631 ) < 1E-3
def __lowerCAmelCase ( self : List[str] ) -> str:
__magic_name__ : Dict = self.scheduler_classes[0]
__magic_name__ : Any = self.get_scheduler_config()
__magic_name__ : Optional[Any] = scheduler_class(**_A )
__magic_name__ : List[str] = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=_A )
__magic_name__ : List[str] = scheduler.timesteps
for i, timestep in enumerate(_A ):
if i == len(_A ) - 1:
__magic_name__ : Optional[int] = -1
else:
__magic_name__ : List[Any] = timesteps[i + 1]
__magic_name__ : Union[str, Any] = scheduler.previous_timestep(_A )
__magic_name__ : Any = prev_t.item()
self.assertEqual(_A , _A )
def __lowerCAmelCase ( self : Tuple ) -> str:
__magic_name__ : str = self.scheduler_classes[0]
__magic_name__ : Union[str, Any] = self.get_scheduler_config()
__magic_name__ : Union[str, Any] = scheduler_class(**_A )
__magic_name__ : Optional[int] = [100, 87, 50, 51, 0]
with self.assertRaises(_A , msg='`custom_timesteps` must be in descending order.' ):
scheduler.set_timesteps(timesteps=_A )
def __lowerCAmelCase ( self : Optional[int] ) -> int:
__magic_name__ : Union[str, Any] = self.scheduler_classes[0]
__magic_name__ : Union[str, Any] = self.get_scheduler_config()
__magic_name__ : Union[str, Any] = scheduler_class(**_A )
__magic_name__ : Optional[int] = [100, 87, 50, 1, 0]
__magic_name__ : Tuple = len(_A )
with self.assertRaises(_A , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ):
scheduler.set_timesteps(num_inference_steps=_A , timesteps=_A )
def __lowerCAmelCase ( self : str ) -> Optional[Any]:
__magic_name__ : List[Any] = self.scheduler_classes[0]
__magic_name__ : List[str] = self.get_scheduler_config()
__magic_name__ : Union[str, Any] = scheduler_class(**_A )
__magic_name__ : Tuple = [scheduler.config.num_train_timesteps]
with self.assertRaises(
_A , msg=F'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}' , ):
scheduler.set_timesteps(timesteps=_A ) | 331 | 1 |
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A :
def __init__(self : List[str] , __UpperCAmelCase : Any , __UpperCAmelCase : int=3 , __UpperCAmelCase : Optional[int]=3_2 , __UpperCAmelCase : Any=3 , __UpperCAmelCase : Dict=1_0 , __UpperCAmelCase : int=[1_0, 2_0, 3_0, 4_0] , __UpperCAmelCase : List[str]=[1, 1, 2, 1] , __UpperCAmelCase : Tuple=True , __UpperCAmelCase : List[str]=True , __UpperCAmelCase : Optional[Any]="relu" , __UpperCAmelCase : Any=3 , __UpperCAmelCase : Union[str, Any]=None , ) -> int:
"""simple docstring"""
UpperCAmelCase__ = parent
UpperCAmelCase__ = batch_size
UpperCAmelCase__ = image_size
UpperCAmelCase__ = num_channels
UpperCAmelCase__ = embeddings_size
UpperCAmelCase__ = hidden_sizes
UpperCAmelCase__ = depths
UpperCAmelCase__ = is_training
UpperCAmelCase__ = use_labels
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = num_labels
UpperCAmelCase__ = scope
UpperCAmelCase__ = len(__UpperCAmelCase )
def lowercase_ (self : Tuple ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ = None
if self.use_labels:
UpperCAmelCase__ = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase__ = self.get_config()
return config, pixel_values, labels
def lowercase_ (self : Dict ) -> List[str]:
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def lowercase_ (self : Any , __UpperCAmelCase : str , __UpperCAmelCase : List[Any] , __UpperCAmelCase : int ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = TFRegNetModel(config=__UpperCAmelCase )
UpperCAmelCase__ = model(__UpperCAmelCase , training=__UpperCAmelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def lowercase_ (self : List[str] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Tuple ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = self.num_labels
UpperCAmelCase__ = TFRegNetForImageClassification(__UpperCAmelCase )
UpperCAmelCase__ = model(__UpperCAmelCase , labels=__UpperCAmelCase , training=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase_ (self : List[str] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = config_and_inputs
UpperCAmelCase__ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class A ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
__UpperCAmelCase : Dict = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
__UpperCAmelCase : Tuple = (
{'feature-extraction': TFRegNetModel, 'image-classification': TFRegNetForImageClassification}
if is_tf_available()
else {}
)
__UpperCAmelCase : Optional[int] = False
__UpperCAmelCase : List[str] = False
__UpperCAmelCase : Optional[int] = False
__UpperCAmelCase : str = False
__UpperCAmelCase : Optional[int] = False
def lowercase_ (self : List[str] ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = TFRegNetModelTester(self )
UpperCAmelCase__ = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase )
def lowercase_ (self : Any ) -> Optional[Any]:
"""simple docstring"""
return
@unittest.skip(reason="RegNet does not use inputs_embeds" )
def lowercase_ (self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
@slow
def lowercase_ (self : Tuple ) -> Tuple:
"""simple docstring"""
super().test_keras_fit()
@unittest.skip(reason="RegNet does not support input and output embeddings" )
def lowercase_ (self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
pass
def lowercase_ (self : List[Any] ) -> Any:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ = model_class(__UpperCAmelCase )
UpperCAmelCase__ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ = [*signature.parameters.keys()]
UpperCAmelCase__ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
def lowercase_ (self : Any ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def lowercase_ (self : int ) -> str:
"""simple docstring"""
def check_hidden_states_output(__UpperCAmelCase : str , __UpperCAmelCase : int , __UpperCAmelCase : List[str] ):
UpperCAmelCase__ = model_class(__UpperCAmelCase )
UpperCAmelCase__ = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) , training=__UpperCAmelCase )
UpperCAmelCase__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase__ = self.model_tester.num_stages
self.assertEqual(len(__UpperCAmelCase ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ = ["basic", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
UpperCAmelCase__ = layer_type
UpperCAmelCase__ = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase__ = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def lowercase_ (self : Optional[int] ) -> str:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(__UpperCAmelCase : int , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Optional[Any]={} ):
UpperCAmelCase__ = model(__UpperCAmelCase , return_dict=__UpperCAmelCase , **__UpperCAmelCase )
UpperCAmelCase__ = model(__UpperCAmelCase , return_dict=__UpperCAmelCase , **__UpperCAmelCase ).to_tuple()
def recursive_check(__UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Union[str, Any] ):
if isinstance(__UpperCAmelCase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(__UpperCAmelCase , __UpperCAmelCase ):
recursive_check(__UpperCAmelCase , __UpperCAmelCase )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(__UpperCAmelCase , __UpperCAmelCase ) ) , msg=(
"Tuple and dict output are not equal. Difference:"
f""" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}"""
) , )
recursive_check(__UpperCAmelCase , __UpperCAmelCase )
for model_class in self.all_model_classes:
UpperCAmelCase__ = model_class(__UpperCAmelCase )
UpperCAmelCase__ = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase )
UpperCAmelCase__ = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase )
check_equivalence(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
UpperCAmelCase__ = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
UpperCAmelCase__ = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
check_equivalence(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
UpperCAmelCase__ = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase )
UpperCAmelCase__ = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase )
check_equivalence(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , {"output_hidden_states": True} )
UpperCAmelCase__ = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
UpperCAmelCase__ = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
check_equivalence(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , {"output_hidden_states": True} )
def lowercase_ (self : List[Any] ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase )
@slow
def lowercase_ (self : Optional[int] ) -> Dict:
"""simple docstring"""
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ = TFRegNetModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def lowerCAmelCase_ ( ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class A ( unittest.TestCase ):
@cached_property
def lowercase_ (self : Dict ) -> Optional[Any]:
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def lowercase_ (self : List[Any] ) -> int:
"""simple docstring"""
UpperCAmelCase__ = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
UpperCAmelCase__ = self.default_image_processor
UpperCAmelCase__ = prepare_img()
UpperCAmelCase__ = image_processor(images=__UpperCAmelCase , return_tensors="tf" )
# forward pass
UpperCAmelCase__ = model(**__UpperCAmelCase , training=__UpperCAmelCase )
# verify the logits
UpperCAmelCase__ = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , __UpperCAmelCase )
UpperCAmelCase__ = tf.constant([-0.4180, -1.5051, -3.4836] )
tf.debugging.assert_near(outputs.logits[0, :3] , __UpperCAmelCase , atol=1E-4 )
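# Minimal inference sketch distilled from the integration test above, assuming
# the public transformers API; it reuses TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
# and the prepare_img helper from this file, and downloads weights when run.
sketch_processor = AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
sketch_model = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
sketch_inputs = sketch_processor(images=prepare_img(), return_tensors="tf")
sketch_logits = sketch_model(**sketch_inputs, training=False).logits  # shape (1, 1000)
sketch_class = int(tf.math.argmax(sketch_logits, axis=-1)[0])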
| 143 | import os
import zipfile
import pytest
from datasets.utils.extract import (
Bzip2Extractor,
Extractor,
GzipExtractor,
Lz4Extractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lz4, require_py7zr, require_zstandard
@pytest.mark.parametrize(
"compression_format, is_archive", [
("7z", True),
("bz2", False),
("gzip", False),
("lz4", False),
("tar", True),
("xz", False),
("zip", True),
("zstd", False),
], )
def lowerCAmelCase_ ( __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, ) -> Any:
'''simple docstring'''
UpperCAmelCase__ = {
"7z": (seven_zip_file, SevenZipExtractor),
"bz2": (bza_file, BzipaExtractor),
"gzip": (gz_file, GzipExtractor),
"lz4": (lza_file, LzaExtractor),
"tar": (tar_file, TarExtractor),
"xz": (xz_file, XzExtractor),
"zip": (zip_file, ZipExtractor),
"zstd": (zstd_file, ZstdExtractor),
}
UpperCAmelCase__ , UpperCAmelCase__ = input_paths_and_base_extractors[compression_format]
if input_path is None:
UpperCAmelCase__ = f"""for '{compression_format}' compression_format, """
if compression_format == "7z":
reason += require_py7zr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lz4.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(__A )
assert base_extractor.is_extractable(__A )
UpperCAmelCase__ = tmp_path / ("extracted" if is_archive else "extracted.txt")
base_extractor.extract(__A, __A )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
UpperCAmelCase__ = file_path.read_text(encoding="utf-8" )
else:
UpperCAmelCase__ = output_path.read_text(encoding="utf-8" )
UpperCAmelCase__ = text_file.read_text(encoding="utf-8" )
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
"compression_format, is_archive", [
("7z", True),
("bz2", False),
("gzip", False),
("lz4", False),
("tar", True),
("xz", False),
("zip", True),
("zstd", False),
], )
def lowerCAmelCase_ ( __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A, ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase__ = {
"7z": seven_zip_file,
"bz2": bza_file,
"gzip": gz_file,
"lz4": lza_file,
"tar": tar_file,
"xz": xz_file,
"zip": zip_file,
"zstd": zstd_file,
}
UpperCAmelCase__ = input_paths[compression_format]
if input_path is None:
UpperCAmelCase__ = f"""for '{compression_format}' compression_format, """
if compression_format == "7z":
reason += require_py7zr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lz4.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(__A )
UpperCAmelCase__ = Extractor.infer_extractor_format(__A )
assert extractor_format is not None
UpperCAmelCase__ = tmp_path / ("extracted" if is_archive else "extracted.txt")
Extractor.extract(__A, __A, __A )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
UpperCAmelCase__ = file_path.read_text(encoding="utf-8" )
else:
UpperCAmelCase__ = output_path.read_text(encoding="utf-8" )
UpperCAmelCase__ = text_file.read_text(encoding="utf-8" )
assert extracted_file_content == expected_file_content
@pytest.fixture
def lowerCAmelCase_ ( __A, __A ) -> List[str]:
'''simple docstring'''
import tarfile
UpperCAmelCase__ = tmp_path / "data_dot_dot"
directory.mkdir()
UpperCAmelCase__ = directory / "tar_file_with_dot_dot.tar"
with tarfile.TarFile(__A, "w" ) as f:
f.add(__A, arcname=os.path.join("..", text_file.name ) )
return path
@pytest.fixture
def lowerCAmelCase_ ( __A ) -> Dict:
'''simple docstring'''
import tarfile
UpperCAmelCase__ = tmp_path / "data_sym_link"
directory.mkdir()
UpperCAmelCase__ = directory / "tar_file_with_sym_link.tar"
os.symlink("..", directory / "subdir", target_is_directory=__A )
with tarfile.TarFile(__A, "w" ) as f:
f.add(str(directory / "subdir" ), arcname="subdir" ) # str required by os.readlink on Windows and Python < 3.8
return path
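# Hedged sketch (pure stdlib) of the check the two "insecure" fixtures above are
# built to trigger: a tar member whose resolved path escapes the extraction
# directory must be rejected before extraction.
import tarfile
def is_safe_member(member: tarfile.TarInfo, dest_dir: str) -> bool:
    # Reject names that resolve outside dest_dir (the "illegal path" case); a
    # fuller check would also inspect member.issym()/member.islnk() targets,
    # which is what the symlink fixture exercises.
    dest = os.path.realpath(dest_dir)
    target = os.path.realpath(os.path.join(dest, member.name))
    return os.path.commonpath([dest, target]) == dest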
@pytest.mark.parametrize(
"insecure_tar_file, error_log", [("tar_file_with_dot_dot", "illegal path"), ("tar_file_with_sym_link", "Symlink")], )
def lowerCAmelCase_ ( __A, __A, __A, __A, __A, __A ) -> Dict:
'''simple docstring'''
UpperCAmelCase__ = {
"tar_file_with_dot_dot": tar_file_with_dot_dot,
"tar_file_with_sym_link": tar_file_with_sym_link,
}
UpperCAmelCase__ = insecure_tar_files[insecure_tar_file]
UpperCAmelCase__ = tmp_path / "extracted"
TarExtractor.extract(__A, __A )
assert caplog.text
for record in caplog.records:
assert record.levelname == "ERROR"
assert error_log in record.msg
def lowerCAmelCase_ ( __A ) -> Any:
'''simple docstring'''
UpperCAmelCase__ = tmpdir / "not_a_zip_file"
# From: https://github.com/python/cpython/pull/5053
UpperCAmelCase__ = (
B"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
B"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
B"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
B"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
)
with not_a_zip_file.open("wb" ) as f:
f.write(__A )
assert zipfile.is_zipfile(str(__A ) ) # is a false positive for `zipfile`
assert not ZipExtractor.is_extractable(__A ) # but we're right
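# Sketch of a stricter check than zipfile.is_zipfile, which the test above shows
# is fooled by a PNG containing the end-of-central-directory magic (PK\x05\x06):
# actually open and verify the archive instead.
def really_a_zip(path) -> bool:
    try:
        with zipfile.ZipFile(path) as zf:
            return zf.testzip() is None  # testzip also verifies member CRCs
    except zipfile.BadZipFile:
        return False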
| 143 | 1 |
'''simple docstring'''
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class lowerCAmelCase__ ( lowerCamelCase_ ):
def __init__( self , *__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = eval_examples
lowercase_ : Tuple = post_process_function
def _snake_case ( self , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "eval" , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
lowercase_ : Optional[int] = gen_kwargs.copy()
lowercase_ : List[str] = (
gen_kwargs['''max_length'''] if gen_kwargs.get('''max_length''' ) is not None else self.args.generation_max_length
)
lowercase_ : str = (
gen_kwargs['''num_beams'''] if gen_kwargs.get('''num_beams''' ) is not None else self.args.generation_num_beams
)
lowercase_ : Dict = gen_kwargs
lowercase_ : List[Any] = self.eval_dataset if eval_dataset is None else eval_dataset
lowercase_ : List[str] = self.get_eval_dataloader(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
lowercase_ : Union[str, Any] = self.compute_metrics
lowercase_ : Optional[int] = None
lowercase_ : Tuple = time.time()
lowercase_ : Tuple = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowercase_ : str = eval_loop(
__SCREAMING_SNAKE_CASE , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__SCREAMING_SNAKE_CASE , metric_key_prefix=__SCREAMING_SNAKE_CASE , )
finally:
lowercase_ : Any = compute_metrics
lowercase_ : Any = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
lowercase_ : Optional[Any] = self.post_process_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = self.compute_metrics(__SCREAMING_SNAKE_CASE )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
lowercase_ : List[Any] = metrics.pop(__SCREAMING_SNAKE_CASE )
metrics.update(output.metrics )
else:
lowercase_ : List[Any] = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(__SCREAMING_SNAKE_CASE )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
lowercase_ : List[Any] = self.callback_handler.on_evaluate(self.args , self.state , self.control , __SCREAMING_SNAKE_CASE )
return metrics
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE = "test" , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Union[str, Any] = gen_kwargs.copy()
lowercase_ : Tuple = self.get_test_dataloader(__SCREAMING_SNAKE_CASE )
# Temporarily disable metric computation, we will do it in the loop here.
lowercase_ : Optional[Any] = self.compute_metrics
lowercase_ : Optional[int] = None
lowercase_ : List[Any] = time.time()
lowercase_ : int = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowercase_ : Tuple = eval_loop(
__SCREAMING_SNAKE_CASE , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__SCREAMING_SNAKE_CASE , metric_key_prefix=__SCREAMING_SNAKE_CASE , )
finally:
lowercase_ : Any = compute_metrics
lowercase_ : Tuple = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
lowercase_ : Any = self.post_process_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , '''predict''' )
lowercase_ : str = self.compute_metrics(__SCREAMING_SNAKE_CASE )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
lowercase_ : Optional[int] = metrics.pop(__SCREAMING_SNAKE_CASE )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__SCREAMING_SNAKE_CASE )
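# Usage sketch for the subclass above (wiring assumed): generation arguments
# flow in through **gen_kwargs and fall back to args.generation_max_length and
# args.generation_num_beams when not supplied.
# metrics = trainer.evaluate(max_length=64, num_beams=4, metric_key_prefix="eval")
# predictions = trainer.predict(test_dataset, test_examples, metric_key_prefix="test")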
| 93 |
'''simple docstring'''
from math import isqrt, log2
def snake_case_ ( __SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
lowercase_ : Any = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase_ : Optional[Any] = False
return [i for i in range(2 , __SCREAMING_SNAKE_CASE ) if is_prime[i]]
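# Quick check of the sieve above (called as calculate_prime_numbers below):
# with max_number = 20 it returns [2, 3, 5, 7, 11, 13, 17, 19].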
def snake_case_ ( __SCREAMING_SNAKE_CASE : int = 800800 , __SCREAMING_SNAKE_CASE : int = 800800 ):
"""simple docstring"""
lowercase_ : Union[str, Any] = degree * log2(__SCREAMING_SNAKE_CASE )
lowercase_ : Any = int(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = calculate_prime_numbers(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = 0
lowercase_ : List[Any] = 0
lowercase_ : Union[str, Any] = len(__SCREAMING_SNAKE_CASE ) - 1
while left < right:
while (
prime_numbers[right] * log2(prime_numbers[left] )
+ prime_numbers[left] * log2(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
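# The two-pointer condition above comes from taking base-2 logarithms of the
# defining inequality for hybrid integers:
#   p**q * q**p <= n**n  <=>  q*log2(p) + p*log2(q) <= n*log2(n)
# with n = 800800, which is exactly the upper_bound computed from
# degree * log2(number) at the top of the function.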
if __name__ == "__main__":
print(f"""{solution() = }""")
| 93 | 1 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)


@dataclass(frozen=True)
class InputExample:
    """A single training/test example for the HANS data set."""

    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    """A single set of tokenized features."""

    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
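# InputExample carries raw HANS premise/hypothesis pairs; InputFeatures holds
# the tokenized tensors plus the pairID used by the official HANS evaluator.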
if is_torch_available():
import torch
from torch.utils.data import Dataset
    class HansDataset(Dataset):
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}_{}".format(
                    "dev" if evaluate else "train", tokenizer.__class__.__name__, str(max_seq_length), task,
                ),
            )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = (
                        processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                    )
                    logger.info("Training examples: %s", len(examples))
                    self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                    logger.info("Saving features into cached file %s", cached_features_file)
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i):
            return self.features[i]

        def get_labels(self):
            return self.label_list
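# The TF variant below mirrors HansDataset but materializes a tf.data.Dataset
# from a generator instead of relying on torch's indexing protocol.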
if is_tf_available():
import tensorflow as tf
    class TFHansDataset:
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = 128,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
            self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)

            def gen():
                for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
                    if ex_index % 10000 == 0:
                        logger.info("Writing example %d of %d" % (ex_index, len(examples)))
                    yield (
                        {
                            "example_id": 0,
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )

            # dtypes restored from the garbled `tf.intaa`: int32 inputs, int64 labels
            self.dataset = tf.data.Dataset.from_generator(
                gen,
                (
                    {
                        "example_id": tf.int32,
                        "input_ids": tf.int32,
                        "attention_mask": tf.int32,
                        "token_type_ids": tf.int32,
                    },
                    tf.int64,
                ),
                (
                    {
                        "example_id": tf.TensorShape([]),
                        "input_ids": tf.TensorShape([None, None]),
                        "attention_mask": tf.TensorShape([None, None]),
                        "token_type_ids": tf.TensorShape([None, None]),
                    },
                    tf.TensorShape([]),
                ),
            )

        def get_dataset(self):
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i):
            return self.features[i]

        def get_labels(self):
            return self.label_list
class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        """See base class."""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(
    examples: List[InputExample], label_list: List[str], max_length: int, tokenizer: PreTrainedTokenizer,
) -> List[InputFeatures]:
    """Loads a list of InputExamples into a list of InputFeatures."""
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10_000 == 0:
            logger.info("Writing example %d" % (ex_index))

        # boolean tokenizer flags restored (the dataset dump had dropped them)
        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )

        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)
        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features
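# Illustrative usage sketch (names taken from this file, paths hypothetical):
#   processor = HansProcessor()
#   examples = processor.get_dev_examples("/path/to/hans")
#   features = hans_convert_examples_to_features(examples, processor.get_labels(), 128, tokenizer)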
hans_tasks_num_labels = {
    "hans": 3,
}
hans_processors = {
    "hans": HansProcessor,
} | 31 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}
class GPTNeoXJapaneseConfig(PretrainedConfig):
    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10000,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31996,
        eos_token_id=31999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout | 31 | 1 |
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
'''config''': [
'''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''',
'''OnnxConfig''',
'''OnnxConfigWithPast''',
'''OnnxSeq2SeqConfigWithPast''',
'''PatchingSpec''',
],
'''convert''': ['''export''', '''validate_model_outputs'''],
'''features''': ['''FeaturesManager'''],
'''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''],
}
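# `_import_structure` maps submodules to their public names; `_LazyModule`
# (bottom of file) imports a submodule only when one of these names is first
# accessed, keeping `import transformers.onnx` cheap.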
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 8 |
'''simple docstring'''
import math
def is_prime(number: int) -> bool:
    """Checks whether a number is prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    """Returns the side length of the square spiral at which the ratio of
    primes along both diagonals first falls below the given ratio."""
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
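# Note: range(j*j + j + 1, (j + 2) * (j + 2), j + 1) enumerates the three
# non-square corners of the next spiral layer (side j + 2); the fourth corner,
# (j + 2)**2, is a perfect square and never prime, so it is skipped.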
if __name__ == "__main__":
import doctest
doctest.testmod() | 190 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
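# The tester above only carries expected settings; the test case below feeds
# them to DonutImageProcessor and checks attribute wiring and output shapes.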
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})
    def test_batch_feature(self):  # empty placeholder; the restored name is a best guess
        pass
@is_flaky()
    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images (equal_resolution=False assumed, as in the standard test)
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
@is_flaky()
    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
@is_flaky()
    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
| 51 |
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class PipelineFastTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            sample_size=(32, 64), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("AttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "AttnUpBlock2D"), )
        return model
    @property
    def dummy_unet_condition(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), cross_attention_dim=10, )
        return model
    @property
    def dummy_vqvae_and_unet(self):
        torch.manual_seed(0)
        vqvae = AutoencoderKL(
            sample_size=(128, 64), in_channels=1, out_channels=1, latent_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"), up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"), )
        unet = UNet2DModel(
            sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("AttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "AttnUpBlock2D"), )
        return vqvae, unet
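    # The fixtures above are tiny, seeded randomly-initialized models, so the
    # pipeline test below runs quickly and deterministically on CPU.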
    @slow
    def test_audio_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(x_res=self.dummy_unet.config.sample_size[1], y_res=self.dummy_unet.config.sample_size[0])
        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None, unet=self.dummy_unet, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4)
        audio = output.audios[0]
        image = output.images[0]

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4, return_dict=False)
        image_from_tuple = output[0][0]

        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() == 0

        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1], y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0], )
        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        raw_audio = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,))
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(raw_audio=raw_audio, generator=generator, start_step=5, steps=10)
        image = output.images[0]

        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0

        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_unet_condition, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        encoding = torch.rand((1, 1, 10))
        output = pipe(generator=generator, encoding=encoding)
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_audio_diffusion(self):
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator)
        audio = output.audios[0]
        image = output.images[0]

        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
| 51 | 1 |
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
BERT_BASE_CASED = "bert-base-cased"
PEGASUS_XSUM = "google/pegasus-xsum"
ARTICLES = [" Sam ate lunch today.", "Sams lunch ingredients."]
SUMMARIES = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"
MARIAN_TINY = "sshleifer/tiny-marian-en-de"
def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


def make_test_data_dir(tmp_dir):
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir, f"{split}.source"), ARTICLES)
        _dump_articles(os.path.join(tmp_dir, f"{split}.target"), SUMMARIES)
    return tmp_dir
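# Illustrative usage (path is hypothetical):
#   data_dir = make_test_data_dir(tmp_dir="/tmp/seq2seq_test")
# writes {train,val,test}.{source,target} files from the toy ARTICLES/SUMMARIES above.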
class TestAll(TestCasePlus):
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def lowerCamelCase ( self : List[str] , _snake_case : str):
"""simple docstring"""
UpperCAmelCase_ = AutoTokenizer.from_pretrained(_snake_case)
UpperCAmelCase_ = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
UpperCAmelCase_ = max(len(tokenizer.encode(_snake_case)) for a in ARTICLES)
UpperCAmelCase_ = max(len(tokenizer.encode(_snake_case)) for a in SUMMARIES)
UpperCAmelCase_ = 4
UpperCAmelCase_ = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
UpperCAmelCase_ , UpperCAmelCase_ = '''ro_RO''', '''de_DE''' # ignored for all but mbart, but never causes error.
UpperCAmelCase_ = SeqaSeqDataset(
_snake_case , data_dir=_snake_case , type_path='''train''' , max_source_length=_snake_case , max_target_length=_snake_case , src_lang=_snake_case , tgt_lang=_snake_case , )
UpperCAmelCase_ = DataLoader(_snake_case , batch_size=2 , collate_fn=train_dataset.collate_fn)
for batch in dataloader:
assert isinstance(_snake_case , _snake_case)
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
UpperCAmelCase_ = shift_tokens_right(batch['''labels'''] , tokenizer.pad_token_id)
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED])
def lowerCamelCase ( self : Optional[int] , _snake_case : int):
"""simple docstring"""
UpperCAmelCase_ = AutoTokenizer.from_pretrained(_snake_case)
UpperCAmelCase_ = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
UpperCAmelCase_ = max(len(tokenizer.encode(_snake_case)) for a in ARTICLES)
UpperCAmelCase_ = max(len(tokenizer.encode(_snake_case)) for a in SUMMARIES)
UpperCAmelCase_ = 4
UpperCAmelCase_ = LegacySeqaSeqDataset(
_snake_case , data_dir=_snake_case , type_path='''train''' , max_source_length=20 , max_target_length=_snake_case , )
UpperCAmelCase_ = DataLoader(_snake_case , batch_size=2 , collate_fn=train_dataset.collate_fn)
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = AutoTokenizer.from_pretrained('''facebook/mbart-large-cc25''')
UpperCAmelCase_ = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
UpperCAmelCase_ = tmp_dir.joinpath('''train.source''').open().readlines()
UpperCAmelCase_ = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
pack_data_dir(_snake_case , _snake_case , 128 , _snake_case)
UpperCAmelCase_ = {x.name for x in tmp_dir.iterdir()}
UpperCAmelCase_ = {x.name for x in save_dir.iterdir()}
UpperCAmelCase_ = save_dir.joinpath('''train.source''').open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(_snake_case) < len(_snake_case)
assert len(_snake_case) == 1
assert len(packed_examples[0]) == sum(len(_snake_case) for x in orig_examples)
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='''This test requires fairseq''')
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
if not FAIRSEQ_AVAILABLE:
return
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = self._get_dataset(max_len=64)
UpperCAmelCase_ = 64
UpperCAmelCase_ = ds.make_dynamic_sampler(_snake_case , required_batch_size_multiple=_snake_case)
UpperCAmelCase_ = [len(_snake_case) for x in batch_sampler]
assert len(set(_snake_case)) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(_snake_case) == len(_snake_case) # no dropped or added examples
UpperCAmelCase_ = DataLoader(_snake_case , batch_sampler=_snake_case , collate_fn=ds.collate_fn , num_workers=2)
UpperCAmelCase_ = []
UpperCAmelCase_ = []
for batch in data_loader:
UpperCAmelCase_ = batch['''input_ids'''].shape
UpperCAmelCase_ = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
UpperCAmelCase_ = np.product(batch['''input_ids'''].shape)
num_src_per_batch.append(_snake_case)
if num_src_tokens > (max_tokens * 1.1):
failures.append(_snake_case)
assert num_src_per_batch[0] == max(_snake_case)
if failures:
raise AssertionError(F"""too many tokens in {len(_snake_case)} batches""")
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = self._get_dataset(max_len=512)
UpperCAmelCase_ = 2
UpperCAmelCase_ = ds.make_sortish_sampler(_snake_case , shuffle=_snake_case)
UpperCAmelCase_ = DataLoader(_snake_case , batch_size=_snake_case , collate_fn=ds.collate_fn , num_workers=2)
UpperCAmelCase_ = DataLoader(_snake_case , batch_size=_snake_case , collate_fn=ds.collate_fn , num_workers=2 , sampler=_snake_case)
UpperCAmelCase_ = tokenizer.pad_token_id
def count_pad_tokens(_snake_case : List[Any] , _snake_case : Tuple="input_ids"):
return [batch[k].eq(_snake_case).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(_snake_case , k='''labels''')) < sum(count_pad_tokens(_snake_case , k='''labels'''))
assert sum(count_pad_tokens(_snake_case)) < sum(count_pad_tokens(_snake_case))
assert len(_snake_case) == len(_snake_case)
def lowerCamelCase ( self : Optional[Any] , _snake_case : Dict=1000 , _snake_case : str=128):
"""simple docstring"""
if os.getenv('''USE_REAL_DATA''' , _snake_case):
UpperCAmelCase_ = '''examples/seq2seq/wmt_en_ro'''
UpperCAmelCase_ = max_len * 2 * 64
if not Path(_snake_case).joinpath('''train.len''').exists():
save_len_file(_snake_case , _snake_case)
else:
UpperCAmelCase_ = '''examples/seq2seq/test_data/wmt_en_ro'''
UpperCAmelCase_ = max_len * 4
save_len_file(_snake_case , _snake_case)
UpperCAmelCase_ = AutoTokenizer.from_pretrained(_snake_case)
UpperCAmelCase_ = SeqaSeqDataset(
_snake_case , data_dir=_snake_case , type_path='''train''' , max_source_length=_snake_case , max_target_length=_snake_case , n_obs=_snake_case , )
return ds, max_tokens, tokenizer
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = self._get_dataset()
UpperCAmelCase_ = set(DistributedSortishSampler(_snake_case , 256 , num_replicas=2 , rank=0 , add_extra_examples=_snake_case))
UpperCAmelCase_ = set(DistributedSortishSampler(_snake_case , 256 , num_replicas=2 , rank=1 , add_extra_examples=_snake_case))
assert idsa.intersection(_snake_case) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
def lowerCamelCase ( self : List[str] , _snake_case : List[str]):
"""simple docstring"""
UpperCAmelCase_ = AutoTokenizer.from_pretrained(_snake_case , use_fast=_snake_case)
if tok_name == MBART_TINY:
UpperCAmelCase_ = SeqaSeqDataset(
_snake_case , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()) , type_path='''train''' , max_source_length=4 , max_target_length=8 , src_lang='''EN''' , tgt_lang='''FR''' , )
UpperCAmelCase_ = train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
UpperCAmelCase_ = SeqaSeqDataset(
_snake_case , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()) , type_path='''train''' , max_source_length=4 , max_target_length=8 , )
UpperCAmelCase_ = train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(_snake_case) == 1 if tok_name == BART_TINY else len(_snake_case) == 0
| 51 | '''simple docstring'''
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class NystromformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NystromformerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
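    # Every create_and_check_* helper below follows the same template: build
    # the task head from `config`, run a forward pass on the dummy tensors,
    # and assert the output shapes expected for that task.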
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NystromformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NystromformerForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NystromformerForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NystromformerForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NystromformerForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = NystromformerForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NystromformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NystromformerModel,
            NystromformerForMaskedLM,
            NystromformerForMultipleChoice,
            NystromformerForQuestionAnswering,
            NystromformerForSequenceClassification,
            NystromformerForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NystromformerModel,
            "fill-mask": NystromformerForMaskedLM,
            "question-answering": NystromformerForQuestionAnswering,
            "text-classification": NystromformerForSequenceClassification,
            "token-classification": NystromformerForTokenClassification,
            "zero-shot": NystromformerForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_headmasking = False
    def setUp(self):
        self.model_tester = NystromformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NystromformerConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NystromformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class NystromformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = NystromformerModel.from_pretrained("uw-madison/nystromformer-512")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_masked_lm_end_to_end(self):
        sentence = "the [MASK] of Belgium is Brussels"
        tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
        model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")
        encoding = tokenizer(sentence, return_tensors="pt")

        with torch.no_grad():
            token_logits = model(encoding.input_ids).logits

        prediction = token_logits[:, 2, :].argmax(-1)[0]
        self.assertEqual(tokenizer.decode(prediction), "capital")
| 239 | 0 |
import sys
N = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
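# N is the 1000-digit number from Project Euler problem 8; the task is to find
# the thirteen adjacent digits with the greatest product.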
def solution(n: str = N) -> int:
    """Returns the greatest product of thirteen adjacent digits in n."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product


if __name__ == "__main__":
    print(f"{solution() = }")
| 134 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, query_images=None, images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none.")
        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []
                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])
                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))
                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")
            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)
            else:
                raise ValueError("Target return tensor type could not be returned")
            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask
        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs).pixel_values
            encoding["query_pixel_values"] = query_pixel_values
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
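    # Illustrative call shape (assumed, not from the original file):
    #   processor(text=[["a cat", "a dog"]], images=image) returns stacked
    #   input_ids/attention_mask plus pixel_values for OwlViTForObjectDetection.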
    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 134 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_maskformer""": ["""MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MaskFormerConfig"""],
"""configuration_maskformer_swin""": ["""MaskFormerSwinConfig"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_maskformer"] = ["MaskFormerFeatureExtractor"]
    _import_structure["image_processing_maskformer"] = ["MaskFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_maskformer"] = [
"""MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MaskFormerForInstanceSegmentation""",
"""MaskFormerModel""",
"""MaskFormerPreTrainedModel""",
]
    _import_structure["modeling_maskformer_swin"] = [
"""MaskFormerSwinBackbone""",
"""MaskFormerSwinModel""",
"""MaskFormerSwinPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 20 |
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    embed = []
embed.append(
(
f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight",
f"stage{idx}.patch_embed.proj.weight",
) )
embed.append(
(
f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias",
f"stage{idx}.patch_embed.proj.bias",
) )
embed.append(
(
f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight",
f"stage{idx}.patch_embed.norm.weight",
) )
embed.append(
(
f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias",
f"stage{idx}.patch_embed.norm.bias",
) )
return embed
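# Key-renaming helpers: each returns (HF parameter name, original checkpoint
# name) pairs used to remap the original CvT state dict into the HF layout.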
def attention(idx, cnt):
    attention_weights = []
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight",
f"stage{idx}.blocks.{cnt}.attn.proj_q.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias",
f"stage{idx}.blocks.{cnt}.attn.proj_q.bias",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight",
f"stage{idx}.blocks.{cnt}.attn.proj_k.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias",
f"stage{idx}.blocks.{cnt}.attn.proj_k.bias",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight",
f"stage{idx}.blocks.{cnt}.attn.proj_v.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias",
f"stage{idx}.blocks.{cnt}.attn.proj_v.bias",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight",
f"stage{idx}.blocks.{cnt}.attn.proj.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias",
f"stage{idx}.blocks.{cnt}.attn.proj.bias",
) )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight", f"stage{idx}.blocks.{cnt}.mlp.fc1.weight") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias", f"stage{idx}.blocks.{cnt}.mlp.fc1.bias") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight", f"stage{idx}.blocks.{cnt}.mlp.fc2.weight") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias", f"stage{idx}.blocks.{cnt}.mlp.fc2.bias") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight", f"stage{idx}.blocks.{cnt}.norm1.weight") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias", f"stage{idx}.blocks.{cnt}.norm1.bias") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight", f"stage{idx}.blocks.{cnt}.norm2.weight") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias", f"stage{idx}.blocks.{cnt}.norm2.bias") )
return attention_weights
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> Optional[Any]:
lowercase : Optional[Any] = []
token.append((f"cvt.encoder.stages.{idx}.cls_token", """stage2.cls_token""") )
return token
def _snake_case( ) -> Dict:
lowercase : Optional[Any] = []
head.append(("""layernorm.weight""", """norm.weight""") )
head.append(("""layernorm.bias""", """norm.bias""") )
head.append(("""classifier.weight""", """head.weight""") )
head.append(("""classifier.bias""", """head.bias""") )
return head
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> str:
lowercase : Any = """imagenet-1k-id2label.json"""
lowercase : List[str] = 1_000
lowercase : int = """huggingface/label-files"""
lowercase : Union[str, Any] = num_labels
lowercase : Optional[Any] = json.load(open(cached_download(hf_hub_url(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type="""dataset""" ) ) , """r""" ) )
lowercase : List[Any] = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}
lowercase : Dict = idalabel
lowercase : List[str] = {v: k for k, v in idalabel.items()}
lowercase : List[str] = CvtConfig(num_labels=SCREAMING_SNAKE_CASE__ , idalabel=SCREAMING_SNAKE_CASE__ , labelaid=SCREAMING_SNAKE_CASE__ )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit("""/""" , 1 )[-1][4:6] == "13":
lowercase : Tuple = [1, 2, 10]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit("""/""" , 1 )[-1][4:6] == "21":
lowercase : Dict = [1, 4, 16]
# For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
else:
lowercase : int = [2, 2, 20]
lowercase : Optional[int] = [3, 12, 16]
lowercase : str = [192, 768, 1_024]
lowercase : Union[str, Any] = CvtForImageClassification(SCREAMING_SNAKE_CASE__ )
lowercase : Union[str, Any] = AutoImageProcessor.from_pretrained("""facebook/convnext-base-224-22k-1k""" )
lowercase : Optional[Any] = image_size
lowercase : Union[str, Any] = torch.load(SCREAMING_SNAKE_CASE__ , map_location=torch.device("""cpu""" ) )
lowercase : Optional[Any] = OrderedDict()
lowercase : Tuple = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
lowercase : Optional[Any] = list_of_state_dict + cls_token(SCREAMING_SNAKE_CASE__ )
lowercase : str = list_of_state_dict + embeddings(SCREAMING_SNAKE_CASE__ )
for cnt in range(config.depth[idx] ):
lowercase : List[str] = list_of_state_dict + attention(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase : List[str] = list_of_state_dict + final()
for gg in list_of_state_dict:
print(SCREAMING_SNAKE_CASE__ )
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
lowercase : Optional[Any] = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
lowercase : Tuple = argparse.ArgumentParser()
parser.add_argument(
"""--cvt_model""",
default="""cvt-w24""",
type=str,
help="""Name of the cvt model you'd like to convert.""",
)
parser.add_argument(
"""--image_size""",
default=384,
type=int,
help="""Input Image Size""",
)
parser.add_argument(
"""--cvt_file_name""",
default=R"""cvtmodels\CvT-w24-384x384-IN-22k.pth""",
type=str,
help="""Input Image Size""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
lowercase : Optional[int] = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 20 | 1 |
import pprint
import requests
a : int = """https://zenquotes.io/api"""
def __lowerCamelCase ( ) -> list:
return requests.get(API_ENDPOINT_URL + """/today""" ).json()
def __lowerCamelCase ( ) -> list:
return requests.get(API_ENDPOINT_URL + """/random""" ).json()
if __name__ == "__main__":
a : int = random_quotes()
pprint.pprint(response)
| 370 |
'''simple docstring'''
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
a : List[str] = logging.getLogger(__name__)
class UpperCamelCase_ ( __magic_name__ ):
def __init__( self , A , A , A , A=None ) -> Union[str, Any]:
super().__init__(
A , question_encoder_tokenizer=A , generator_tokenizer=A , index=A , init_retrieval=A , )
UpperCAmelCase : Optional[Any] = None
def _lowercase( self , A ) -> List[Any]:
logger.info("""initializing retrieval""" )
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info("""dist initialized""" )
# needs to be set manually
UpperCAmelCase : Tuple = self._infer_socket_ifname()
# avoid clash with the NCCL port
UpperCAmelCase : str = str(distributed_port + 1 )
UpperCAmelCase : Any = dist.new_group(ranks=A , backend="""gloo""" )
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info("""dist not initialized / main""" )
self.index.init_index()
# all processes wait untill the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group )
def _lowercase( self ) -> Dict:
return dist.get_rank(group=self.process_group ) == 0
def _lowercase( self , A , A , A=torch.floataa ) -> str:
UpperCAmelCase : List[Any] = torch.empty(A , dtype=A )
dist.scatter(A , src=0 , scatter_list=A , group=self.process_group )
return target_tensor
def _lowercase( self ) -> Any:
UpperCAmelCase : List[Any] = psutil.net_if_addrs()
# a hacky way to deal with varying network interface names
UpperCAmelCase : Optional[int] = next((addr for addr in addrs if addr.startswith("""e""" )) , A )
return ifname
def _lowercase( self , A , A ) -> Tuple[np.ndarray, List[dict]]:
# single GPU training
if not dist.is_initialized():
UpperCAmelCase , UpperCAmelCase : str = self._main_retrieve(A , A )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(A )
# distributed training
UpperCAmelCase : int = dist.get_world_size(group=self.process_group )
# gather logic
UpperCAmelCase : int = None
if self._is_main():
UpperCAmelCase : List[str] = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(A )]
dist.gather(torch.tensor(A ) , dst=0 , gather_list=A , group=self.process_group )
# scatter logic
UpperCAmelCase : List[Any] = question_hidden_states.shape[0]
UpperCAmelCase : Tuple = []
UpperCAmelCase : Any = []
if self._is_main():
assert len(A ) == world_size
UpperCAmelCase , UpperCAmelCase : Optional[int] = self._main_retrieve(torch.cat(A ).numpy() , A )
UpperCAmelCase , UpperCAmelCase : Optional[Any] = torch.tensor(A ), torch.tensor(A )
UpperCAmelCase : List[str] = self._chunk_tensor(A , A )
UpperCAmelCase : Union[str, Any] = self._chunk_tensor(A , A )
UpperCAmelCase : Tuple = self._scattered(A , [n_queries, n_docs] , target_type=torch.intaa )
UpperCAmelCase : Optional[Any] = self._scattered(A , [n_queries, n_docs, question_hidden_states.shape[1]] )
return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(A )
| 338 | 0 |
import inspect
import unittest
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self )->Any:
'''simple docstring'''
try:
import diffusers # noqa: F401
except ImportError:
assert False
def _snake_case ( self )->Dict:
'''simple docstring'''
import diffusers
from diffusers.dependency_versions_table import deps
A_ : int = inspect.getmembers(A_ , inspect.isclass )
for cls_name, cls_module in all_classes:
if "dummy_" in cls_module.__module__:
for backend in cls_module._backends:
if backend == "k_diffusion":
A_ : Tuple = '''k-diffusion'''
elif backend == "invisible_watermark":
A_ : Optional[int] = '''invisible-watermark'''
assert backend in deps, F'''{backend} is not in the deps table!'''
| 186 |
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_UpperCAmelCase : List[Any] = logging.get_logger(__name__)
_UpperCAmelCase : int = {"vocab_file": "vocab.txt"}
_UpperCAmelCase : str = {
"vocab_file": {
"openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
},
}
_UpperCAmelCase : Optional[Any] = {
"openbmb/cpm-ant-10b": 1_024,
}
def A ( lowercase ) -> Dict:
'''simple docstring'''
UpperCamelCase = collections.OrderedDict()
with open(lowercase , 'r' , encoding='utf-8' ) as reader:
UpperCamelCase = reader.readlines()
for index, token in enumerate(lowercase ):
UpperCamelCase = token.rstrip('\n' )
UpperCamelCase = index
return vocab
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , A_ , A_="<unk>" , A_=200 ) -> Dict:
"""simple docstring"""
UpperCamelCase = vocab
UpperCamelCase = unk_token
UpperCamelCase = max_input_chars_per_word
def __UpperCamelCase ( self , A_ ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = list(A_ )
if len(A_ ) > self.max_input_chars_per_word:
return [self.unk_token]
UpperCamelCase = 0
UpperCamelCase = []
while start < len(A_ ):
UpperCamelCase = len(A_ )
UpperCamelCase = None
while start < end:
UpperCamelCase = ''.join(chars[start:end] )
if substr in self.vocab:
UpperCamelCase = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(A_ )
UpperCamelCase = end
return sub_tokens
class lowercase ( _SCREAMING_SNAKE_CASE ):
__lowercase : List[str] = VOCAB_FILES_NAMES
__lowercase : Dict = PRETRAINED_VOCAB_FILES_MAP
__lowercase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase : Any = ["input_ids", "attention_mask"]
__lowercase : Tuple = False
def __init__( self , A_ , A_="<d>" , A_="</d>" , A_="<s>" , A_="</s>" , A_="<pad>" , A_="<unk>" , A_="</n>" , A_="</_>" , A_="left" , **A_ , ) -> Tuple:
"""simple docstring"""
requires_backends(self , ['jieba'] )
super().__init__(
bod_token=A_ , eod_token=A_ , bos_token=A_ , eos_token=A_ , pad_token=A_ , unk_token=A_ , line_token=A_ , space_token=A_ , padding_side=A_ , **A_ , )
UpperCamelCase = bod_token
UpperCamelCase = eod_token
UpperCamelCase = load_vocab(A_ )
UpperCamelCase = self.encoder[space_token]
UpperCamelCase = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
UpperCamelCase = collections.OrderedDict(sorted(self.encoder.items() , key=lambda A_ : x[1] ) )
UpperCamelCase = {v: k for k, v in self.encoder.items()}
UpperCamelCase = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
return self.encoder[self.bod_token]
@property
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
return self.encoder[self.eod_token]
@property
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
return self.encoder["\n"]
@property
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
return len(self.encoder )
def __UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def __UpperCamelCase ( self , A_ ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = []
for x in jieba.cut(A_ , cut_all=A_ ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(A_ ) )
return output_tokens
def __UpperCamelCase ( self , A_ , **A_ ) -> Dict:
"""simple docstring"""
UpperCamelCase = [i for i in token_ids if i >= 0]
UpperCamelCase = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(A_ , **A_ )
def __UpperCamelCase ( self , A_ ) -> int:
"""simple docstring"""
return token in self.encoder
def __UpperCamelCase ( self , A_ ) -> str:
"""simple docstring"""
return "".join(A_ )
def __UpperCamelCase ( self , A_ ) -> Tuple:
"""simple docstring"""
return self.encoder.get(A_ , self.encoder.get(self.unk_token ) )
def __UpperCamelCase ( self , A_ ) -> Optional[Any]:
"""simple docstring"""
return self.decoder.get(A_ , self.unk_token )
def __UpperCamelCase ( self , A_ , A_ = None ) -> Tuple[str]:
"""simple docstring"""
if os.path.isdir(A_ ):
UpperCamelCase = os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
else:
UpperCamelCase = (filename_prefix + '-' if filename_prefix else '') + save_directory
UpperCamelCase = 0
if " " in self.encoder:
UpperCamelCase = self.encoder[' ']
del self.encoder[" "]
if "\n" in self.encoder:
UpperCamelCase = self.encoder['\n']
del self.encoder["\n"]
UpperCamelCase = collections.OrderedDict(sorted(self.encoder.items() , key=lambda A_ : x[1] ) )
with open(A_ , 'w' , encoding='utf-8' ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
F'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
' Please check that the vocabulary is not corrupted!' )
UpperCamelCase = token_index
writer.write(token + '\n' )
index += 1
return (vocab_file,)
def __UpperCamelCase ( self , A_ , A_ = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def __UpperCamelCase ( self , A_ , A_ = None , A_ = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ )
if token_ids_a is not None:
return [1] + ([0] * len(A_ )) + [1] + ([0] * len(A_ ))
return [1] + ([0] * len(A_ ))
| 222 | 0 |
"""simple docstring"""
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def lowerCamelCase_ (UpperCamelCase__ : Dict , UpperCamelCase__ : Tuple ):
_UpperCAmelCase : Optional[Any] = torch.load(_lowerCamelCase , map_location='''cpu''' )
_UpperCAmelCase : Optional[Any] = chkpt["""model"""]
# We have the base model one level deeper than the original XLM repository
_UpperCAmelCase : Optional[Any] = {}
for k, v in state_dict.items():
if "pred_layer" in k:
_UpperCAmelCase : int = v
else:
_UpperCAmelCase : Any = v
_UpperCAmelCase : Dict = chkpt["""params"""]
_UpperCAmelCase : str = {n: v for n, v in config.items() if not isinstance(_lowerCamelCase , (torch.FloatTensor, numpy.ndarray) )}
_UpperCAmelCase : List[str] = chkpt["""dico_word2id"""]
_UpperCAmelCase : Union[str, Any] = {s + """</w>""" if s.find('''@@''' ) == -1 and i > 13 else s.replace('''@@''' , '''''' ): i for s, i in vocab.items()}
# Save pytorch-model
_UpperCAmelCase : Tuple = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
_UpperCAmelCase : Optional[int] = pytorch_dump_folder_path + """/""" + CONFIG_NAME
_UpperCAmelCase : List[Any] = pytorch_dump_folder_path + """/""" + VOCAB_FILES_NAMES["""vocab_file"""]
print(F'Save PyTorch model to {pytorch_weights_dump_path}' )
torch.save(_lowerCamelCase , _lowerCamelCase )
print(F'Save configuration file to {pytorch_config_dump_path}' )
with open(_lowerCamelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(_lowerCamelCase , indent=2 ) + '''\n''' )
print(F'Save vocab file to {pytorch_config_dump_path}' )
with open(_lowerCamelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(_lowerCamelCase , indent=2 ) + '''\n''' )
if __name__ == "__main__":
_lowerCAmelCase :Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xlm_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
_lowerCAmelCase :Optional[int] = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 363 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class _UpperCAmelCase :
'''simple docstring'''
a__ =None
a__ =None
a__ =None # sigma(t_i)
@classmethod
def __lowerCAmelCase ( cls ) -> Tuple:
return cls()
@dataclass
class _UpperCAmelCase ( a ):
'''simple docstring'''
a__ =42
a__ =42
a__ =42
class _UpperCAmelCase ( a ,a ):
'''simple docstring'''
@property
def __lowerCAmelCase ( self ) -> Optional[int]:
return True
@register_to_config
def __init__( self , A = 0.02 , A = 1_0_0 , A = 1.007 , A = 8_0 , A = 0.05 , A = 5_0 , ) -> Any:
pass
def __lowerCAmelCase ( self ) -> int:
return KarrasVeSchedulerState.create()
def __lowerCAmelCase ( self , A , A , A = () ) -> KarrasVeSchedulerState:
_UpperCAmelCase : Tuple = jnp.arange(0 , A )[::-1].copy()
_UpperCAmelCase : Dict = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in timesteps
]
return state.replace(
num_inference_steps=A , schedule=jnp.array(A , dtype=jnp.floataa ) , timesteps=A , )
def __lowerCAmelCase ( self , A , A , A , A , ) -> Tuple[jnp.ndarray, float]:
if self.config.s_min <= sigma <= self.config.s_max:
_UpperCAmelCase : Optional[Any] = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 )
else:
_UpperCAmelCase : Tuple = 0
# sample eps ~ N(0, S_noise^2 * I)
_UpperCAmelCase : Any = random.split(A , num=1 )
_UpperCAmelCase : Optional[Any] = self.config.s_noise * random.normal(key=A , shape=sample.shape )
_UpperCAmelCase : str = sigma + gamma * sigma
_UpperCAmelCase : str = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def __lowerCAmelCase ( self , A , A , A , A , A , A = True , ) -> Union[FlaxKarrasVeOutput, Tuple]:
_UpperCAmelCase : Dict = sample_hat + sigma_hat * model_output
_UpperCAmelCase : int = (sample_hat - pred_original_sample) / sigma_hat
_UpperCAmelCase : Any = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=A , derivative=A , state=A )
def __lowerCAmelCase ( self , A , A , A , A , A , A , A , A = True , ) -> Union[FlaxKarrasVeOutput, Tuple]:
_UpperCAmelCase : Union[str, Any] = sample_prev + sigma_prev * model_output
_UpperCAmelCase : List[Any] = (sample_prev - pred_original_sample) / sigma_prev
_UpperCAmelCase : Union[str, Any] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=A , derivative=A , state=A )
def __lowerCAmelCase ( self , A , A , A , A ) -> Any:
raise NotImplementedError()
| 68 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __SCREAMING_SNAKE_CASE ( metaclass=lowerCamelCase ):
snake_case_ = ["""torch""", """transformers""", """onnx"""]
def __init__( self : str , *__lowercase : List[str] , **__lowercase : Optional[Any] ) -> str:
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __magic_name__ ( cls : Optional[Any] , *__lowercase : Union[str, Any] , **__lowercase : Union[str, Any] ) -> Dict:
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __magic_name__ ( cls : int , *__lowercase : List[Any] , **__lowercase : str ) -> List[Any]:
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class __SCREAMING_SNAKE_CASE ( metaclass=lowerCamelCase ):
snake_case_ = ["""torch""", """transformers""", """onnx"""]
def __init__( self : Optional[int] , *__lowercase : Union[str, Any] , **__lowercase : Tuple ) -> int:
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __magic_name__ ( cls : Any , *__lowercase : List[str] , **__lowercase : Union[str, Any] ) -> Dict:
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __magic_name__ ( cls : int , *__lowercase : Any , **__lowercase : int ) -> List[Any]:
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class __SCREAMING_SNAKE_CASE ( metaclass=lowerCamelCase ):
snake_case_ = ["""torch""", """transformers""", """onnx"""]
def __init__( self : Any , *__lowercase : List[Any] , **__lowercase : Union[str, Any] ) -> str:
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __magic_name__ ( cls : Optional[Any] , *__lowercase : List[Any] , **__lowercase : Union[str, Any] ) -> Any:
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __magic_name__ ( cls : Union[str, Any] , *__lowercase : Tuple , **__lowercase : int ) -> Any:
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class __SCREAMING_SNAKE_CASE ( metaclass=lowerCamelCase ):
snake_case_ = ["""torch""", """transformers""", """onnx"""]
def __init__( self : List[Any] , *__lowercase : Optional[Any] , **__lowercase : List[str] ) -> str:
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __magic_name__ ( cls : str , *__lowercase : Dict , **__lowercase : Dict ) -> Dict:
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __magic_name__ ( cls : Tuple , *__lowercase : List[Any] , **__lowercase : int ) -> Tuple:
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class __SCREAMING_SNAKE_CASE ( metaclass=lowerCamelCase ):
snake_case_ = ["""torch""", """transformers""", """onnx"""]
def __init__( self : Tuple , *__lowercase : Any , **__lowercase : List[str] ) -> Dict:
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __magic_name__ ( cls : Optional[int] , *__lowercase : Dict , **__lowercase : Dict ) -> Any:
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __magic_name__ ( cls : Union[str, Any] , *__lowercase : Optional[int] , **__lowercase : Union[str, Any] ) -> str:
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class __SCREAMING_SNAKE_CASE ( metaclass=lowerCamelCase ):
snake_case_ = ["""torch""", """transformers""", """onnx"""]
def __init__( self : Optional[Any] , *__lowercase : Optional[Any] , **__lowercase : str ) -> Any:
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __magic_name__ ( cls : Optional[Any] , *__lowercase : Union[str, Any] , **__lowercase : List[str] ) -> Dict:
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __magic_name__ ( cls : int , *__lowercase : Tuple , **__lowercase : Tuple ) -> List[Any]:
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) | 152 |
'''simple docstring'''
import socket
def _a( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple =socket.socket(socket.AF_INET, socket.SOCK_STREAM )
SCREAMING_SNAKE_CASE__ : str =socket.gethostname()
SCREAMING_SNAKE_CASE__ : List[Any] =1_2_3_1_2
sock.connect((host, port) )
sock.send(B'''Hello server!''' )
with open('''Received_file''', '''wb''' ) as out_file:
print('''File opened''' )
print('''Receiving data...''' )
while True:
SCREAMING_SNAKE_CASE__ : List[str] =sock.recv(1_0_2_4 )
if not data:
break
out_file.write(UpperCamelCase__ )
print('''Successfully received the file''' )
sock.close()
print('''Connection closed''' )
if __name__ == "__main__":
main() | 152 | 1 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def snake_case (UpperCAmelCase__ , UpperCAmelCase__ ) -> np.array:
UpperCamelCase_: Dict = F'''{sampling_rate}'''
UpperCamelCase_: Any = '1'
UpperCamelCase_: Any = 'f32le'
UpperCamelCase_: Union[str, Any] = [
'ffmpeg',
'-i',
'pipe:0',
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
try:
with subprocess.Popen(UpperCAmelCase__ , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
UpperCamelCase_: Optional[Any] = ffmpeg_process.communicate(UpperCAmelCase__ )
except FileNotFoundError as error:
raise ValueError('ffmpeg was not found but is required to load audio files from filename' ) from error
UpperCamelCase_: Union[str, Any] = output_stream[0]
UpperCamelCase_: List[str] = np.frombuffer(UpperCAmelCase__ , np.floataa )
if audio.shape[0] == 0:
raise ValueError('Malformed soundfile' )
return audio
def snake_case (UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = "f32le" , ) -> Tuple:
UpperCamelCase_: Any = F'''{sampling_rate}'''
UpperCamelCase_: Union[str, Any] = '1'
if format_for_conversion == "s16le":
UpperCamelCase_: Optional[Any] = 2
elif format_for_conversion == "f32le":
UpperCamelCase_: Any = 4
else:
raise ValueError(F'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
UpperCamelCase_: int = platform.system()
if system == "Linux":
UpperCamelCase_: Tuple = 'alsa'
UpperCamelCase_: List[str] = 'default'
elif system == "Darwin":
UpperCamelCase_: int = 'avfoundation'
UpperCamelCase_: Union[str, Any] = ':0'
elif system == "Windows":
UpperCamelCase_: Tuple = 'dshow'
UpperCamelCase_: Dict = 'default'
UpperCamelCase_: Any = [
'ffmpeg',
'-f',
format_,
'-i',
input_,
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-fflags',
'nobuffer',
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
UpperCamelCase_: Tuple = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
UpperCamelCase_: Optional[int] = _ffmpeg_stream(UpperCAmelCase__ , UpperCAmelCase__ )
for item in iterator:
yield item
def snake_case (UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = "f32le" , ) -> Any:
if stream_chunk_s is not None:
UpperCamelCase_: List[Any] = stream_chunk_s
else:
UpperCamelCase_: Dict = chunk_length_s
UpperCamelCase_: List[str] = ffmpeg_microphone(UpperCAmelCase__ , UpperCAmelCase__ , format_for_conversion=UpperCAmelCase__ )
if format_for_conversion == "s16le":
UpperCamelCase_: Union[str, Any] = np.intaa
UpperCamelCase_: List[Any] = 2
elif format_for_conversion == "f32le":
UpperCamelCase_: str = np.floataa
UpperCamelCase_: Tuple = 4
else:
raise ValueError(F'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
if stride_length_s is None:
UpperCamelCase_: int = chunk_length_s / 6
UpperCamelCase_: Tuple = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(UpperCAmelCase__ , (int, float) ):
UpperCamelCase_: Union[str, Any] = [stride_length_s, stride_length_s]
UpperCamelCase_: Any = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
UpperCamelCase_: Dict = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
UpperCamelCase_: Optional[int] = datetime.datetime.now()
UpperCamelCase_: Optional[int] = datetime.timedelta(seconds=UpperCAmelCase__ )
for item in chunk_bytes_iter(UpperCAmelCase__ , UpperCAmelCase__ , stride=(stride_left, stride_right) , stream=UpperCAmelCase__ ):
# Put everything back in numpy scale
UpperCamelCase_: Tuple = np.frombuffer(item['raw'] , dtype=UpperCAmelCase__ )
UpperCamelCase_: Optional[int] = (
item['stride'][0] // size_of_sample,
item['stride'][1] // size_of_sample,
)
UpperCamelCase_: int = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 1_0 * delta:
# We're late !! SKIP
continue
yield item
def snake_case (UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = False ) -> int:
UpperCamelCase_: str = b''
UpperCamelCase_: Union[str, Any] = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
F'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}''' )
UpperCamelCase_: List[str] = 0
for raw in iterator:
acc += raw
if stream and len(UpperCAmelCase__ ) < chunk_len:
UpperCamelCase_: Optional[Any] = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(UpperCAmelCase__ ) >= chunk_len:
# We are flushing the accumulator
UpperCamelCase_: int = (_stride_left, stride_right)
UpperCamelCase_: Optional[Any] = {'raw': acc[:chunk_len], 'stride': stride}
if stream:
UpperCamelCase_: Any = False
yield item
UpperCamelCase_: Optional[int] = stride_left
UpperCamelCase_: Optional[Any] = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(UpperCAmelCase__ ) > stride_left:
UpperCamelCase_: int = {'raw': acc, 'stride': (_stride_left, 0)}
if stream:
UpperCamelCase_: Optional[Any] = False
yield item
def snake_case (UpperCAmelCase__ , UpperCAmelCase__ ) -> int:
UpperCamelCase_: Any = 2**2_4 # 16Mo
try:
with subprocess.Popen(UpperCAmelCase__ , stdout=subprocess.PIPE , bufsize=UpperCAmelCase__ ) as ffmpeg_process:
while True:
UpperCamelCase_: Any = ffmpeg_process.stdout.read(UpperCAmelCase__ )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError('ffmpeg was not found but is required to stream audio files from filename' ) from error | 363 |
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def snake_case (UpperCAmelCase__ , UpperCAmelCase__=() , UpperCAmelCase__=None , UpperCAmelCase__="no" , UpperCAmelCase__="29500" ) -> List[Any]:
UpperCamelCase_: Any = False
UpperCamelCase_: List[str] = False
if any(key.startswith('KAGGLE' ) for key in os.environ.keys() ):
UpperCamelCase_: List[Any] = True
elif "IPython" in sys.modules:
UpperCamelCase_: List[Any] = 'google.colab' in str(sys.modules['IPython'].get_ipython() )
try:
UpperCamelCase_: Optional[int] = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
F'''Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.''' )
if (in_colab or in_kaggle) and (os.environ.get('TPU_NAME' , UpperCAmelCase__ ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '
'your training function. Restart your notebook and make sure no cells initializes an '
'`Accelerator`.' )
if num_processes is None:
UpperCamelCase_: List[str] = 8
UpperCamelCase_: str = PrepareForLaunch(UpperCAmelCase__ , distributed_type='TPU' )
print(F'''Launching a training on {num_processes} TPU cores.''' )
xmp.spawn(UpperCAmelCase__ , args=UpperCAmelCase__ , nprocs=UpperCAmelCase__ , start_method='fork' )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print('Launching training on one GPU.' )
else:
print('Launching training on one CPU.' )
function(*UpperCAmelCase__ )
else:
if num_processes is None:
raise ValueError(
'You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.' )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '
'inside your training function. Restart your notebook and make sure no cells initializes an '
'`Accelerator`.' )
if torch.cuda.is_initialized():
raise ValueError(
'To launch a multi-GPU training from your notebook, you need to avoid running any instruction '
'using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '
'function.' )
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
world_size=UpperCAmelCase__ , master_addr='127.0.01' , master_port=UpperCAmelCase__ , mixed_precision=UpperCAmelCase__ ):
UpperCamelCase_: str = PrepareForLaunch(UpperCAmelCase__ , distributed_type='MULTI_GPU' )
print(F'''Launching training on {num_processes} GPUs.''' )
try:
start_processes(UpperCAmelCase__ , args=UpperCAmelCase__ , nprocs=UpperCAmelCase__ , start_method='fork' )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
'CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '
'This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '
'Please review your imports and test them when running the `notebook_launcher()` to identify '
'which one is problematic.' ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
UpperCamelCase_: Tuple = '1'
print('Launching training on MPS.' )
elif torch.cuda.is_available():
print('Launching training on one GPU.' )
else:
print('Launching training on CPU.' )
function(*UpperCAmelCase__ )
def snake_case (UpperCAmelCase__ , UpperCAmelCase__=() , UpperCAmelCase__=2 ) -> Optional[int]:
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
world_size=UpperCAmelCase__ , master_addr='127.0.01' , master_port='29500' , accelerate_mixed_precision='no' , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu='yes' , ):
UpperCamelCase_: str = PrepareForLaunch(UpperCAmelCase__ , debug=UpperCAmelCase__ )
start_processes(UpperCAmelCase__ , args=UpperCAmelCase__ , nprocs=UpperCAmelCase__ , start_method='fork' ) | 292 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase__ : List[str] = {
'configuration_albert': ['ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'AlbertConfig', 'AlbertOnnxConfig'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Dict = ['AlbertTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : List[Any] = ['AlbertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Any = [
'ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'AlbertForMaskedLM',
'AlbertForMultipleChoice',
'AlbertForPreTraining',
'AlbertForQuestionAnswering',
'AlbertForSequenceClassification',
'AlbertForTokenClassification',
'AlbertModel',
'AlbertPreTrainedModel',
'load_tf_weights_in_albert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : int = [
'TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFAlbertForMaskedLM',
'TFAlbertForMultipleChoice',
'TFAlbertForPreTraining',
'TFAlbertForQuestionAnswering',
'TFAlbertForSequenceClassification',
'TFAlbertForTokenClassification',
'TFAlbertMainLayer',
'TFAlbertModel',
'TFAlbertPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Dict = [
'FlaxAlbertForMaskedLM',
'FlaxAlbertForMultipleChoice',
'FlaxAlbertForPreTraining',
'FlaxAlbertForQuestionAnswering',
'FlaxAlbertForSequenceClassification',
'FlaxAlbertForTokenClassification',
'FlaxAlbertModel',
'FlaxAlbertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 25 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ : List[Any] = 384
SCREAMING_SNAKE_CASE__ : Tuple = 7
if "tiny" in model_name:
SCREAMING_SNAKE_CASE__ : int = 96
SCREAMING_SNAKE_CASE__ : str = (2, 2, 6, 2)
SCREAMING_SNAKE_CASE__ : List[Any] = (3, 6, 12, 24)
elif "small" in model_name:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 96
SCREAMING_SNAKE_CASE__ : Any = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE__ : Tuple = (3, 6, 12, 24)
elif "base" in model_name:
SCREAMING_SNAKE_CASE__ : Tuple = 128
SCREAMING_SNAKE_CASE__ : List[Any] = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE__ : int = (4, 8, 16, 32)
SCREAMING_SNAKE_CASE__ : Optional[int] = 12
SCREAMING_SNAKE_CASE__ : Optional[int] = 512
elif "large" in model_name:
SCREAMING_SNAKE_CASE__ : Optional[Any] = 192
SCREAMING_SNAKE_CASE__ : int = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE__ : int = (6, 12, 24, 48)
SCREAMING_SNAKE_CASE__ : List[Any] = 12
SCREAMING_SNAKE_CASE__ : Optional[Any] = 768
# set label information
SCREAMING_SNAKE_CASE__ : Optional[Any] = 150
SCREAMING_SNAKE_CASE__ : Tuple = """huggingface/label-files"""
SCREAMING_SNAKE_CASE__ : List[str] = """ade20k-id2label.json"""
SCREAMING_SNAKE_CASE__ : str = json.load(open(hf_hub_download(_snake_case ,_snake_case ,repo_type="""dataset""" ) ,"""r""" ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {int(_snake_case ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ : List[Any] = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ : str = SwinConfig(
embed_dim=_snake_case ,depths=_snake_case ,num_heads=_snake_case ,window_size=_snake_case ,out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] ,)
SCREAMING_SNAKE_CASE__ : int = UperNetConfig(
backbone_config=_snake_case ,auxiliary_in_channels=_snake_case ,num_labels=_snake_case ,idalabel=_snake_case ,labelaid=_snake_case ,)
return config
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
# fmt: off
# stem
rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.stages.{i}.downsample.reduction.weight''', f'''backbone.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.weight''', f'''backbone.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.bias''', f'''backbone.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = dct.pop(_snake_case )
SCREAMING_SNAKE_CASE__ : Tuple = val
def lowercase_ ( _snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : int = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
SCREAMING_SNAKE_CASE__ : List[Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight''' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE__ : Tuple = in_proj_weight[:dim, :]
SCREAMING_SNAKE_CASE__ : List[Any] = in_proj_bias[: dim]
SCREAMING_SNAKE_CASE__ : Optional[int] = in_proj_weight[
dim : dim * 2, :
]
SCREAMING_SNAKE_CASE__ : List[Any] = in_proj_bias[
dim : dim * 2
]
SCREAMING_SNAKE_CASE__ : Tuple = in_proj_weight[
-dim :, :
]
SCREAMING_SNAKE_CASE__ : Optional[Any] = in_proj_bias[-dim :]
# fmt: on
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = x.shape
SCREAMING_SNAKE_CASE__ : List[Any] = x.reshape(_snake_case ,4 ,in_channel // 4 )
SCREAMING_SNAKE_CASE__ : Dict = x[:, [0, 2, 1, 3], :].transpose(1 ,2 ).reshape(_snake_case ,_snake_case )
return x
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = x.shape
SCREAMING_SNAKE_CASE__ : Any = x.reshape(_snake_case ,in_channel // 4 ,4 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = x[:, :, [0, 2, 1, 3]].transpose(1 ,2 ).reshape(_snake_case ,_snake_case )
return x
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ : Tuple = x.shape[0]
SCREAMING_SNAKE_CASE__ : List[str] = x.reshape(4 ,in_channel // 4 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = x[[0, 2, 1, 3], :].transpose(0 ,1 ).reshape(_snake_case )
return x
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ : int = x.shape[0]
SCREAMING_SNAKE_CASE__ : List[str] = x.reshape(in_channel // 4 ,4 )
SCREAMING_SNAKE_CASE__ : Tuple = x[:, [0, 2, 1, 3]].transpose(0 ,1 ).reshape(_snake_case )
return x
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : List[Any] = {
"""upernet-swin-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth""",
"""upernet-swin-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth""",
"""upernet-swin-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth""",
"""upernet-swin-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth""",
}
SCREAMING_SNAKE_CASE__ : Optional[int] = model_name_to_url[model_name]
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.hub.load_state_dict_from_url(_snake_case ,map_location="""cpu""" ,file_name=_snake_case )[
"""state_dict"""
]
for name, param in state_dict.items():
print(_snake_case ,param.shape )
SCREAMING_SNAKE_CASE__ : Optional[Any] = get_upernet_config(_snake_case )
SCREAMING_SNAKE_CASE__ : List[str] = UperNetForSemanticSegmentation(_snake_case )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
SCREAMING_SNAKE_CASE__ : Optional[int] = state_dict.pop(_snake_case )
if "bn" in key:
SCREAMING_SNAKE_CASE__ : Optional[int] = key.replace("""bn""" ,"""batch_norm""" )
SCREAMING_SNAKE_CASE__ : Dict = val
# rename keys
SCREAMING_SNAKE_CASE__ : str = create_rename_keys(_snake_case )
for src, dest in rename_keys:
rename_key(_snake_case ,_snake_case ,_snake_case )
read_in_q_k_v(_snake_case ,config.backbone_config )
# fix downsample parameters
for key, value in state_dict.items():
if "downsample" in key:
if "reduction" in key:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = reverse_correct_unfold_reduction_order(_snake_case )
if "norm" in key:
SCREAMING_SNAKE_CASE__ : Tuple = reverse_correct_unfold_norm_order(_snake_case )
model.load_state_dict(_snake_case )
# verify on image
SCREAMING_SNAKE_CASE__ : List[str] = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"""
SCREAMING_SNAKE_CASE__ : str = Image.open(requests.get(_snake_case ,stream=_snake_case ).raw ).convert("""RGB""" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = SegformerImageProcessor()
SCREAMING_SNAKE_CASE__ : Optional[int] = processor(_snake_case ,return_tensors="""pt""" ).pixel_values
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Tuple = model(_snake_case )
SCREAMING_SNAKE_CASE__ : List[Any] = outputs.logits
print(logits.shape )
print("""First values of logits:""" ,logits[0, 0, :3, :3] )
# assert values
if model_name == "upernet-swin-tiny":
SCREAMING_SNAKE_CASE__ : Tuple = torch.tensor(
[[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] )
elif model_name == "upernet-swin-small":
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor(
[[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]] )
elif model_name == "upernet-swin-base":
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor(
[[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]] )
elif model_name == "upernet-swin-large":
SCREAMING_SNAKE_CASE__ : Dict = torch.tensor(
[[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]] )
print("""Logits:""" ,outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] ,_snake_case ,atol=1E-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_snake_case )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(_snake_case )
if push_to_hub:
print(f'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(f'''openmmlab/{model_name}''' )
processor.push_to_hub(f'''openmmlab/{model_name}''' )
if __name__ == "__main__":
UpperCAmelCase__ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-swin-tiny',
type=str,
choices=[f"""upernet-swin-{size}""" for size in ['tiny', 'small', 'base', 'large']],
help='Name of the Swin + UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
UpperCAmelCase__ : List[str] = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 25 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
lowercase : int = None
lowercase : Union[str, Any] = logging.get_logger(__name__)
lowercase : str = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
lowercase : int = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
},
'tokenizer_file': {
'google/bigbird-roberta-base': (
'https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'
),
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'
),
},
}
lowercase : Optional[int] = {
'google/bigbird-roberta-base': 40_96,
'google/bigbird-roberta-large': 40_96,
'google/bigbird-base-trivia-itc': 40_96,
}
lowercase : List[str] = '▁'
class A ( __snake_case ):
__magic_name__ = VOCAB_FILES_NAMES
__magic_name__ = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ = BigBirdTokenizer
__magic_name__ = ['''input_ids''', '''attention_mask''']
__magic_name__ = []
def __init__( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE="<unk>" , SCREAMING_SNAKE_CASE="<s>" , SCREAMING_SNAKE_CASE="</s>" , SCREAMING_SNAKE_CASE="<pad>" , SCREAMING_SNAKE_CASE="[SEP]" , SCREAMING_SNAKE_CASE="[MASK]" , SCREAMING_SNAKE_CASE="[CLS]" , **SCREAMING_SNAKE_CASE , ) -> Dict:
"""simple docstring"""
A : Dict = AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else bos_token
A : Any = AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else eos_token
A : int = AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else unk_token
A : Union[str, Any] = AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else pad_token
A : Optional[Any] = AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else cls_token
A : Tuple = AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
A : Any = AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE ) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else mask_token
super().__init__(
SCREAMING_SNAKE_CASE , tokenizer_file=SCREAMING_SNAKE_CASE , bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
A : str = vocab_file
A : str = False if not self.vocab_file else True
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> List[int]:
"""simple docstring"""
A : Optional[Any] = [self.sep_token_id]
A : Tuple = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(SCREAMING_SNAKE_CASE )) + [1]
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> List[int]:
"""simple docstring"""
A : Any = [self.sep_token_id]
A : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(SCREAMING_SNAKE_CASE ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
A : Optional[Any] = os.path.join(
SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
| 311 |
'''simple docstring'''
from __future__ import annotations
def lowerCAmelCase_ ( snake_case__ ):
'''simple docstring'''
A : List[str] = 2
A : Dict = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(snake_case__ )
if n > 1:
factors.append(snake_case__ )
return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
| 311 | 1 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
def _UpperCamelCase ( self : Any ) -> Dict:
_UpperCamelCase = tempfile.mkdtemp()
_UpperCamelCase = BlipImageProcessor()
_UpperCamelCase = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''' )
_UpperCamelCase = BlipaProcessor(__UpperCAmelCase , __UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
def _UpperCamelCase ( self : Tuple , **__UpperCamelCase : List[Any] ) -> Tuple:
return AutoProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase ).tokenizer
def _UpperCamelCase ( self : Any , **__UpperCamelCase : List[Any] ) -> Tuple:
return AutoProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase ).image_processor
def _UpperCamelCase ( self : Optional[Any] ) -> Dict:
shutil.rmtree(self.tmpdirname )
def _UpperCamelCase ( self : List[Any] ) -> int:
_UpperCamelCase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
_UpperCamelCase = [Image.fromarray(np.moveaxis(__UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _UpperCamelCase ( self : Optional[int] ) -> str:
_UpperCamelCase = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_UpperCamelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
_UpperCamelCase = self.get_image_processor(do_normalize=__UpperCAmelCase , padding_value=1.0 )
_UpperCamelCase = BlipaProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __UpperCAmelCase )
def _UpperCamelCase ( self : Optional[int] ) -> Any:
_UpperCamelCase = self.get_image_processor()
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = BlipaProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
_UpperCamelCase = self.prepare_image_inputs()
_UpperCamelCase = image_processor(__UpperCAmelCase , return_tensors='''np''' )
_UpperCamelCase = processor(images=__UpperCAmelCase , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _UpperCamelCase ( self : int ) -> Optional[Any]:
_UpperCamelCase = self.get_image_processor()
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = BlipaProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
_UpperCamelCase = '''lower newer'''
_UpperCamelCase = processor(text=__UpperCAmelCase )
_UpperCamelCase = tokenizer(__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _UpperCamelCase ( self : List[str] ) -> Union[str, Any]:
_UpperCamelCase = self.get_image_processor()
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = BlipaProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
_UpperCamelCase = '''lower newer'''
_UpperCamelCase = self.prepare_image_inputs()
_UpperCamelCase = processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
# test if it raises when no input is passed
with pytest.raises(__UpperCAmelCase ):
processor()
def _UpperCamelCase ( self : Tuple ) -> List[Any]:
_UpperCamelCase = self.get_image_processor()
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = BlipaProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
_UpperCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_UpperCamelCase = processor.batch_decode(__UpperCAmelCase )
_UpperCamelCase = tokenizer.batch_decode(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def _UpperCamelCase ( self : List[Any] ) -> Optional[int]:
_UpperCamelCase = self.get_image_processor()
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = BlipaProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
_UpperCamelCase = '''lower newer'''
_UpperCamelCase = self.prepare_image_inputs()
_UpperCamelCase = processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
| 256 |
"""simple docstring"""
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class __lowerCAmelCase :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=14 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=0.0_2 , ):
'''simple docstring'''
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = seq_length
__UpperCamelCase = is_training
__UpperCamelCase = use_input_mask
__UpperCamelCase = use_token_type_ids
__UpperCamelCase = use_labels
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = rotary_dim
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_act
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = initializer_range
__UpperCamelCase = None
__UpperCamelCase = vocab_size - 1
__UpperCamelCase = vocab_size - 1
__UpperCamelCase = vocab_size - 1
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase = None
if self.use_input_mask:
__UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=__UpperCAmelCase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.prepare_config_and_inputs()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = config_and_inputs
__UpperCamelCase = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = 20
__UpperCamelCase = model_class_name(__UpperCAmelCase )
__UpperCamelCase = model.init_cache(input_ids.shape[0] , __UpperCAmelCase )
__UpperCamelCase = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='i4' )
__UpperCamelCase = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__UpperCamelCase = model(
input_ids[:, :-1] , attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , position_ids=__UpperCAmelCase , )
__UpperCamelCase = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
__UpperCamelCase = model(
input_ids[:, -1:] , attention_mask=__UpperCAmelCase , past_key_values=outputs_cache.past_key_values , position_ids=__UpperCAmelCase , )
__UpperCamelCase = model(__UpperCAmelCase )
__UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'Max diff is {diff}' )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = 20
__UpperCamelCase = model_class_name(__UpperCAmelCase )
__UpperCamelCase = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
__UpperCamelCase = model.init_cache(input_ids.shape[0] , __UpperCAmelCase )
__UpperCamelCase = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__UpperCamelCase = model(
input_ids[:, :-1] , attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , position_ids=__UpperCAmelCase , )
__UpperCamelCase = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
__UpperCamelCase = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=__UpperCAmelCase , position_ids=__UpperCAmelCase , )
__UpperCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )
__UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'Max diff is {diff}' )
@require_flax
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
lowercase = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = FlaxGPTJModelTester(self )
def UpperCAmelCase ( self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
@tooslow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = GPTaTokenizer.from_pretrained('gpt2' , pad_token='<|endoftext|>' , padding_side='left' )
__UpperCamelCase = tokenizer(['Hello this is a long string', 'Hey'] , return_tensors='np' , padding=__UpperCAmelCase , truncation=__UpperCAmelCase )
__UpperCamelCase = FlaxGPTJForCausalLM.from_pretrained('EleutherAI/gpt-j-6B' )
__UpperCamelCase = False
__UpperCamelCase = model.config.eos_token_id
__UpperCamelCase = jax.jit(model.generate )
__UpperCamelCase = jit_generate(
inputs['input_ids'] , attention_mask=inputs['attention_mask'] , pad_token_id=tokenizer.pad_token_id ).sequences
__UpperCamelCase = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
__UpperCamelCase = [
'Hello this is a long string of text.\n\nI\'m trying to get the text of the',
'Hey, I\'m a little late to the party. I\'m going to',
]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
@is_pt_flax_cross_test
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase )
__UpperCamelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__UpperCamelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
__UpperCamelCase = getattr(__UpperCAmelCase , __UpperCAmelCase )
__UpperCamelCase , __UpperCamelCase = pt_inputs['input_ids'].shape
__UpperCamelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(__UpperCAmelCase ):
__UpperCamelCase = 0
__UpperCamelCase = 1
__UpperCamelCase = 0
__UpperCamelCase = 1
__UpperCamelCase = pt_model_class(__UpperCAmelCase ).eval()
__UpperCamelCase = model_class(__UpperCAmelCase , dtype=jnp.floataa )
__UpperCamelCase = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , __UpperCAmelCase )
__UpperCamelCase = fx_state
with torch.no_grad():
__UpperCamelCase = pt_model(**__UpperCAmelCase ).to_tuple()
__UpperCamelCase = fx_model(**__UpperCAmelCase ).to_tuple()
self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(__UpperCAmelCase )
__UpperCamelCase = model_class.from_pretrained(__UpperCAmelCase , from_pt=__UpperCAmelCase )
__UpperCamelCase = fx_model_loaded(**__UpperCAmelCase ).to_tuple()
self.assertEqual(
len(__UpperCAmelCase ) , len(__UpperCAmelCase ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output_loaded, pt_output in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@is_pt_flax_cross_test
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase )
__UpperCamelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__UpperCamelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
__UpperCamelCase = getattr(__UpperCAmelCase , __UpperCAmelCase )
__UpperCamelCase = pt_model_class(__UpperCAmelCase ).eval()
__UpperCamelCase = model_class(__UpperCAmelCase , dtype=jnp.floataa )
__UpperCamelCase = load_flax_weights_in_pytorch_model(__UpperCAmelCase , fx_model.params )
__UpperCamelCase , __UpperCamelCase = pt_inputs['input_ids'].shape
__UpperCamelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(__UpperCAmelCase ):
__UpperCamelCase = 0
__UpperCamelCase = 1
__UpperCamelCase = 0
__UpperCamelCase = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
__UpperCamelCase = pt_model(**__UpperCAmelCase ).to_tuple()
__UpperCamelCase = fx_model(**__UpperCAmelCase ).to_tuple()
self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(__UpperCAmelCase )
__UpperCamelCase = pt_model_class.from_pretrained(__UpperCAmelCase , from_flax=__UpperCAmelCase )
with torch.no_grad():
__UpperCamelCase = pt_model_loaded(**__UpperCAmelCase ).to_tuple()
self.assertEqual(
len(__UpperCAmelCase ) , len(__UpperCAmelCase ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@tooslow
def UpperCAmelCase ( self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
__UpperCamelCase = model_class_name.from_pretrained('EleutherAI/gpt-j-6B' )
__UpperCamelCase = model(np.ones((1, 1) ) )
self.assertIsNotNone(__UpperCAmelCase )
| 316 | 0 |
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> str | Literal[False]:
"""simple docstring"""
a = list(snake_case_ )
a = list(snake_case_ )
a = 0
for i in range(len(snake_case_ ) ):
if lista[i] != lista[i]:
count += 1
a = '''_'''
if count > 1:
return False
else:
return "".join(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> list[str]:
"""simple docstring"""
a = []
while True:
a = ['''$'''] * len(snake_case_ )
a = []
for i in range(len(snake_case_ ) ):
for j in range(i + 1, len(snake_case_ ) ):
a = compare_string(binary[i], binary[j] )
if k is False:
a = '''*'''
a = '''*'''
temp.append('''X''' )
for i in range(len(snake_case_ ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(snake_case_ ) == 0:
return pi
a = list(set(snake_case_ ) )
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> list[str]:
"""simple docstring"""
a = []
for minterm in minterms:
a = ''''''
for _ in range(snake_case_ ):
a = str(minterm % 2 ) + string
minterm //= 2
temp.append(snake_case_ )
return temp
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_ ) -> bool:
"""simple docstring"""
a = list(snake_case_ )
a = list(snake_case_ )
a = 0
for i in range(len(snake_case_ ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> list[str]:
"""simple docstring"""
a = []
a = [0] * len(snake_case_ )
for i in range(len(chart[0] ) ):
a = 0
a = -1
for j in range(len(snake_case_ ) ):
if chart[j][i] == 1:
count += 1
a = j
if count == 1:
a = 1
for i in range(len(snake_case_ ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(snake_case_ ) ):
a = 0
temp.append(prime_implicants[i] )
while True:
a = 0
a = -1
a = 0
for i in range(len(snake_case_ ) ):
a = chart[i].count(1 )
if count_n > max_n:
a = count_n
a = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(snake_case_ ) ):
a = 0
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> list[list[int]]:
"""simple docstring"""
a = [[0 for x in range(len(snake_case_ ) )] for x in range(len(snake_case_ ) )]
for i in range(len(snake_case_ ) ):
a = prime_implicants[i].count('''_''' )
for j in range(len(snake_case_ ) ):
if is_for_table(prime_implicants[i], binary[j], snake_case_ ):
a = 1
return chart
def SCREAMING_SNAKE_CASE__ ( ) -> None:
"""simple docstring"""
a = int(input('''Enter the no. of variables\n''' ) )
a = [
float(snake_case_ )
for x in input(
'''Enter the decimal representation of Minterms \'Spaces Separated\'\n''' ).split()
]
a = decimal_to_binary(snake_case_, snake_case_ )
a = check(snake_case_ )
print('''Prime Implicants are:''' )
print(snake_case_ )
a = prime_implicant_chart(snake_case_, snake_case_ )
a = selection(snake_case_, snake_case_ )
print('''Essential Prime Implicants are:''' )
print(snake_case_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 330 |
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> str | Literal[False]:
"""simple docstring"""
a = list(snake_case_ )
a = list(snake_case_ )
a = 0
for i in range(len(snake_case_ ) ):
if lista[i] != lista[i]:
count += 1
a = '''_'''
if count > 1:
return False
else:
return "".join(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> list[str]:
"""simple docstring"""
a = []
while True:
a = ['''$'''] * len(snake_case_ )
a = []
for i in range(len(snake_case_ ) ):
for j in range(i + 1, len(snake_case_ ) ):
a = compare_string(binary[i], binary[j] )
if k is False:
a = '''*'''
a = '''*'''
temp.append('''X''' )
for i in range(len(snake_case_ ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(snake_case_ ) == 0:
return pi
a = list(set(snake_case_ ) )
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> list[str]:
"""simple docstring"""
a = []
for minterm in minterms:
a = ''''''
for _ in range(snake_case_ ):
a = str(minterm % 2 ) + string
minterm //= 2
temp.append(snake_case_ )
return temp
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_ ) -> bool:
"""simple docstring"""
a = list(snake_case_ )
a = list(snake_case_ )
a = 0
for i in range(len(snake_case_ ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> list[str]:
"""simple docstring"""
a = []
a = [0] * len(snake_case_ )
for i in range(len(chart[0] ) ):
a = 0
a = -1
for j in range(len(snake_case_ ) ):
if chart[j][i] == 1:
count += 1
a = j
if count == 1:
a = 1
for i in range(len(snake_case_ ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(snake_case_ ) ):
a = 0
temp.append(prime_implicants[i] )
while True:
a = 0
a = -1
a = 0
for i in range(len(snake_case_ ) ):
a = chart[i].count(1 )
if count_n > max_n:
a = count_n
a = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(snake_case_ ) ):
a = 0
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> list[list[int]]:
"""simple docstring"""
a = [[0 for x in range(len(snake_case_ ) )] for x in range(len(snake_case_ ) )]
for i in range(len(snake_case_ ) ):
a = prime_implicants[i].count('''_''' )
for j in range(len(snake_case_ ) ):
if is_for_table(prime_implicants[i], binary[j], snake_case_ ):
a = 1
return chart
def SCREAMING_SNAKE_CASE__ ( ) -> None:
"""simple docstring"""
a = int(input('''Enter the no. of variables\n''' ) )
a = [
float(snake_case_ )
for x in input(
'''Enter the decimal representation of Minterms \'Spaces Separated\'\n''' ).split()
]
a = decimal_to_binary(snake_case_, snake_case_ )
a = check(snake_case_ )
print('''Prime Implicants are:''' )
print(snake_case_ )
a = prime_implicant_chart(snake_case_, snake_case_ )
a = selection(snake_case_, snake_case_ )
print('''Essential Prime Implicants are:''' )
print(snake_case_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 330 | 1 |
'''simple docstring'''
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
a__ : int = [
'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'
' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'
' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.',
'The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'
' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'
' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'
' body.',
'Amnesty International releases its annual report on the death penalty. The report catalogs the use of'
' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'
' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'
' punishment.',
]
a__ : Dict = [
'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'
' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'
' had informed his Lufthansa training school of an episode of severe depression, airline says .',
'Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'
' Israel and the United States opposed the move, which could open the door to war crimes investigations against'
' Israelis .',
'Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'
' death . Organization claims that governments around the world are using the threat of terrorism to advance'
' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'
' sentences up by 28% .',
]
def _UpperCamelCase ( ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ = calculate_rouge(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , bootstrap_aggregation=SCREAMING_SNAKE_CASE__ , rouge_keys=["rouge2", "rougeL"] )
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
UpperCamelCase__ = calculate_rouge(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , bootstrap_aggregation=SCREAMING_SNAKE_CASE__ , rouge_keys=["rouge2"] )
assert (
pd.DataFrame(no_aggregation["rouge2"] ).fmeasure.mean()
== pd.DataFrame(no_aggregation_just_ra["rouge2"] ).fmeasure.mean()
)
def _UpperCamelCase ( ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ = """rougeLsum"""
UpperCamelCase__ = calculate_rouge(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , newline_sep=SCREAMING_SNAKE_CASE__ , rouge_keys=[k] )[k]
UpperCamelCase__ = calculate_rouge(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , newline_sep=SCREAMING_SNAKE_CASE__ , rouge_keys=[k] )[k]
assert score > score_no_sep
def _UpperCamelCase ( ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ = ["""rouge1""", """rouge2""", """rougeL"""]
UpperCamelCase__ = calculate_rouge(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , newline_sep=SCREAMING_SNAKE_CASE__ , rouge_keys=SCREAMING_SNAKE_CASE__ )
UpperCamelCase__ = calculate_rouge(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , newline_sep=SCREAMING_SNAKE_CASE__ , rouge_keys=SCREAMING_SNAKE_CASE__ )
assert score_sep == score_no_sep
def _UpperCamelCase ( ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = [
"""Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.""",
"""Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .""",
]
UpperCamelCase__ = [
"""Margot Frank, died in 1945, a month earlier than previously thought.""",
"""Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"""
""" the final seconds on board Flight 9525.""",
]
assert calculate_rouge(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , newline_sep=SCREAMING_SNAKE_CASE__ ) == calculate_rouge(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , newline_sep=SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ = [
"""\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" """
]
UpperCamelCase__ = [
""" Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."""
]
UpperCamelCase__ = calculate_rouge(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , rouge_keys=["rougeLsum"] , newline_sep=SCREAMING_SNAKE_CASE__ )["""rougeLsum"""]
UpperCamelCase__ = calculate_rouge(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , rouge_keys=["rougeLsum"] )["""rougeLsum"""]
assert new_score > prev_score
def _UpperCamelCase ( ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = Path("examples/seq2seq/test_data/wmt_en_ro" )
UpperCamelCase__ = calculate_rouge_path(data_dir.joinpath("test.source" ) , data_dir.joinpath("test.target" ) )
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
UpperCamelCase__ = calculate_rouge_path(
data_dir.joinpath("test.source" ) , data_dir.joinpath("test.target" ) , bootstrap_aggregation=SCREAMING_SNAKE_CASE__ )
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
| 80 |
'''simple docstring'''
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def snake_case_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Tuple = []
_SCREAMING_SNAKE_CASE : Dict = []
_SCREAMING_SNAKE_CASE : str = []
for rt in rc.restypes:
_SCREAMING_SNAKE_CASE : Optional[int] = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
_SCREAMING_SNAKE_CASE : Optional[Any] = {name: i for i, name in enumerate(SCREAMING_SNAKE_CASE__ )}
restype_atomaa_to_atomaa_list.append(
[(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] )
restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
# Add dummy mapping for restype 'UNK'
restype_atomaa_to_atomaa_list.append([0] * 14 )
restype_atomaa_to_atomaa_list.append([0] * 37 )
restype_atomaa_mask_list.append([0.0] * 14 )
_SCREAMING_SNAKE_CASE : Dict = torch.tensor(
SCREAMING_SNAKE_CASE__ , dtype=torch.intaa , device=protein["""aatype"""].device , )
_SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(
SCREAMING_SNAKE_CASE__ , dtype=torch.intaa , device=protein["""aatype"""].device , )
_SCREAMING_SNAKE_CASE : str = torch.tensor(
SCREAMING_SNAKE_CASE__ , dtype=torch.floataa , device=protein["""aatype"""].device , )
_SCREAMING_SNAKE_CASE : int = protein["""aatype"""].to(torch.long )
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
_SCREAMING_SNAKE_CASE : List[str] = restype_atomaa_to_atomaa[protein_aatype]
_SCREAMING_SNAKE_CASE : str = restype_atomaa_mask[protein_aatype]
_SCREAMING_SNAKE_CASE : List[Any] = residx_atomaa_mask
_SCREAMING_SNAKE_CASE : List[str] = residx_atomaa_to_atomaa.long()
# create the gather indices for mapping back
_SCREAMING_SNAKE_CASE : int = restype_atomaa_to_atomaa[protein_aatype]
_SCREAMING_SNAKE_CASE : Dict = residx_atomaa_to_atomaa.long()
# create the corresponding mask
_SCREAMING_SNAKE_CASE : str = torch.zeros([21, 37] , dtype=torch.floataa , device=protein["""aatype"""].device )
for restype, restype_letter in enumerate(rc.restypes ):
_SCREAMING_SNAKE_CASE : int = rc.restype_atoa[restype_letter]
_SCREAMING_SNAKE_CASE : Dict = rc.residue_atoms[restype_name]
for atom_name in atom_names:
_SCREAMING_SNAKE_CASE : List[Any] = rc.atom_order[atom_name]
_SCREAMING_SNAKE_CASE : Union[str, Any] = 1
_SCREAMING_SNAKE_CASE : int = restype_atomaa_mask[protein_aatype]
_SCREAMING_SNAKE_CASE : int = residx_atomaa_mask
return protein
def snake_case_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = tree_map(lambda SCREAMING_SNAKE_CASE__ : torch.tensor(SCREAMING_SNAKE_CASE__ , device=batch["""aatype"""].device ) , SCREAMING_SNAKE_CASE__ , np.ndarray )
_SCREAMING_SNAKE_CASE : Optional[Any] = tensor_tree_map(lambda SCREAMING_SNAKE_CASE__ : np.array(SCREAMING_SNAKE_CASE__ ) , make_atomaa_masks(SCREAMING_SNAKE_CASE__ ) )
return out
| 200 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
def _lowerCAmelCase ( lowercase_ ):
if num <= 0:
UpperCAmelCase = F"""{num}: Invalid input, please enter a positive integer."""
raise ValueError(lowercase_ )
UpperCAmelCase = [True] * (num + 1)
UpperCAmelCase = []
UpperCAmelCase = 2
UpperCAmelCase = int(math.sqrt(lowercase_ ) )
while start <= end:
# If start is a prime
if sieve[start] is True:
prime.append(lowercase_ )
# Set multiples of start be False
for i in range(start * start , num + 1 , lowercase_ ):
if sieve[i] is True:
UpperCAmelCase = False
start += 1
for j in range(end + 1 , num + 1 ):
if sieve[j] is True:
prime.append(lowercase_ )
return prime
if __name__ == "__main__":
print(prime_sieve(int(input("""Enter a positive integer: """).strip())))
| 181 |
"""simple docstring"""
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def _lowerCAmelCase ( lowercase_ ):
random.seed(lowercase_ )
np.random.seed(lowercase_ )
torch.manual_seed(lowercase_ )
torch.cuda.manual_seed_all(lowercase_ )
# ^^ safe to call this function even if cuda is not available
class A_ :
"""simple docstring"""
def __init__( self :Any , lowercase_ :Iterable[torch.nn.Parameter] , lowercase_ :float = 0.9999 , lowercase_ :float = 0.0 , lowercase_ :int = 0 , lowercase_ :bool = False , lowercase_ :Union[float, int] = 1.0 , lowercase_ :Union[float, int] = 2 / 3 , lowercase_ :Optional[Any] = None , lowercase_ :Dict[str, Any] = None , **lowercase_ :Dict , ) -> Optional[int]:
if isinstance(lowercase_ , torch.nn.Module ):
UpperCAmelCase = (
'Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. '
'Please pass the parameters of the module instead.'
)
deprecate(
'passing a `torch.nn.Module` to `ExponentialMovingAverage`' , '1.0.0' , lowercase_ , standard_warn=lowercase_ , )
UpperCAmelCase = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
UpperCAmelCase = True
if kwargs.get('max_value' , lowercase_ ) is not None:
UpperCAmelCase = 'The `max_value` argument is deprecated. Please use `decay` instead.'
deprecate('max_value' , '1.0.0' , lowercase_ , standard_warn=lowercase_ )
UpperCAmelCase = kwargs['max_value']
if kwargs.get('min_value' , lowercase_ ) is not None:
UpperCAmelCase = 'The `min_value` argument is deprecated. Please use `min_decay` instead.'
deprecate('min_value' , '1.0.0' , lowercase_ , standard_warn=lowercase_ )
UpperCAmelCase = kwargs['min_value']
UpperCAmelCase = list(lowercase_ )
UpperCAmelCase = [p.clone().detach() for p in parameters]
if kwargs.get('device' , lowercase_ ) is not None:
UpperCAmelCase = 'The `device` argument is deprecated. Please use `to` instead.'
deprecate('device' , '1.0.0' , lowercase_ , standard_warn=lowercase_ )
self.to(device=kwargs['device'] )
UpperCAmelCase = None
UpperCAmelCase = decay
UpperCAmelCase = min_decay
UpperCAmelCase = update_after_step
UpperCAmelCase = use_ema_warmup
UpperCAmelCase = inv_gamma
UpperCAmelCase = power
UpperCAmelCase = 0
UpperCAmelCase = None # set in `step()`
UpperCAmelCase = model_cls
UpperCAmelCase = model_config
@classmethod
def UpperCAmelCase__ ( cls :int , lowercase_ :Union[str, Any] , lowercase_ :Any ) -> "EMAModel":
UpperCAmelCase , UpperCAmelCase = model_cls.load_config(lowercase_ , return_unused_kwargs=lowercase_ )
UpperCAmelCase = model_cls.from_pretrained(lowercase_ )
UpperCAmelCase = cls(model.parameters() , model_cls=lowercase_ , model_config=model.config )
ema_model.load_state_dict(lowercase_ )
return ema_model
def UpperCAmelCase__ ( self :List[Any] , lowercase_ :List[str] ) -> int:
if self.model_cls is None:
raise ValueError('`save_pretrained` can only be used if `model_cls` was defined at __init__.' )
if self.model_config is None:
raise ValueError('`save_pretrained` can only be used if `model_config` was defined at __init__.' )
UpperCAmelCase = self.model_cls.from_config(self.model_config )
UpperCAmelCase = self.state_dict()
state_dict.pop('shadow_params' , lowercase_ )
model.register_to_config(**lowercase_ )
self.copy_to(model.parameters() )
model.save_pretrained(lowercase_ )
def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :int ) -> float:
UpperCAmelCase = max(0 , optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
UpperCAmelCase = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
UpperCAmelCase = (1 + step) / (10 + step)
UpperCAmelCase = min(lowercase_ , self.decay )
# make sure decay is not smaller than min_decay
UpperCAmelCase = max(lowercase_ , self.min_decay )
return cur_decay_value
@torch.no_grad()
def UpperCAmelCase__ ( self :List[Any] , lowercase_ :Iterable[torch.nn.Parameter] ) -> Optional[int]:
if isinstance(lowercase_ , torch.nn.Module ):
UpperCAmelCase = (
'Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. '
'Please pass the parameters of the module instead.'
)
deprecate(
'passing a `torch.nn.Module` to `ExponentialMovingAverage.step`' , '1.0.0' , lowercase_ , standard_warn=lowercase_ , )
UpperCAmelCase = parameters.parameters()
UpperCAmelCase = list(lowercase_ )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
UpperCAmelCase = self.get_decay(self.optimization_step )
UpperCAmelCase = decay
UpperCAmelCase = 1 - decay
UpperCAmelCase = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , lowercase_ ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
UpperCAmelCase = deepspeed.zero.GatheredParameters(lowercase_ , modifier_rank=lowercase_ )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(lowercase_ )
def UpperCAmelCase__ ( self :Tuple , lowercase_ :Iterable[torch.nn.Parameter] ) -> None:
UpperCAmelCase = list(lowercase_ )
for s_param, param in zip(self.shadow_params , lowercase_ ):
param.data.copy_(s_param.to(param.device ).data )
def UpperCAmelCase__ ( self :Dict , lowercase_ :Tuple=None , lowercase_ :Union[str, Any]=None ) -> None:
UpperCAmelCase = [
p.to(device=lowercase_ , dtype=lowercase_ ) if p.is_floating_point() else p.to(device=lowercase_ )
for p in self.shadow_params
]
def UpperCAmelCase__ ( self :Union[str, Any] ) -> dict:
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :Iterable[torch.nn.Parameter] ) -> None:
UpperCAmelCase = [param.detach().cpu().clone() for param in parameters]
def UpperCAmelCase__ ( self :Optional[Any] , lowercase_ :Iterable[torch.nn.Parameter] ) -> None:
if self.temp_stored_params is None:
raise RuntimeError('This ExponentialMovingAverage has no `store()`ed weights ' 'to `restore()`' )
for c_param, param in zip(self.temp_stored_params , lowercase_ ):
param.data.copy_(c_param.data )
# Better memory-wise.
UpperCAmelCase = None
def UpperCAmelCase__ ( self :Union[str, Any] , lowercase_ :dict ) -> None:
UpperCAmelCase = copy.deepcopy(lowercase_ )
UpperCAmelCase = state_dict.get('decay' , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError('Decay must be between 0 and 1' )
UpperCAmelCase = state_dict.get('min_decay' , self.min_decay )
if not isinstance(self.min_decay , lowercase_ ):
raise ValueError('Invalid min_decay' )
UpperCAmelCase = state_dict.get('optimization_step' , self.optimization_step )
if not isinstance(self.optimization_step , lowercase_ ):
raise ValueError('Invalid optimization_step' )
UpperCAmelCase = state_dict.get('update_after_step' , self.update_after_step )
if not isinstance(self.update_after_step , lowercase_ ):
raise ValueError('Invalid update_after_step' )
UpperCAmelCase = state_dict.get('use_ema_warmup' , self.use_ema_warmup )
if not isinstance(self.use_ema_warmup , lowercase_ ):
raise ValueError('Invalid use_ema_warmup' )
UpperCAmelCase = state_dict.get('inv_gamma' , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError('Invalid inv_gamma' )
UpperCAmelCase = state_dict.get('power' , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError('Invalid power' )
UpperCAmelCase = state_dict.get('shadow_params' , lowercase_ )
if shadow_params is not None:
UpperCAmelCase = shadow_params
if not isinstance(self.shadow_params , lowercase_ ):
raise ValueError('shadow_params must be a list' )
if not all(isinstance(lowercase_ , torch.Tensor ) for p in self.shadow_params ):
raise ValueError('shadow_params must all be Tensors' )
| 181 | 1 |
import math
def __magic_name__ ( __a : int ):
'''simple docstring'''
return math.sqrt(__snake_case ) * math.sqrt(__snake_case ) == num
def __magic_name__ ( __a : int ):
'''simple docstring'''
UpperCamelCase__ = 0
UpperCamelCase__ = n
while left <= right:
UpperCamelCase__ = (left + right) // 2
if mid**2 == n:
return True
elif mid**2 > n:
UpperCamelCase__ = mid - 1
else:
UpperCamelCase__ = mid + 1
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 244 |
'''simple docstring'''
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
__A : Union[str, Any] = FlaxDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_UpperCAmelCase , cache_dir=_UpperCAmelCase)
__A : Optional[Any] = [t[-1] for t in os.walk(os.path.join(_UpperCAmelCase , os.listdir(_UpperCAmelCase)[0] , 'snapshots'))]
__A : int = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('.bin') for f in files)
@slow
@require_flax
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_UpperCAmelCase)
__A : Dict = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
__A : Optional[Any] = jax.random.PRNGKey(0)
__A : int = 4
__A : Tuple = jax.device_count()
__A : Union[str, Any] = num_samples * [prompt]
__A : Tuple = pipeline.prepare_inputs(_UpperCAmelCase)
# shard inputs and rng
__A : str = replicate(_UpperCAmelCase)
__A : Tuple = jax.random.split(_UpperCAmelCase , _UpperCAmelCase)
__A : Union[str, Any] = shard(_UpperCAmelCase)
__A : Union[str, Any] = pipeline(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , jit=_UpperCAmelCase).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 4.1514745) < 1e-3
assert np.abs(np.abs(_UpperCAmelCase , dtype=np.floataa).sum() - 49947.875) < 5e-1
__A : List[str] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
assert len(_UpperCAmelCase) == num_samples
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : str = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='flax' , safety_checker=_UpperCAmelCase)
__A : List[Any] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
__A : Tuple = jax.random.PRNGKey(0)
__A : Any = 50
__A : str = jax.device_count()
__A : Union[str, Any] = num_samples * [prompt]
__A : List[str] = pipeline.prepare_inputs(_UpperCAmelCase)
# shard inputs and rng
__A : Dict = replicate(_UpperCAmelCase)
__A : Optional[Any] = jax.random.split(_UpperCAmelCase , _UpperCAmelCase)
__A : int = shard(_UpperCAmelCase)
__A : Tuple = pipeline(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , jit=_UpperCAmelCase).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.05652401)) < 1e-3
assert np.abs((np.abs(_UpperCAmelCase , dtype=np.floataa).sum() - 2383808.2)) < 5e-1
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : List[str] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_UpperCAmelCase)
__A : List[Any] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
__A : str = jax.random.PRNGKey(0)
__A : Any = 50
__A : Optional[int] = jax.device_count()
__A : int = num_samples * [prompt]
__A : Optional[int] = pipeline.prepare_inputs(_UpperCAmelCase)
# shard inputs and rng
__A : Optional[int] = replicate(_UpperCAmelCase)
__A : List[str] = jax.random.split(_UpperCAmelCase , _UpperCAmelCase)
__A : Dict = shard(_UpperCAmelCase)
__A : str = pipeline(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , jit=_UpperCAmelCase).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.04003906)) < 1e-3
assert np.abs((np.abs(_UpperCAmelCase , dtype=np.floataa).sum() - 2373516.75)) < 5e-1
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : Optional[Any] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa)
__A : Union[str, Any] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
__A : Any = jax.random.PRNGKey(0)
__A : List[str] = 50
__A : Optional[int] = jax.device_count()
__A : List[Any] = num_samples * [prompt]
__A : List[Any] = pipeline.prepare_inputs(_UpperCAmelCase)
# shard inputs and rng
__A : Union[str, Any] = replicate(_UpperCAmelCase)
__A : Optional[Any] = jax.random.split(_UpperCAmelCase , _UpperCAmelCase)
__A : List[str] = shard(_UpperCAmelCase)
__A : int = pipeline(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , jit=_UpperCAmelCase).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.04003906)) < 1e-3
assert np.abs((np.abs(_UpperCAmelCase , dtype=np.floataa).sum() - 2373516.75)) < 5e-1
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = FlaxDDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , set_alpha_to_one=_UpperCAmelCase , steps_offset=1 , )
__A ,__A : Any = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , scheduler=_UpperCAmelCase , safety_checker=_UpperCAmelCase , )
__A : Optional[Any] = scheduler.create_state()
__A : Any = scheduler_state
__A : List[str] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
__A : Union[str, Any] = jax.random.PRNGKey(0)
__A : Optional[int] = 50
__A : Optional[Any] = jax.device_count()
__A : Any = num_samples * [prompt]
__A : Optional[Any] = pipeline.prepare_inputs(_UpperCAmelCase)
# shard inputs and rng
__A : int = replicate(_UpperCAmelCase)
__A : Any = jax.random.split(_UpperCAmelCase , _UpperCAmelCase)
__A : Tuple = shard(_UpperCAmelCase)
__A : Union[str, Any] = pipeline(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , jit=_UpperCAmelCase).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.045043945)) < 1e-3
assert np.abs((np.abs(_UpperCAmelCase , dtype=np.floataa).sum() - 2347693.5)) < 5e-1
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Dict = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
__A : int = jax.device_count()
__A : List[Any] = num_samples * [prompt]
__A : List[Any] = jax.random.split(jax.random.PRNGKey(0) , _UpperCAmelCase)
__A ,__A : Tuple = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_UpperCAmelCase , )
__A : str = replicate(_UpperCAmelCase)
__A : str = pipeline.prepare_inputs(_UpperCAmelCase)
__A : str = shard(_UpperCAmelCase)
__A : int = pipeline(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , jit=_UpperCAmelCase).images
assert images.shape == (num_samples, 1, 512, 512, 3)
__A : Any = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
__A ,__A : str = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_UpperCAmelCase , use_memory_efficient_attention=_UpperCAmelCase , )
__A : Any = replicate(_UpperCAmelCase)
__A : List[Any] = pipeline.prepare_inputs(_UpperCAmelCase)
__A : Optional[Any] = shard(_UpperCAmelCase)
__A : List[Any] = pipeline(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , jit=_UpperCAmelCase).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
__A : List[Any] = images[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice).max() < 1e-2 | 190 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
UpperCamelCase__ : Union[str, Any] = None
UpperCamelCase__ : List[Any] = logging.get_logger(__name__)
UpperCamelCase__ : int = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
UpperCamelCase__ : int = {
"""vocab_file""": {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model""",
},
"""tokenizer_file""": {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/tokenizer.json""",
},
}
UpperCamelCase__ : List[str] = {
"""camembert-base""": 512,
}
UpperCamelCase__ : List[Any] = """▁"""
class lowerCamelCase_ ( a_ ):
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ = ['input_ids', 'attention_mask']
SCREAMING_SNAKE_CASE_ = CamembertTokenizer
def __init__( self : Optional[Any] ,__lowerCamelCase : Optional[int]=None ,__lowerCamelCase : List[Any]=None ,__lowerCamelCase : List[Any]="<s>" ,__lowerCamelCase : Optional[Any]="</s>" ,__lowerCamelCase : Union[str, Any]="</s>" ,__lowerCamelCase : int="<s>" ,__lowerCamelCase : Any="<unk>" ,__lowerCamelCase : Optional[int]="<pad>" ,__lowerCamelCase : Optional[int]="<mask>" ,__lowerCamelCase : str=["<s>NOTUSED", "</s>NOTUSED"] ,**__lowerCamelCase : Union[str, Any] ,):
'''simple docstring'''
a = AddedToken(__lowerCamelCase ,lstrip=__lowerCamelCase ,rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase ,__lowerCamelCase ) else mask_token
super().__init__(
__lowerCamelCase ,tokenizer_file=__lowerCamelCase ,bos_token=__lowerCamelCase ,eos_token=__lowerCamelCase ,sep_token=__lowerCamelCase ,cls_token=__lowerCamelCase ,unk_token=__lowerCamelCase ,pad_token=__lowerCamelCase ,mask_token=__lowerCamelCase ,additional_special_tokens=__lowerCamelCase ,**__lowerCamelCase ,)
a = vocab_file
a = False if not self.vocab_file else True
def SCREAMING_SNAKE_CASE_ ( self : List[str] ,__lowerCamelCase : List[int] ,__lowerCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
a = [self.cls_token_id]
a = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE_ ( self : Any ,__lowerCamelCase : List[int] ,__lowerCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
a = [self.sep_token_id]
a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE_ ( self : Dict ,__lowerCamelCase : str ,__lowerCamelCase : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(__lowerCamelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
a = os.path.join(
__lowerCamelCase ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ):
copyfile(self.vocab_file ,__lowerCamelCase )
return (out_vocab_file,)
| 330 |
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
UpperCamelCase__ : Optional[int] = pd.read_csv("""sample_data.csv""", header=None)
UpperCamelCase__ : Tuple = df.shape[:1][0]
# If you're using some other dataset input the target column
UpperCamelCase__ : List[Any] = df.iloc[:, 1:2]
UpperCamelCase__ : Union[str, Any] = actual_data.values.reshape(len_data, 1)
UpperCamelCase__ : List[Any] = MinMaxScaler().fit_transform(actual_data)
UpperCamelCase__ : Optional[Any] = 10
UpperCamelCase__ : int = 5
UpperCamelCase__ : List[str] = 20
UpperCamelCase__ : Optional[int] = len_data - periods * look_back
UpperCamelCase__ : Union[str, Any] = actual_data[:division]
UpperCamelCase__ : str = actual_data[division - look_back :]
UpperCamelCase__ , UpperCamelCase__ : Union[str, Any] = [], []
UpperCamelCase__ , UpperCamelCase__ : str = [], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
UpperCamelCase__ : List[str] = np.array(train_x)
UpperCamelCase__ : Optional[Any] = np.array(test_x)
UpperCamelCase__ : Tuple = np.array([list(i.ravel()) for i in train_y])
UpperCamelCase__ : Optional[Any] = np.array([list(i.ravel()) for i in test_y])
UpperCamelCase__ : Union[str, Any] = Sequential()
model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
model.add(LSTM(64, input_shape=(128, 1)))
model.add(Dense(forward_days))
model.compile(loss="""mean_squared_error""", optimizer="""adam""")
UpperCamelCase__ : Tuple = model.fit(
x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
)
UpperCamelCase__ : Tuple = model.predict(x_test)
| 330 | 1 |
'''simple docstring'''
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
_lowercase : Optional[Any] = (
"https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)
_lowercase : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
def snake_case_ ( ):
"""simple docstring"""
lowercase_ : Tuple = '''https://pypi.org/pypi/diffusers/json'''
lowercase_ : Tuple = json.loads(request.urlopen(__SCREAMING_SNAKE_CASE ).read() )['''releases'''].keys()
return sorted(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE : version.Version(__SCREAMING_SNAKE_CASE ) )
def snake_case_ ( ):
"""simple docstring"""
if HF_MODULES_CACHE in sys.path:
return
sys.path.append(__SCREAMING_SNAKE_CASE )
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
lowercase_ : List[Any] = Path(__SCREAMING_SNAKE_CASE ) / '''__init__.py'''
if not init_path.exists():
init_path.touch()
def snake_case_ ( __SCREAMING_SNAKE_CASE : Union[str, os.PathLike] ):
"""simple docstring"""
init_hf_modules()
lowercase_ : Optional[int] = Path(__SCREAMING_SNAKE_CASE ) / name
# If the parent module does not exist yet, recursively create it.
if not dynamic_module_path.parent.exists():
create_dynamic_module(dynamic_module_path.parent )
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
lowercase_ : str = dynamic_module_path / '''__init__.py'''
if not init_path.exists():
init_path.touch()
def snake_case_ ( __SCREAMING_SNAKE_CASE : List[str] ):
"""simple docstring"""
with open(__SCREAMING_SNAKE_CASE , '''r''' , encoding='''utf-8''' ) as f:
lowercase_ : int = f.read()
# Imports of the form `import .xxx`
lowercase_ : List[Any] = re.findall('''^\s*import\s+\.(\S+)\s*$''' , __SCREAMING_SNAKE_CASE , flags=re.MULTILINE )
# Imports of the form `from .xxx import yyy`
relative_imports += re.findall('''^\s*from\s+\.(\S+)\s+import''' , __SCREAMING_SNAKE_CASE , flags=re.MULTILINE )
# Unique-ify
return list(set(__SCREAMING_SNAKE_CASE ) )
def snake_case_ ( __SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
lowercase_ : int = False
lowercase_ : Any = [module_file]
lowercase_ : Dict = []
# Let's recurse through all relative imports
while not no_change:
lowercase_ : Dict = []
for f in files_to_check:
new_imports.extend(get_relative_imports(__SCREAMING_SNAKE_CASE ) )
lowercase_ : Union[str, Any] = Path(__SCREAMING_SNAKE_CASE ).parent
lowercase_ : Optional[int] = [str(module_path / m ) for m in new_imports]
lowercase_ : str = [f for f in new_import_files if f not in all_relative_imports]
lowercase_ : int = [F'''{f}.py''' for f in new_import_files]
lowercase_ : Optional[Any] = len(__SCREAMING_SNAKE_CASE ) == 0
all_relative_imports.extend(__SCREAMING_SNAKE_CASE )
return all_relative_imports
def snake_case_ ( __SCREAMING_SNAKE_CASE : Any ):
"""simple docstring"""
with open(__SCREAMING_SNAKE_CASE , '''r''' , encoding='''utf-8''' ) as f:
lowercase_ : Union[str, Any] = f.read()
# Imports of the form `import xxx`
lowercase_ : Any = re.findall('''^\s*import\s+(\S+)\s*$''' , __SCREAMING_SNAKE_CASE , flags=re.MULTILINE )
# Imports of the form `from xxx import yyy`
imports += re.findall('''^\s*from\s+(\S+)\s+import''' , __SCREAMING_SNAKE_CASE , flags=re.MULTILINE )
# Only keep the top-level module
lowercase_ : List[str] = [imp.split('''.''' )[0] for imp in imports if not imp.startswith('''.''' )]
# Unique-ify and test we got them all
lowercase_ : Any = list(set(__SCREAMING_SNAKE_CASE ) )
lowercase_ : Optional[Any] = []
for imp in imports:
try:
importlib.import_module(__SCREAMING_SNAKE_CASE )
except ImportError:
missing_packages.append(__SCREAMING_SNAKE_CASE )
if len(__SCREAMING_SNAKE_CASE ) > 0:
raise ImportError(
'''This modeling file requires the following packages that were not found in your environment: '''
F'''{', '.join(__SCREAMING_SNAKE_CASE )}. Run `pip install {' '.join(__SCREAMING_SNAKE_CASE )}`''' )
return get_relative_imports(__SCREAMING_SNAKE_CASE )
def snake_case_ ( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] ):
"""simple docstring"""
lowercase_ : List[Any] = module_path.replace(os.path.sep , '''.''' )
lowercase_ : Any = importlib.import_module(__SCREAMING_SNAKE_CASE )
if class_name is None:
return find_pipeline_class(__SCREAMING_SNAKE_CASE )
return getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
from ..pipelines import DiffusionPipeline
lowercase_ : int = dict(inspect.getmembers(__SCREAMING_SNAKE_CASE , inspect.isclass ) )
lowercase_ : Optional[Any] = None
for cls_name, cls in cls_members.items():
if (
cls_name != DiffusionPipeline.__name__
and issubclass(cls , __SCREAMING_SNAKE_CASE )
and cls.__module__.split('''.''' )[0] != "diffusers"
):
if pipeline_class is not None:
raise ValueError(
F'''Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:'''
F''' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in'''
F''' {loaded_module}.''' )
lowercase_ : List[Any] = cls
return pipeline_class
def snake_case_ ( __SCREAMING_SNAKE_CASE : Union[str, os.PathLike] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[Union[str, os.PathLike]] = None , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : Optional[Dict[str, str]] = None , __SCREAMING_SNAKE_CASE : Optional[Union[bool, str]] = None , __SCREAMING_SNAKE_CASE : Optional[str] = None , __SCREAMING_SNAKE_CASE : bool = False , ):
"""simple docstring"""
lowercase_ : Dict = str(__SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if os.path.isfile(__SCREAMING_SNAKE_CASE ):
lowercase_ : Dict = module_file_or_url
lowercase_ : int = '''local'''
elif pretrained_model_name_or_path.count('''/''' ) == 0:
lowercase_ : Optional[int] = get_diffusers_versions()
# cut ".dev0"
lowercase_ : List[Any] = '''v''' + '''.'''.join(__version__.split('''.''' )[:3] )
# retrieve github version that matches
if revision is None:
lowercase_ : List[str] = latest_version if latest_version[1:] in available_versions else '''main'''
logger.info(F'''Defaulting to latest_version: {revision}.''' )
elif revision in available_versions:
lowercase_ : List[str] = F'''v{revision}'''
elif revision == "main":
lowercase_ : Optional[Any] = revision
else:
raise ValueError(
F'''`custom_revision`: {revision} does not exist. Please make sure to choose one of'''
F''' {', '.join(available_versions + ['main'] )}.''' )
# community pipeline on GitHub
lowercase_ : Tuple = COMMUNITY_PIPELINES_URL.format(revision=__SCREAMING_SNAKE_CASE , pipeline=__SCREAMING_SNAKE_CASE )
try:
lowercase_ : Optional[Any] = cached_download(
__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , force_download=__SCREAMING_SNAKE_CASE , proxies=__SCREAMING_SNAKE_CASE , resume_download=__SCREAMING_SNAKE_CASE , local_files_only=__SCREAMING_SNAKE_CASE , use_auth_token=__SCREAMING_SNAKE_CASE , )
lowercase_ : Tuple = '''git'''
lowercase_ : Tuple = pretrained_model_name_or_path + '''.py'''
except EnvironmentError:
logger.error(F'''Could not locate the {module_file} inside {pretrained_model_name_or_path}.''' )
raise
else:
try:
# Load from URL or cache if already cached
lowercase_ : str = hf_hub_download(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , force_download=__SCREAMING_SNAKE_CASE , proxies=__SCREAMING_SNAKE_CASE , resume_download=__SCREAMING_SNAKE_CASE , local_files_only=__SCREAMING_SNAKE_CASE , use_auth_token=__SCREAMING_SNAKE_CASE , )
lowercase_ : Optional[Any] = os.path.join('''local''' , '''--'''.join(pretrained_model_name_or_path.split('''/''' ) ) )
except EnvironmentError:
logger.error(F'''Could not locate the {module_file} inside {pretrained_model_name_or_path}.''' )
raise
# Check we have all the requirements in our environment
lowercase_ : Tuple = check_imports(__SCREAMING_SNAKE_CASE )
# Now we move the module inside our cached dynamic modules.
lowercase_ : str = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
create_dynamic_module(__SCREAMING_SNAKE_CASE )
lowercase_ : Any = Path(__SCREAMING_SNAKE_CASE ) / full_submodule
if submodule == "local" or submodule == "git":
# We always copy local files (we could hash the file to see if there was a change, and give them the name of
# that hash, to only copy when there is a modification but it seems overkill for now).
# The only reason we do the copy is to avoid putting too many folders in sys.path.
shutil.copy(__SCREAMING_SNAKE_CASE , submodule_path / module_file )
for module_needed in modules_needed:
lowercase_ : Union[str, Any] = F'''{module_needed}.py'''
shutil.copy(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , submodule_path / module_needed )
else:
# Get the commit hash
# TODO: we will get this info in the etag soon, so retrieve it from there and not here.
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase_ : Tuple = use_auth_token
elif use_auth_token is True:
lowercase_ : List[Any] = HfFolder.get_token()
else:
lowercase_ : Optional[Any] = None
lowercase_ : Optional[int] = model_info(__SCREAMING_SNAKE_CASE , revision=__SCREAMING_SNAKE_CASE , token=__SCREAMING_SNAKE_CASE ).sha
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
# benefit of versioning.
lowercase_ : int = submodule_path / commit_hash
lowercase_ : Tuple = full_submodule + os.path.sep + commit_hash
create_dynamic_module(__SCREAMING_SNAKE_CASE )
if not (submodule_path / module_file).exists():
shutil.copy(__SCREAMING_SNAKE_CASE , submodule_path / module_file )
# Make sure we also have every file with relative
for module_needed in modules_needed:
if not (submodule_path / module_needed).exists():
get_cached_module_file(
__SCREAMING_SNAKE_CASE , F'''{module_needed}.py''' , cache_dir=__SCREAMING_SNAKE_CASE , force_download=__SCREAMING_SNAKE_CASE , resume_download=__SCREAMING_SNAKE_CASE , proxies=__SCREAMING_SNAKE_CASE , use_auth_token=__SCREAMING_SNAKE_CASE , revision=__SCREAMING_SNAKE_CASE , local_files_only=__SCREAMING_SNAKE_CASE , )
return os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def snake_case_ ( __SCREAMING_SNAKE_CASE : Union[str, os.PathLike] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None , __SCREAMING_SNAKE_CASE : Optional[Union[str, os.PathLike]] = None , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : Optional[Dict[str, str]] = None , __SCREAMING_SNAKE_CASE : Optional[Union[bool, str]] = None , __SCREAMING_SNAKE_CASE : Optional[str] = None , __SCREAMING_SNAKE_CASE : bool = False , **__SCREAMING_SNAKE_CASE : Optional[Any] , ):
"""simple docstring"""
lowercase_ : Optional[Any] = get_cached_module_file(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , force_download=__SCREAMING_SNAKE_CASE , resume_download=__SCREAMING_SNAKE_CASE , proxies=__SCREAMING_SNAKE_CASE , use_auth_token=__SCREAMING_SNAKE_CASE , revision=__SCREAMING_SNAKE_CASE , local_files_only=__SCREAMING_SNAKE_CASE , )
return get_class_in_module(__SCREAMING_SNAKE_CASE , final_module.replace('''.py''' , '''''' ) )
| 93 |
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class __lowerCAmelCase :
"""simple docstring"""
snake_case_ = 42 # [batch_size x 3]
snake_case_ = 42 # [batch_size x 3]
snake_case_ = 42 # [batch_size x 3]
snake_case_ = 42 # [batch_size x 3]
snake_case_ = 42
snake_case_ = 42
snake_case_ = 42
snake_case_ = 42
snake_case_ = 42
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
def lowercase_ ( self ) -> torch.Tensor:
'''simple docstring'''
__lowerCamelCase = torch.arange(self.height * self.width )
__lowerCamelCase = torch.stack(
[
pixel_indices % self.width,
torch.div(lowerCamelCase__ , self.width , rounding_mode='trunc' ),
] , axis=1 , )
return coords
@property
def lowercase_ ( self ) -> int:
'''simple docstring'''
__lowerCamelCase , *__lowerCamelCase = self.shape
__lowerCamelCase = int(np.prod(lowerCamelCase__ ) )
__lowerCamelCase = self.get_image_coords()
__lowerCamelCase = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
__lowerCamelCase = self.get_camera_rays(lowerCamelCase__ )
__lowerCamelCase = rays.view(lowerCamelCase__ , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def lowercase_ ( self , lowerCamelCase__ ) -> torch.Tensor:
'''simple docstring'''
__lowerCamelCase , *__lowerCamelCase , __lowerCamelCase = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
__lowerCamelCase = coords.view(lowerCamelCase__ , -1 , 2 )
__lowerCamelCase = self.resolution()
__lowerCamelCase = self.fov()
__lowerCamelCase = (flat.float() / (res - 1)) * 2 - 1
__lowerCamelCase = fracs * torch.tan(fov / 2 )
__lowerCamelCase = fracs.view(lowerCamelCase__ , -1 , 2 )
__lowerCamelCase = (
self.z.view(lowerCamelCase__ , 1 , 3 )
+ self.x.view(lowerCamelCase__ , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(lowerCamelCase__ , 1 , 3 ) * fracs[:, :, 1:]
)
__lowerCamelCase = directions / directions.norm(dim=-1 , keepdim=lowerCamelCase__ )
__lowerCamelCase = torch.stack(
[
torch.broadcast_to(self.origin.view(lowerCamelCase__ , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(lowerCamelCase__ , *lowerCamelCase__ , 2 , 3 )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> "DifferentiableProjectiveCamera":
'''simple docstring'''
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=lowerCamelCase__ , height=lowerCamelCase__ , x_fov=self.x_fov , y_fov=self.y_fov , )
def lowerCamelCase_ ( UpperCamelCase__ : int ) -> DifferentiableProjectiveCamera:
"""simple docstring"""
__lowerCamelCase = []
__lowerCamelCase = []
__lowerCamelCase = []
__lowerCamelCase = []
for theta in np.linspace(0 , 2 * np.pi , num=20 ):
__lowerCamelCase = np.array([np.sin(UpperCamelCase__ ), np.cos(UpperCamelCase__ ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
__lowerCamelCase = -z * 4
__lowerCamelCase = np.array([np.cos(UpperCamelCase__ ), -np.sin(UpperCamelCase__ ), 0.0] )
__lowerCamelCase = np.cross(UpperCamelCase__ , UpperCamelCase__ )
origins.append(UpperCamelCase__ )
xs.append(UpperCamelCase__ )
ys.append(UpperCamelCase__ )
zs.append(UpperCamelCase__ )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(UpperCamelCase__ , axis=0 ) ).float() , x=torch.from_numpy(np.stack(UpperCamelCase__ , axis=0 ) ).float() , y=torch.from_numpy(np.stack(UpperCamelCase__ , axis=0 ) ).float() , z=torch.from_numpy(np.stack(UpperCamelCase__ , axis=0 ) ).float() , width=UpperCamelCase__ , height=UpperCamelCase__ , x_fov=0.7 , y_fov=0.7 , shape=(1, len(UpperCamelCase__ )) , )
| 90 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
'''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''',
}
class lowercase_ ( __lowercase ):
UpperCamelCase_ : Any = "data2vec-text"
def __init__( self : List[str] , A__ : Any=30522 , A__ : Optional[Any]=768 , A__ : Union[str, Any]=12 , A__ : Union[str, Any]=12 , A__ : List[Any]=3072 , A__ : Union[str, Any]="gelu" , A__ : str=0.1 , A__ : Dict=0.1 , A__ : Any=512 , A__ : Optional[int]=2 , A__ : Dict=0.02 , A__ : List[str]=1e-12 , A__ : int=1 , A__ : Optional[int]=0 , A__ : List[str]=2 , A__ : str="absolute" , A__ : Optional[Any]=True , A__ : List[Any]=None , **A__ : List[Any] , ) -> Tuple:
super().__init__(pad_token_id=A__ , bos_token_id=A__ , eos_token_id=A__ , **A__ )
_snake_case = vocab_size
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = hidden_act
_snake_case = intermediate_size
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = max_position_embeddings
_snake_case = type_vocab_size
_snake_case = initializer_range
_snake_case = layer_norm_eps
_snake_case = position_embedding_type
_snake_case = use_cache
_snake_case = classifier_dropout
class lowercase_ ( __lowercase ):
@property
def UpperCamelCase_ ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_snake_case = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
_snake_case = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 278 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
__A = logging.get_logger(__name__)
__A = '''▁'''
__A = {
'''vocab_file''': '''vocab.json''',
'''spm_file''': '''sentencepiece.bpe.model''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
__A = {
'''vocab_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json''',
},
'''spm_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model''',
},
'''tokenizer_config_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json''',
},
}
__A = {
'''facebook/m2m100_418M''': 10_24,
}
# fmt: off
__A = {
'''m2m100''': ['''af''', '''am''', '''ar''', '''ast''', '''az''', '''ba''', '''be''', '''bg''', '''bn''', '''br''', '''bs''', '''ca''', '''ceb''', '''cs''', '''cy''', '''da''', '''de''', '''el''', '''en''', '''es''', '''et''', '''fa''', '''ff''', '''fi''', '''fr''', '''fy''', '''ga''', '''gd''', '''gl''', '''gu''', '''ha''', '''he''', '''hi''', '''hr''', '''ht''', '''hu''', '''hy''', '''id''', '''ig''', '''ilo''', '''is''', '''it''', '''ja''', '''jv''', '''ka''', '''kk''', '''km''', '''kn''', '''ko''', '''lb''', '''lg''', '''ln''', '''lo''', '''lt''', '''lv''', '''mg''', '''mk''', '''ml''', '''mn''', '''mr''', '''ms''', '''my''', '''ne''', '''nl''', '''no''', '''ns''', '''oc''', '''or''', '''pa''', '''pl''', '''ps''', '''pt''', '''ro''', '''ru''', '''sd''', '''si''', '''sk''', '''sl''', '''so''', '''sq''', '''sr''', '''ss''', '''su''', '''sv''', '''sw''', '''ta''', '''th''', '''tl''', '''tn''', '''tr''', '''uk''', '''ur''', '''uz''', '''vi''', '''wo''', '''xh''', '''yi''', '''yo''', '''zh''', '''zu'''],
'''wmt21''': ['''en''', '''ha''', '''is''', '''ja''', '''cs''', '''ru''', '''zh''', '''de''']
}
class lowercase_ ( __lowercase ):
UpperCamelCase_ : str = VOCAB_FILES_NAMES
UpperCamelCase_ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ : Dict = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ : Union[str, Any] = ["input_ids", "attention_mask"]
UpperCamelCase_ : List[int] = []
UpperCamelCase_ : List[int] = []
def __init__( self : str , A__ : str , A__ : Optional[Any] , A__ : Union[str, Any]=None , A__ : Dict=None , A__ : Any="<s>" , A__ : Union[str, Any]="</s>" , A__ : Tuple="</s>" , A__ : Dict="<pad>" , A__ : List[Any]="<unk>" , A__ : str="m2m100" , A__ : Optional[Dict[str, Any]] = None , A__ : List[Any]=8 , **A__ : Union[str, Any] , ) -> None:
_snake_case = {} if sp_model_kwargs is None else sp_model_kwargs
_snake_case = language_codes
_snake_case = FAIRSEQ_LANGUAGE_CODES[language_codes]
_snake_case = {lang_code: f"""__{lang_code}__""" for lang_code in fairseq_language_code}
_snake_case = kwargs.get('''additional_special_tokens''' , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(A__ )
for lang_code in fairseq_language_code
if self.get_lang_token(A__ ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=A__ , tgt_lang=A__ , bos_token=A__ , eos_token=A__ , sep_token=A__ , unk_token=A__ , pad_token=A__ , language_codes=A__ , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=A__ , **A__ , )
_snake_case = vocab_file
_snake_case = load_json(A__ )
_snake_case = {v: k for k, v in self.encoder.items()}
_snake_case = spm_file
_snake_case = load_spm(A__ , self.sp_model_kwargs )
_snake_case = len(self.encoder )
_snake_case = {
self.get_lang_token(A__ ): self.encoder_size + i for i, lang_code in enumerate(A__ )
}
_snake_case = {lang_code: self.encoder_size + i for i, lang_code in enumerate(A__ )}
_snake_case = {v: k for k, v in self.lang_token_to_id.items()}
_snake_case = src_lang if src_lang is not None else '''en'''
_snake_case = tgt_lang
_snake_case = self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
_snake_case = num_madeup_words
@property
def UpperCamelCase_ ( self : int ) -> int:
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def UpperCamelCase_ ( self : Dict ) -> str:
return self._src_lang
@src_lang.setter
def UpperCamelCase_ ( self : List[str] , A__ : str ) -> None:
_snake_case = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def UpperCamelCase_ ( self : Any , A__ : str ) -> List[str]:
return self.sp_model.encode(A__ , out_type=A__ )
def UpperCamelCase_ ( self : Optional[int] , A__ : Dict ) -> str:
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(A__ , self.encoder[self.unk_token] )
def UpperCamelCase_ ( self : Union[str, Any] , A__ : int ) -> str:
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(A__ , self.unk_token )
def UpperCamelCase_ ( self : Optional[int] , A__ : Optional[int] ) -> List[Any]:
_snake_case = []
_snake_case = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(A__ ) + token
_snake_case = []
else:
current_sub_tokens.append(A__ )
out_string += self.sp_model.decode(A__ )
return out_string.strip()
def UpperCamelCase_ ( self : str , A__ : List[int] , A__ : Optional[List[int]] = None , A__ : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A__ , token_ids_a=A__ , already_has_special_tokens=A__ )
_snake_case = [1] * len(self.prefix_tokens )
_snake_case = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(A__ )) + suffix_ones
return prefix_ones + ([0] * len(A__ )) + ([0] * len(A__ )) + suffix_ones
def UpperCamelCase_ ( self : Tuple , A__ : List[int] , A__ : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def UpperCamelCase_ ( self : str ) -> Dict:
_snake_case = {self.convert_ids_to_tokens(A__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : str ) -> Dict:
_snake_case = self.__dict__.copy()
_snake_case = None
return state
def __setstate__( self : Union[str, Any] , A__ : Dict ) -> None:
_snake_case = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_snake_case = {}
_snake_case = load_spm(self.spm_file , self.sp_model_kwargs )
def UpperCamelCase_ ( self : Any , A__ : str , A__ : Optional[str] = None ) -> Tuple[str]:
_snake_case = Path(A__ )
if not save_dir.is_dir():
raise OSError(f"""{save_directory} should be a directory""" )
_snake_case = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file''']
)
_snake_case = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file''']
)
save_json(self.encoder , A__ )
if os.path.abspath(self.spm_file ) != os.path.abspath(A__ ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , A__ )
elif not os.path.isfile(self.spm_file ):
with open(A__ , '''wb''' ) as fi:
_snake_case = self.sp_model.serialized_model_proto()
fi.write(A__ )
return (str(A__ ), str(A__ ))
def UpperCamelCase_ ( self : Optional[int] , A__ : List[str] , A__ : str = "en" , A__ : Optional[List[str]] = None , A__ : str = "ro" , **A__ : List[Any] , ) -> BatchEncoding:
_snake_case = src_lang
_snake_case = tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(A__ , A__ , **A__ )
def UpperCamelCase_ ( self : List[str] , A__ : int , A__ : Optional[str] , A__ : Optional[str] , **A__ : Union[str, Any] ) -> Tuple:
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
_snake_case = src_lang
_snake_case = self(A__ , add_special_tokens=A__ , **A__ )
_snake_case = self.get_lang_id(A__ )
_snake_case = tgt_lang_id
return inputs
def UpperCamelCase_ ( self : Dict ) -> Optional[Any]:
self.set_src_lang_special_tokens(self.src_lang )
def UpperCamelCase_ ( self : Optional[Any] ) -> Dict:
self.set_tgt_lang_special_tokens(self.tgt_lang )
def UpperCamelCase_ ( self : List[Any] , A__ : str ) -> None:
_snake_case = self.get_lang_token(A__ )
_snake_case = self.lang_token_to_id[lang_token]
_snake_case = [self.cur_lang_id]
_snake_case = [self.eos_token_id]
def UpperCamelCase_ ( self : List[str] , A__ : str ) -> None:
_snake_case = self.get_lang_token(A__ )
_snake_case = self.lang_token_to_id[lang_token]
_snake_case = [self.cur_lang_id]
_snake_case = [self.eos_token_id]
def UpperCamelCase_ ( self : Dict , A__ : str ) -> str:
return self.lang_code_to_token[lang]
def UpperCamelCase_ ( self : Tuple , A__ : str ) -> int:
_snake_case = self.get_lang_token(A__ )
return self.lang_token_to_id[lang_token]
def snake_case_(_UpperCamelCase , _UpperCamelCase ) -> sentencepiece.SentencePieceProcessor:
"""simple docstring"""
_snake_case = sentencepiece.SentencePieceProcessor(**_UpperCamelCase )
spm.Load(str(_UpperCamelCase ) )
return spm
def snake_case_(_UpperCamelCase ) -> Union[Dict, List]:
"""simple docstring"""
with open(_UpperCamelCase , '''r''' ) as f:
return json.load(_UpperCamelCase )
def snake_case_(_UpperCamelCase , _UpperCamelCase ) -> None:
"""simple docstring"""
with open(_UpperCamelCase , '''w''' ) as f:
json.dump(_UpperCamelCase , _UpperCamelCase , indent=2 )
| 278 | 1 |
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> int:
__lowerCamelCase : List[Any] = hex_num.strip()
if not hex_num:
raise ValueError('No value was passed to the function' )
__lowerCamelCase : List[Any] = hex_num[0] == '-'
if is_negative:
__lowerCamelCase : Dict = hex_num[1:]
try:
__lowerCamelCase : List[str] = int(lowerCamelCase__ , 1_6 )
except ValueError:
raise ValueError('Invalid value was passed to the function' )
__lowerCamelCase : Tuple = ''
while int_num > 0:
__lowerCamelCase : List[Any] = str(int_num % 2 ) + bin_str
int_num >>= 1
return int(('-' + bin_str) if is_negative else bin_str )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 73 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A_ ( SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : Optional[Any] = ['''image_processor''', '''tokenizer''']
_UpperCAmelCase : Union[str, Any] = '''Pix2StructImageProcessor'''
_UpperCAmelCase : Any = ('''T5Tokenizer''', '''T5TokenizerFast''')
def __init__( self : List[str] ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : int):
__lowerCamelCase : List[Any] = False
super().__init__(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
def __call__( self : str ,SCREAMING_SNAKE_CASE__ : Any=None ,SCREAMING_SNAKE_CASE__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None ,SCREAMING_SNAKE_CASE__ : bool = True ,SCREAMING_SNAKE_CASE__ : Union[bool, str, PaddingStrategy] = False ,SCREAMING_SNAKE_CASE__ : Union[bool, str, TruncationStrategy] = None ,SCREAMING_SNAKE_CASE__ : Optional[int] = None ,SCREAMING_SNAKE_CASE__ : Optional[int] = 2_0_4_8 ,SCREAMING_SNAKE_CASE__ : int = 0 ,SCREAMING_SNAKE_CASE__ : Optional[int] = None ,SCREAMING_SNAKE_CASE__ : Optional[bool] = None ,SCREAMING_SNAKE_CASE__ : bool = False ,SCREAMING_SNAKE_CASE__ : bool = False ,SCREAMING_SNAKE_CASE__ : bool = False ,SCREAMING_SNAKE_CASE__ : bool = False ,SCREAMING_SNAKE_CASE__ : bool = False ,SCREAMING_SNAKE_CASE__ : bool = True ,SCREAMING_SNAKE_CASE__ : Optional[Union[str, TensorType]] = None ,**SCREAMING_SNAKE_CASE__ : Dict ,):
if images is None and text is None:
raise ValueError('You have to specify either images or text.')
# Get only text
if images is None and not self.image_processor.is_vqa:
__lowerCamelCase : Tuple = self.tokenizer
__lowerCamelCase : Dict = self.tokenizer(
text=SCREAMING_SNAKE_CASE__ ,add_special_tokens=SCREAMING_SNAKE_CASE__ ,padding=SCREAMING_SNAKE_CASE__ ,truncation=SCREAMING_SNAKE_CASE__ ,max_length=SCREAMING_SNAKE_CASE__ ,stride=SCREAMING_SNAKE_CASE__ ,pad_to_multiple_of=SCREAMING_SNAKE_CASE__ ,return_attention_mask=SCREAMING_SNAKE_CASE__ ,return_overflowing_tokens=SCREAMING_SNAKE_CASE__ ,return_special_tokens_mask=SCREAMING_SNAKE_CASE__ ,return_offsets_mapping=SCREAMING_SNAKE_CASE__ ,return_token_type_ids=SCREAMING_SNAKE_CASE__ ,return_length=SCREAMING_SNAKE_CASE__ ,verbose=SCREAMING_SNAKE_CASE__ ,return_tensors=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ,)
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
__lowerCamelCase : List[Any] = self.image_processor(
SCREAMING_SNAKE_CASE__ ,return_tensors=SCREAMING_SNAKE_CASE__ ,max_patches=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
else:
# add pixel_values and bbox
__lowerCamelCase : List[Any] = self.image_processor(
SCREAMING_SNAKE_CASE__ ,return_tensors=SCREAMING_SNAKE_CASE__ ,max_patches=SCREAMING_SNAKE_CASE__ ,header_text=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
if text is not None and not self.image_processor.is_vqa:
__lowerCamelCase : List[Any] = self.tokenizer(
text=SCREAMING_SNAKE_CASE__ ,add_special_tokens=SCREAMING_SNAKE_CASE__ ,padding=SCREAMING_SNAKE_CASE__ ,truncation=SCREAMING_SNAKE_CASE__ ,max_length=SCREAMING_SNAKE_CASE__ ,stride=SCREAMING_SNAKE_CASE__ ,pad_to_multiple_of=SCREAMING_SNAKE_CASE__ ,return_attention_mask=SCREAMING_SNAKE_CASE__ ,return_overflowing_tokens=SCREAMING_SNAKE_CASE__ ,return_special_tokens_mask=SCREAMING_SNAKE_CASE__ ,return_offsets_mapping=SCREAMING_SNAKE_CASE__ ,return_token_type_ids=SCREAMING_SNAKE_CASE__ ,return_length=SCREAMING_SNAKE_CASE__ ,verbose=SCREAMING_SNAKE_CASE__ ,return_tensors=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ,)
if "attention_mask" in text_encoding:
__lowerCamelCase : List[Any] = text_encoding.pop('attention_mask')
if "input_ids" in text_encoding:
__lowerCamelCase : Dict = text_encoding.pop('input_ids')
else:
__lowerCamelCase : Optional[int] = None
if text_encoding is not None:
encoding_image_processor.update(SCREAMING_SNAKE_CASE__)
return encoding_image_processor
def lowerCAmelCase ( self : Dict ,*SCREAMING_SNAKE_CASE__ : str ,**SCREAMING_SNAKE_CASE__ : int):
return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : List[str] ,*SCREAMING_SNAKE_CASE__ : int ,**SCREAMING_SNAKE_CASE__ : Dict):
return self.tokenizer.decode(*SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
@property
def lowerCAmelCase ( self : int):
__lowerCamelCase : Dict = self.tokenizer.model_input_names
__lowerCamelCase : int = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 73 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCamelCase = {
"configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
"tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
"ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoFormerForCausalLM",
"RoFormerForMaskedLM",
"RoFormerForMultipleChoice",
"RoFormerForQuestionAnswering",
"RoFormerForSequenceClassification",
"RoFormerForTokenClassification",
"RoFormerLayer",
"RoFormerModel",
"RoFormerPreTrainedModel",
"load_tf_weights_in_roformer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
"TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRoFormerForCausalLM",
"TFRoFormerForMaskedLM",
"TFRoFormerForMultipleChoice",
"TFRoFormerForQuestionAnswering",
"TFRoFormerForSequenceClassification",
"TFRoFormerForTokenClassification",
"TFRoFormerLayer",
"TFRoFormerModel",
"TFRoFormerPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
"FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxRoFormerForMaskedLM",
"FlaxRoFormerForMultipleChoice",
"FlaxRoFormerForQuestionAnswering",
"FlaxRoFormerForSequenceClassification",
"FlaxRoFormerForTokenClassification",
"FlaxRoFormerModel",
"FlaxRoFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 362 | """simple docstring"""
def UpperCAmelCase ( UpperCamelCase__ = 4_000_000 ):
"""simple docstring"""
A__ = [0, 1]
A__ = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1] )
if fib[i + 2] > n:
break
i += 1
A__ = 0
for j in range(len(UpperCamelCase__ ) - 1 ):
if fib[j] % 2 == 0:
total += fib[j]
return total
if __name__ == "__main__":
print(F'''{solution() = }''')
| 154 | 0 |
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowerCamelCase__ = 16
lowerCamelCase__ = 32
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 16 ) -> Dict:
lowerCAmelCase__ : Tuple = AutoTokenizer.from_pretrained('bert-base-cased' )
lowerCAmelCase__ : Optional[Any] = load_dataset('glue' , 'mrpc' )
def tokenize_function(SCREAMING_SNAKE_CASE_ ):
# max_length=None => use the model max length (it's actually the default)
lowerCAmelCase__ : List[Any] = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowerCAmelCase__ : Dict = datasets.map(
SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCAmelCase__ : int = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(SCREAMING_SNAKE_CASE_ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowerCAmelCase__ : List[str] = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowerCAmelCase__ : List[Any] = 16
elif accelerator.mixed_precision != "no":
lowerCAmelCase__ : Optional[Any] = 8
else:
lowerCAmelCase__ : Optional[int] = None
return tokenizer.pad(
SCREAMING_SNAKE_CASE_ , padding='longest' , max_length=SCREAMING_SNAKE_CASE_ , pad_to_multiple_of=SCREAMING_SNAKE_CASE_ , return_tensors='pt' , )
# Instantiate dataloaders.
lowerCAmelCase__ : Optional[int] = DataLoader(
tokenized_datasets['train'] , shuffle=SCREAMING_SNAKE_CASE_ , collate_fn=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ , drop_last=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Tuple = DataLoader(
tokenized_datasets['validation'] , shuffle=SCREAMING_SNAKE_CASE_ , collate_fn=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ , drop_last=(accelerator.mixed_precision == 'fp8') , )
return train_dataloader, eval_dataloader
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Any:
# Initialize accelerator
lowerCAmelCase__ : str = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCAmelCase__ : Optional[Any] = config['lr']
lowerCAmelCase__ : Optional[Any] = int(config['num_epochs'] )
lowerCAmelCase__ : List[Any] = int(config['seed'] )
lowerCAmelCase__ : Any = int(config['batch_size'] )
lowerCAmelCase__ : Optional[int] = evaluate.load('glue' , 'mrpc' )
# If the batch size is too big we use gradient accumulation
lowerCAmelCase__ : int = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
lowerCAmelCase__ : List[str] = batch_size // MAX_GPU_BATCH_SIZE
lowerCAmelCase__ : Optional[int] = MAX_GPU_BATCH_SIZE
set_seed(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = get_dataloaders(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCAmelCase__ : int = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=SCREAMING_SNAKE_CASE_ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCAmelCase__ : Tuple = model.to(accelerator.device )
# Instantiate optimizer
lowerCAmelCase__ : int = AdamW(params=model.parameters() , lr=SCREAMING_SNAKE_CASE_ )
# Instantiate scheduler
lowerCAmelCase__ : List[str] = get_linear_schedule_with_warmup(
optimizer=SCREAMING_SNAKE_CASE_ , num_warmup_steps=100 , num_training_steps=(len(SCREAMING_SNAKE_CASE_ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : List[str] = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Now we train the model
for epoch in range(SCREAMING_SNAKE_CASE_ ):
model.train()
for step, batch in enumerate(SCREAMING_SNAKE_CASE_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowerCAmelCase__ : Any = model(**SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : str = outputs.loss
lowerCAmelCase__ : str = loss / gradient_accumulation_steps
accelerator.backward(SCREAMING_SNAKE_CASE_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(SCREAMING_SNAKE_CASE_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCAmelCase__ : int = model(**SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Optional[int] = outputs.logits.argmax(dim=-1 )
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=SCREAMING_SNAKE_CASE_ , references=SCREAMING_SNAKE_CASE_ , )
lowerCAmelCase__ : Dict = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( ) -> int:
lowerCAmelCase__ : List[str] = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
'--mixed_precision' , type=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
lowerCAmelCase__ : List[str] = parser.parse_args()
lowerCAmelCase__ : int = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
main() | 212 |
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int:
lowerCAmelCase__ : int = XCLIPTextConfig()
# derive patch size from model name
lowerCAmelCase__ : Any = model_name.find('patch' )
lowerCAmelCase__ : Any = int(model_name[start_idx + len('patch' ) : start_idx + len('patch' ) + 2] )
lowerCAmelCase__ : Any = XCLIPVisionConfig(patch_size=SCREAMING_SNAKE_CASE_ , num_frames=SCREAMING_SNAKE_CASE_ )
if "large" in model_name:
lowerCAmelCase__ : List[str] = 768
lowerCAmelCase__ : Optional[int] = 3_072
lowerCAmelCase__ : Any = 12
lowerCAmelCase__ : Optional[int] = 1_024
lowerCAmelCase__ : List[Any] = 4_096
lowerCAmelCase__ : Optional[Any] = 16
lowerCAmelCase__ : Any = 24
lowerCAmelCase__ : Dict = 768
lowerCAmelCase__ : List[str] = 3_072
if model_name == "xclip-large-patch14-16-frames":
lowerCAmelCase__ : str = 336
lowerCAmelCase__ : Any = XCLIPConfig.from_text_vision_configs(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if "large" in model_name:
lowerCAmelCase__ : List[Any] = 768
return config
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> List[Any]:
# text encoder
if name == "token_embedding.weight":
lowerCAmelCase__ : Optional[Any] = name.replace('token_embedding.weight' , 'text_model.embeddings.token_embedding.weight' )
if name == "positional_embedding":
lowerCAmelCase__ : int = name.replace('positional_embedding' , 'text_model.embeddings.position_embedding.weight' )
if "ln_1" in name:
lowerCAmelCase__ : Optional[Any] = name.replace('ln_1' , 'layer_norm1' )
if "ln_2" in name:
lowerCAmelCase__ : List[str] = name.replace('ln_2' , 'layer_norm2' )
if "c_fc" in name:
lowerCAmelCase__ : Tuple = name.replace('c_fc' , 'fc1' )
if "c_proj" in name:
lowerCAmelCase__ : Optional[Any] = name.replace('c_proj' , 'fc2' )
if name.startswith('transformer.resblocks' ):
lowerCAmelCase__ : List[str] = name.replace('transformer.resblocks' , 'text_model.encoder.layers' )
if "attn.out_proj" in name and "message" not in name:
lowerCAmelCase__ : Optional[Any] = name.replace('attn.out_proj' , 'self_attn.out_proj' )
if "ln_final" in name:
lowerCAmelCase__ : List[str] = name.replace('ln_final' , 'text_model.final_layer_norm' )
# visual encoder
if name == "visual.class_embedding":
lowerCAmelCase__ : Dict = name.replace('visual.class_embedding' , 'vision_model.embeddings.class_embedding' )
if name == "visual.positional_embedding":
lowerCAmelCase__ : Any = name.replace('visual.positional_embedding' , 'vision_model.embeddings.position_embedding.weight' )
if name.startswith('visual.transformer.resblocks' ):
lowerCAmelCase__ : Union[str, Any] = name.replace('visual.transformer.resblocks' , 'vision_model.encoder.layers' )
if "visual.conv1" in name:
lowerCAmelCase__ : Dict = name.replace('visual.conv1' , 'vision_model.embeddings.patch_embedding' )
if "visual.ln_pre" in name:
lowerCAmelCase__ : Union[str, Any] = name.replace('visual.ln_pre' , 'vision_model.pre_layernorm' )
if "visual.ln_post" in name:
lowerCAmelCase__ : Tuple = name.replace('visual.ln_post' , 'vision_model.post_layernorm' )
if "visual.proj" in name:
lowerCAmelCase__ : Optional[Any] = name.replace('visual.proj' , 'visual_projection.weight' )
if "text_projection" in name:
lowerCAmelCase__ : Any = name.replace('text_projection' , 'text_projection.weight' )
# things on top
if "prompts_visual_proj" in name:
lowerCAmelCase__ : str = name.replace('prompts_visual_proj' , 'prompts_visual_projection' )
if "prompts_visual_ln" in name:
lowerCAmelCase__ : Tuple = name.replace('prompts_visual_ln' , 'prompts_visual_layernorm' )
# mit
if name == "mit.positional_embedding":
lowerCAmelCase__ : Dict = name.replace('positional' , 'position' )
if name.startswith('mit.resblocks' ):
lowerCAmelCase__ : List[str] = name.replace('mit.resblocks' , 'mit.encoder.layers' )
# prompts generator
if name.startswith('prompts_generator.norm' ):
lowerCAmelCase__ : List[Any] = name.replace('prompts_generator.norm' , 'prompts_generator.layernorm' )
return name
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
for key in orig_state_dict.copy().keys():
lowerCAmelCase__ : Any = orig_state_dict.pop(SCREAMING_SNAKE_CASE_ )
if "attn.in_proj" in key:
lowerCAmelCase__ : str = key.split('.' )
if key.startswith('visual' ):
lowerCAmelCase__ : str = key_split[3]
lowerCAmelCase__ : Optional[Any] = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
lowerCAmelCase__ : Any = val[
:dim, :
]
lowerCAmelCase__ : Optional[Any] = val[
dim : dim * 2, :
]
lowerCAmelCase__ : int = val[
-dim:, :
]
else:
lowerCAmelCase__ : str = val[
:dim
]
lowerCAmelCase__ : Union[str, Any] = val[
dim : dim * 2
]
lowerCAmelCase__ : Any = val[
-dim:
]
else:
if "weight" in key:
lowerCAmelCase__ : str = val[
:dim, :
]
lowerCAmelCase__ : Tuple = val[
dim : dim * 2, :
]
lowerCAmelCase__ : int = val[
-dim:, :
]
else:
lowerCAmelCase__ : List[Any] = val[:dim]
lowerCAmelCase__ : Optional[Any] = val[
dim : dim * 2
]
lowerCAmelCase__ : int = val[-dim:]
elif key.startswith('mit' ):
lowerCAmelCase__ : Dict = key_split[2]
lowerCAmelCase__ : str = config.vision_config.mit_hidden_size
if "weight" in key:
lowerCAmelCase__ : List[Any] = val[:dim, :]
lowerCAmelCase__ : List[Any] = val[dim : dim * 2, :]
lowerCAmelCase__ : Any = val[-dim:, :]
else:
lowerCAmelCase__ : int = val[:dim]
lowerCAmelCase__ : Optional[Any] = val[dim : dim * 2]
lowerCAmelCase__ : List[str] = val[-dim:]
else:
lowerCAmelCase__ : int = key_split[2]
lowerCAmelCase__ : Optional[Any] = config.text_config.hidden_size
if "weight" in key:
lowerCAmelCase__ : int = val[:dim, :]
lowerCAmelCase__ : List[str] = val[
dim : dim * 2, :
]
lowerCAmelCase__ : Dict = val[-dim:, :]
else:
lowerCAmelCase__ : List[str] = val[:dim]
lowerCAmelCase__ : int = val[
dim : dim * 2
]
lowerCAmelCase__ : Optional[int] = val[-dim:]
else:
lowerCAmelCase__ : Optional[int] = rename_key(SCREAMING_SNAKE_CASE_ )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
lowerCAmelCase__ : List[str] = val.T
lowerCAmelCase__ : List[str] = val
return orig_state_dict
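# Loads a short "eating spaghetti" verification clip (stored as a .npy array of
# frames on the hf-internal-testing hub) so the converted model's predictions
# can be checked against reference logits further below.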
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> Dict:
    if num_frames == 8:
        lowerCAmelCase__ : Tuple = 'eating_spaghetti_8_frames.npy'
    elif num_frames == 16:
        lowerCAmelCase__ : Dict = 'eating_spaghetti.npy'
    elif num_frames == 32:
        lowerCAmelCase__ : List[Any] = 'eating_spaghetti_32_frames.npy'
    else:
        raise ValueError(F'''Unsupported number of frames: {num_frames}. Expected 8, 16 or 32.''' )
lowerCAmelCase__ : Dict = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename=SCREAMING_SNAKE_CASE_ , repo_type='dataset' , )
lowerCAmelCase__ : Optional[int] = np.load(SCREAMING_SNAKE_CASE_ )
return list(SCREAMING_SNAKE_CASE_ )
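# End-to-end conversion driver: downloads the original checkpoint (from a
# GitHub release or Google Drive), remaps its state dict, and sanity-checks the
# converted model on the reference video before optionally saving it locally
# and/or pushing it to the hub.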
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=False ) -> str:
lowerCAmelCase__ : Optional[int] = {
# fully supervised kinetics-400 checkpoints
'xclip-base-patch32': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth',
'xclip-base-patch32-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'
),
'xclip-base-patch16': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth',
'xclip-base-patch16-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'
),
'xclip-large-patch14': 'https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb',
'xclip-large-patch14-16-frames': 'https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f',
# fully supervised kinetics-600 checkpoints
'xclip-base-patch16-kinetics-600': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'
),
'xclip-base-patch16-kinetics-600-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'
),
'xclip-large-patch14-kinetics-600': 'https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be',
# few shot
'xclip-base-patch16-hmdb-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'
),
'xclip-base-patch16-hmdb-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'
),
'xclip-base-patch16-hmdb-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'
),
'xclip-base-patch16-hmdb-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'
),
'xclip-base-patch16-ucf-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'
),
'xclip-base-patch16-ucf-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'
),
'xclip-base-patch16-ucf-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'
),
'xclip-base-patch16-ucf-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'
),
# zero shot
'xclip-base-patch16-zero-shot': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth',
}
lowerCAmelCase__ : Dict = model_to_url[model_name]
lowerCAmelCase__ : List[str] = 8
if "16-frames" in model_name:
lowerCAmelCase__ : Any = 16
elif "shot" in model_name:
lowerCAmelCase__ : str = 32
lowerCAmelCase__ : str = get_xclip_config(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Any = XCLIPModel(SCREAMING_SNAKE_CASE_ )
model.eval()
if "drive" in checkpoint_url:
lowerCAmelCase__ : Any = 'pytorch_model.bin'
gdown.cached_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , quiet=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Tuple = torch.load(SCREAMING_SNAKE_CASE_ , map_location='cpu' )['model']
else:
lowerCAmelCase__ : Optional[Any] = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE_ )['model']
lowerCAmelCase__ : Optional[Any] = convert_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Optional[int] = XCLIPModel(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ , lowerCAmelCase__ : str = model.load_state_dict(SCREAMING_SNAKE_CASE_ , strict=SCREAMING_SNAKE_CASE_ )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
lowerCAmelCase__ : Optional[int] = 336 if model_name == 'xclip-large-patch14-16-frames' else 224
lowerCAmelCase__ : List[str] = VideoMAEImageProcessor(size=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Tuple = CLIPTokenizer.from_pretrained('openai/clip-vit-base-patch32' )
lowerCAmelCase__ : List[Any] = CLIPTokenizerFast.from_pretrained('openai/clip-vit-base-patch32' )
lowerCAmelCase__ : Optional[Any] = XCLIPProcessor(image_processor=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : List[Any] = prepare_video(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : str = processor(
text=['playing sports', 'eating spaghetti', 'go shopping'] , videos=SCREAMING_SNAKE_CASE_ , return_tensors='pt' , padding=SCREAMING_SNAKE_CASE_ )
print('Shape of pixel values:' , inputs.pixel_values.shape )
with torch.no_grad():
lowerCAmelCase__ : Union[str, Any] = model(**SCREAMING_SNAKE_CASE_ )
# Verify outputs
lowerCAmelCase__ : Optional[Any] = outputs.logits_per_video
lowerCAmelCase__ : Tuple = logits_per_video.softmax(dim=1 )
print('Probs:' , SCREAMING_SNAKE_CASE_ )
# kinetics-400
if model_name == "xclip-base-patch32":
lowerCAmelCase__ : str = torch.tensor([[0.0019, 0.9951, 0.0030]] )
elif model_name == "xclip-base-patch32-16-frames":
lowerCAmelCase__ : Dict = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]] )
elif model_name == "xclip-base-patch16":
lowerCAmelCase__ : int = torch.tensor([[0.0083, 0.9681, 0.0236]] )
elif model_name == "xclip-base-patch16-16-frames":
lowerCAmelCase__ : Any = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]] )
elif model_name == "xclip-large-patch14":
lowerCAmelCase__ : Any = torch.tensor([[0.0062, 0.9864, 0.0075]] )
elif model_name == "xclip-large-patch14-16-frames":
lowerCAmelCase__ : Union[str, Any] = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
lowerCAmelCase__ : Optional[int] = torch.tensor([[0.0555, 0.8914, 0.0531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
lowerCAmelCase__ : int = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
lowerCAmelCase__ : Dict = torch.tensor([[0.0036, 0.9920, 0.0045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
lowerCAmelCase__ : Optional[Any] = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
lowerCAmelCase__ : Tuple = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
lowerCAmelCase__ : int = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
lowerCAmelCase__ : Optional[Any] = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
lowerCAmelCase__ : str = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
lowerCAmelCase__ : Optional[Any] = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
lowerCAmelCase__ : int = torch.tensor([[0.0027, 0.9904, 0.0070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
lowerCAmelCase__ : Union[str, Any] = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
lowerCAmelCase__ : List[Any] = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]] )
else:
raise ValueError(F'''Model name {model_name} not supported''' )
assert torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
if push_to_hub:
print('Pushing model, processor and slow tokenizer files to the hub...' )
model.push_to_hub(SCREAMING_SNAKE_CASE_ , organization='nielsr' )
processor.push_to_hub(SCREAMING_SNAKE_CASE_ , organization='nielsr' )
slow_tokenizer.push_to_hub(SCREAMING_SNAKE_CASE_ , organization='nielsr' )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""xclip-base-patch32""",
type=str,
help="""Name of the model.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
lowerCamelCase__ = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 212 | 1 |
'''simple docstring'''
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : List[str] = ['''image_processor''']
__UpperCAmelCase : List[Any] = '''SamImageProcessor'''
def __init__( self : Dict ,_a : List[str] ):
'''simple docstring'''
super().__init__(_a )
_a : Union[str, Any] = self.image_processor
_a : Optional[int] = -10
_a : Optional[Any] = self.image_processor.size['longest_edge']
def __call__( self : Union[str, Any] ,_a : str=None ,_a : List[Any]=None ,_a : Tuple=None ,_a : Any=None ,_a : Optional[Union[str, TensorType]] = None ,**_a : Any ,):
'''simple docstring'''
_a : Dict = self.image_processor(
_a ,return_tensors=_a ,**_a ,)
        # pop arguments that are not used in the forward pass but are used nevertheless
_a : int = encoding_image_processor['original_sizes']
if hasattr(_a ,'numpy' ): # Checks if Torch or TF tensor
_a : List[Any] = original_sizes.numpy()
_a, _a, _a : Union[str, Any] = self._check_and_preprocess_points(
input_points=_a ,input_labels=_a ,input_boxes=_a ,)
_a : List[str] = self._normalize_and_convert(
_a ,_a ,input_points=_a ,input_labels=_a ,input_boxes=_a ,return_tensors=_a ,)
return encoding_image_processor
def __lowercase ( self : List[Any] ,_a : Dict ,_a : List[str] ,_a : List[Any]=None ,_a : int=None ,_a : Any=None ,_a : int="pt" ,):
'''simple docstring'''
if input_points is not None:
if len(_a ) != len(_a ):
_a : Optional[Any] = [
self._normalize_coordinates(self.target_size ,_a ,original_sizes[0] ) for point in input_points
]
else:
_a : Tuple = [
self._normalize_coordinates(self.target_size ,_a ,_a )
for point, original_size in zip(_a ,_a )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
_a, _a : List[str] = self._pad_points_and_labels(_a ,_a )
_a : str = np.array(_a )
if input_labels is not None:
_a : Optional[int] = np.array(_a )
if input_boxes is not None:
if len(_a ) != len(_a ):
_a : Optional[int] = [
self._normalize_coordinates(self.target_size ,_a ,original_sizes[0] ,is_bounding_box=_a )
for box in input_boxes
]
else:
_a : Optional[Any] = [
self._normalize_coordinates(self.target_size ,_a ,_a ,is_bounding_box=_a )
for box, original_size in zip(_a ,_a )
]
_a : Any = np.array(_a )
if input_boxes is not None:
if return_tensors == "pt":
_a : Any = torch.from_numpy(_a )
# boxes batch size of 1 by default
_a : Optional[Any] = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
_a : Dict = tf.convert_to_tensor(_a )
# boxes batch size of 1 by default
_a : Tuple = tf.expand_dims(_a ,1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({'input_boxes': input_boxes} )
if input_points is not None:
if return_tensors == "pt":
_a : Dict = torch.from_numpy(_a )
# point batch size of 1 by default
_a : Optional[Any] = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
_a : List[Any] = tf.convert_to_tensor(_a )
# point batch size of 1 by default
_a : Tuple = tf.expand_dims(_a ,1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({'input_points': input_points} )
if input_labels is not None:
if return_tensors == "pt":
_a : str = torch.from_numpy(_a )
                # label batch size of 1 by default
_a : Any = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
_a : Optional[Any] = tf.convert_to_tensor(_a )
                # label batch size of 1 by default
_a : Dict = tf.expand_dims(_a ,1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({'input_labels': input_labels} )
return encoding_image_processor
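    # Prompt point lists may have different lengths across the batch; the
    # helper below pads shorter lists (and their labels) with the sentinel
    # value -10 so everything can be stacked into one rectangular array.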
def __lowercase ( self : Union[str, Any] ,_a : str ,_a : Optional[Any] ):
'''simple docstring'''
_a : Optional[int] = max([point.shape[0] for point in input_points] )
_a : List[str] = []
for i, point in enumerate(_a ):
if point.shape[0] != expected_nb_points:
_a : List[str] = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] ,axis=0 )
_a : Optional[int] = np.append(input_labels[i] ,[self.point_pad_value] )
processed_input_points.append(_a )
_a : str = processed_input_points
return input_points, input_labels
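    # Coordinates are rescaled from the original image size to the preprocessed
    # size (longest edge = self.target_size): new_x = x * new_w / old_w and
    # new_y = y * new_h / old_h. Bounding boxes are temporarily viewed as two
    # corner points so the same scaling applies to them.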
def __lowercase ( self : List[str] ,_a : int ,_a : np.ndarray ,_a : List[str] ,_a : Union[str, Any]=False ):
'''simple docstring'''
_a, _a : Any = original_size
_a, _a : int = self.image_processor._get_preprocess_shape(_a ,longest_edge=_a )
_a : Dict = deepcopy(_a ).astype(_a )
if is_bounding_box:
_a : str = coords.reshape(-1 ,2 ,2 )
_a : List[Any] = coords[..., 0] * (new_w / old_w)
_a : str = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
_a : List[Any] = coords.reshape(-1 ,4 )
return coords
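    # Input validation: accepts framework tensors or nested Python lists for
    # points, labels and boxes, converting each to a list of numpy arrays and
    # raising early on malformed nesting.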
def __lowercase ( self : str ,_a : str=None ,_a : int=None ,_a : Dict=None ,):
'''simple docstring'''
if input_points is not None:
if hasattr(_a ,'numpy' ): # Checks for TF or Torch tensor
_a : str = input_points.numpy().tolist()
if not isinstance(_a ,_a ) or not isinstance(input_points[0] ,_a ):
                raise ValueError('Input points must be a list of lists of floating points.' )
_a : int = [np.array(_a ) for input_point in input_points]
else:
_a : List[Any] = None
if input_labels is not None:
if hasattr(_a ,'numpy' ):
_a : Dict = input_labels.numpy().tolist()
if not isinstance(_a ,_a ) or not isinstance(input_labels[0] ,_a ):
                raise ValueError('Input labels must be a list of lists of integers.' )
_a : Tuple = [np.array(_a ) for label in input_labels]
else:
_a : Union[str, Any] = None
if input_boxes is not None:
if hasattr(_a ,'numpy' ):
_a : Optional[Any] = input_boxes.numpy().tolist()
if (
not isinstance(_a ,_a )
or not isinstance(input_boxes[0] ,_a )
or not isinstance(input_boxes[0][0] ,_a )
):
                raise ValueError('Input boxes must be a list of lists of lists of floating points.' )
_a : List[Any] = [np.array(_a ).astype(np.floataa ) for box in input_boxes]
else:
_a : int = None
return input_points, input_labels, input_boxes
@property
def __lowercase ( self : List[Any] ):
'''simple docstring'''
_a : Tuple = self.image_processor.model_input_names
return list(dict.fromkeys(_a ) )
def __lowercase ( self : Dict ,*_a : int ,**_a : Any ):
'''simple docstring'''
return self.image_processor.post_process_masks(*_a ,**_a )
| 5 |
'''simple docstring'''
import requests
from bsa import BeautifulSoup
def UpperCAmelCase_ (__a : str = "https://www.worldometers.info/coronavirus" ):
"""simple docstring"""
_a : List[str] = BeautifulSoup(requests.get(__a ).text , 'html.parser' )
_a : Dict = soup.findAll('h1' )
_a : Union[str, Any] = soup.findAll('div' , {'class': 'maincounter-number'} )
keys += soup.findAll('span' , {'class': 'panel-title'} )
values += soup.findAll('div' , {'class': 'number-table-main'} )
return {key.text.strip(): value.text.strip() for key, value in zip(__a , __a )}
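# NOTE: this scrape depends on the current HTML layout of worldometers.info;
# if the page structure changes, the tags and CSS classes above will need
# updating.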
if __name__ == "__main__":
print("""\033[1m""" + """COVID-19 Status of the World""" + """\033[0m\n""")
for key, value in world_covidaa_stats().items():
print(f'''{key}\n{value}\n''')
| 5 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__SCREAMING_SNAKE_CASE : int = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : str = ["""NllbTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Union[str, Any] = ["""NllbTokenizerFast"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
__SCREAMING_SNAKE_CASE : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
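# _LazyModule defers the actual tokenizer imports until an attribute is first
# accessed, keeping `import transformers` cheap even when sentencepiece and
# tokenizers are installed.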
| 31 |
import os
import string
import sys
__UpperCamelCase : List[Any] = 1 << 8
__UpperCamelCase : Union[str, Any] = {
"""tab""": ord("""\t"""),
"""newline""": ord("""\r"""),
"""esc""": 27,
"""up""": 65 + ARROW_KEY_FLAG,
"""down""": 66 + ARROW_KEY_FLAG,
"""right""": 67 + ARROW_KEY_FLAG,
"""left""": 68 + ARROW_KEY_FLAG,
"""mod_int""": 91,
"""undefined""": sys.maxsize,
"""interrupt""": 3,
"""insert""": 50,
"""delete""": 51,
"""pg_up""": 53,
"""pg_down""": 54,
}
__UpperCamelCase : Optional[Any] = KEYMAP["""up"""]
__UpperCamelCase : Tuple = KEYMAP["""left"""]
if sys.platform == "win32":
__UpperCamelCase : List[Any] = []
__UpperCamelCase : int = {
b"""\xe0H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
b"""\x00H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
b"""\xe0P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
b"""\x00P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
b"""\xe0M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
b"""\x00M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
b"""\xe0K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
b"""\x00K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
}
for i in range(10):
__UpperCamelCase : List[str] = ord(str(i))
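# The first helper below reads a single keystroke without waiting for Enter:
# on Windows it uses msvcrt and translates two-byte arrow/navigation prefixes
# through WIN_KEYMAP; on POSIX it temporarily switches the terminal into raw
# mode with tty/termios. The second helper then maps escape sequences onto the
# KEYMAP codes defined above.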
def a_ ( ) -> Optional[int]:
"""simple docstring"""
if os.name == "nt":
import msvcrt
snake_case__ = 'mbcs'
# Flush the keyboard buffer
while msvcrt.kbhit():
msvcrt.getch()
if len(_A ) == 0:
# Read the keystroke
snake_case__ = msvcrt.getch()
# If it is a prefix char, get second part
if ch in (b"\x00", b"\xe0"):
snake_case__ = ch + msvcrt.getch()
# Translate actual Win chars to bullet char types
try:
snake_case__ = chr(WIN_KEYMAP[cha] )
WIN_CH_BUFFER.append(chr(KEYMAP['mod_int'] ) )
WIN_CH_BUFFER.append(_A )
if ord(_A ) in (
KEYMAP["insert"] - 1 << 9,
KEYMAP["delete"] - 1 << 9,
KEYMAP["pg_up"] - 1 << 9,
KEYMAP["pg_down"] - 1 << 9,
):
WIN_CH_BUFFER.append(chr(126 ) )
snake_case__ = chr(KEYMAP['esc'] )
except KeyError:
snake_case__ = cha[1]
else:
snake_case__ = ch.decode(_A )
else:
snake_case__ = WIN_CH_BUFFER.pop(0 )
elif os.name == "posix":
import termios
import tty
snake_case__ = sys.stdin.fileno()
snake_case__ = termios.tcgetattr(_A )
try:
tty.setraw(_A )
snake_case__ = sys.stdin.read(1 )
finally:
termios.tcsetattr(_A , termios.TCSADRAIN , _A )
return ch
def a_ ( ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ = get_raw_chars()
if ord(_A ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
return char
elif ord(_A ) == KEYMAP["esc"]:
snake_case__ = get_raw_chars()
if ord(_A ) == KEYMAP["mod_int"]:
snake_case__ = get_raw_chars()
if ord(_A ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(_A ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
return chr(ord(_A ) + ARROW_KEY_FLAG )
else:
return KEYMAP["undefined"]
else:
return get_raw_chars()
else:
if char in string.printable:
return char
else:
return KEYMAP["undefined"]
| 307 | 0 |
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class __snake_case ( lowerCamelCase__ ):
def __init__( self , snake_case__ , snake_case__=13 , snake_case__=7 , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=99 , snake_case__=32 , snake_case__=5 , snake_case__=4 , snake_case__=37 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=16 , snake_case__=2 , snake_case__=0.02 , snake_case__=False , snake_case__=True , snake_case__="None" , snake_case__=3 , snake_case__=4 , snake_case__=None , ) -> str:
'''simple docstring'''
UpperCAmelCase : str =parent
UpperCAmelCase : Tuple =batch_size
UpperCAmelCase : int =seq_length
UpperCAmelCase : List[Any] =is_training
UpperCAmelCase : str =use_input_mask
UpperCAmelCase : List[str] =use_token_type_ids
UpperCAmelCase : Any =use_labels
UpperCAmelCase : Union[str, Any] =vocab_size
UpperCAmelCase : str =hidden_size
UpperCAmelCase : List[str] =num_hidden_layers
UpperCAmelCase : str =num_attention_heads
UpperCAmelCase : List[Any] =intermediate_size
UpperCAmelCase : List[Any] =hidden_act
UpperCAmelCase : List[str] =hidden_dropout_prob
UpperCAmelCase : List[Any] =attention_probs_dropout_prob
UpperCAmelCase : Tuple =max_position_embeddings
UpperCAmelCase : str =type_vocab_size
UpperCAmelCase : str =type_sequence_label_size
UpperCAmelCase : Optional[Any] =initializer_range
UpperCAmelCase : Any =num_labels
UpperCAmelCase : int =num_choices
UpperCAmelCase : Optional[int] =relative_attention
UpperCAmelCase : Optional[int] =position_biased_input
UpperCAmelCase : Any =pos_att_type
UpperCAmelCase : List[Any] =scope
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
UpperCAmelCase : Optional[Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase : Dict =None
if self.use_input_mask:
UpperCAmelCase : Tuple =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
UpperCAmelCase : Any =None
if self.use_token_type_ids:
UpperCAmelCase : List[Any] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase : Any =None
UpperCAmelCase : List[str] =None
UpperCAmelCase : Optional[Any] =None
if self.use_labels:
UpperCAmelCase : Tuple =ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase : int =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase : int =ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase : Optional[int] =self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
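    # Note: the labels above (sequence, token and choice) are only materialized
    # when use_labels is set, so the shared tests can exercise both the
    # supervised and unsupervised forward paths.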
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] =self.get_config()
UpperCAmelCase : int =300
return config
def UpperCAmelCase__ ( self , snake_case__ ) -> Tuple:
'''simple docstring'''
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def UpperCAmelCase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) -> Any:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] =DebertaModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase : Any =model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ )[0]
UpperCAmelCase : List[Any] =model(snake_case__ , token_type_ids=snake_case__ )[0]
UpperCAmelCase : str =model(snake_case__ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def UpperCAmelCase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) -> List[str]:
'''simple docstring'''
UpperCAmelCase : Tuple =DebertaForMaskedLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase : str =model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase : Optional[Any] =self.num_labels
UpperCAmelCase : int =DebertaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase : Optional[int] =model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(snake_case__ )
def UpperCAmelCase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) -> Any:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] =self.num_labels
UpperCAmelCase : Optional[Any] =DebertaForTokenClassification(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase : Optional[int] =model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : Dict =DebertaForQuestionAnswering(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase : Any =model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase : Dict =self.prepare_config_and_inputs()
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : int =config_and_inputs
UpperCAmelCase : Union[str, Any] ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __snake_case ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase : Dict = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
__lowerCamelCase : Dict = (
{
"""feature-extraction""": DebertaModel,
"""fill-mask""": DebertaForMaskedLM,
"""question-answering""": DebertaForQuestionAnswering,
"""text-classification""": DebertaForSequenceClassification,
"""token-classification""": DebertaForTokenClassification,
"""zero-shot""": DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
__lowerCamelCase : List[str] = True
__lowerCamelCase : Optional[Any] = False
__lowerCamelCase : Union[str, Any] = False
__lowerCamelCase : Union[str, Any] = False
__lowerCamelCase : List[str] = False
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase : Optional[int] =DebertaModelTester(self )
UpperCAmelCase : Optional[int] =ConfigTester(self , config_class=snake_case__ , hidden_size=37 )
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*snake_case__ )
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase : Optional[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*snake_case__ )
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
UpperCAmelCase : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*snake_case__ )
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*snake_case__ )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*snake_case__ )
@slow
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : List[Any] =DebertaModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class __snake_case ( unittest.TestCase ):
@unittest.skip(reason='''Model not available yet''' )
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
pass
@slow
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : List[Any] =DebertaModel.from_pretrained('''microsoft/deberta-base''' )
UpperCAmelCase : Optional[int] =torch.tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
UpperCAmelCase : int =torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
UpperCAmelCase : Dict =model(snake_case__ , attention_mask=snake_case__ )[0]
# compare the actual values for a slice.
UpperCAmelCase : List[Any] =torch.tensor(
[[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , snake_case__ , atol=1e-4 ) , f'''{output[:, 1:4, 1:4]}''' )
| 78 | from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def lowerCAmelCase_ ( __lowerCAmelCase = "isbn/0140328726" )-> dict:
'''simple docstring'''
UpperCAmelCase : Tuple =olid.strip().strip('''/''' ) # Remove leading/trailing whitespace & slashes
if new_olid.count('''/''' ) != 1:
UpperCAmelCase : Any =f'''{olid} is not a valid Open Library olid'''
raise ValueError(__lowerCAmelCase )
return requests.get(f'''https://openlibrary.org/{new_olid}.json''' ).json()
def lowerCAmelCase_ ( __lowerCAmelCase )-> dict:
'''simple docstring'''
UpperCAmelCase : int ={
'''title''': '''Title''',
'''publish_date''': '''Publish date''',
'''authors''': '''Authors''',
'''number_of_pages''': '''Number of pages:''',
'''first_sentence''': '''First sentence''',
'''isbn_10''': '''ISBN (10)''',
'''isbn_13''': '''ISBN (13)''',
}
UpperCAmelCase : Tuple ={better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
UpperCAmelCase : Optional[int] =[
get_openlibrary_data(author['''key'''] )['''name'''] for author in data['''Authors''']
]
UpperCAmelCase : List[str] =data['''First sentence''']['''value''']
for key, value in data.items():
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
UpperCAmelCase : str =''', '''.join(__lowerCAmelCase )
return data
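# Each author entry is a nested olid reference, so the summary resolves every
# author name with a follow-up API call; list values are joined into a single
# comma-separated string for display.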
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
__snake_case = input('''\nEnter the ISBN code to search (or \'quit\' to stop): ''').strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(f'Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.')
continue
print(f'\nSearching Open Library for ISBN: {isbn}...\n')
try:
__snake_case = summarize_book(get_openlibrary_data(f'isbn/{isbn}'))
print('''\n'''.join(f'{key}: {value}' for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(f'Sorry, there are no results for ISBN: {isbn}.')
| 78 | 1 |
"""simple docstring"""
def __lowerCAmelCase ( lowercase : int , lowercase : list ) -> int:
"""simple docstring"""
_enforce_args(lowercase , lowercase )
if n == 0:
return 0
snake_case : Dict = float("-inf" )
for i in range(1 , n + 1 ):
snake_case : Optional[int] = max(
lowercase , prices[i - 1] + naive_cut_rod_recursive(n - i , lowercase ) )
return max_revue
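# All three implementations solve the same cut-rod recurrence:
#   r(0) = 0,   r(n) = max over 1 <= i <= n of ( prices[i - 1] + r(n - i) )
# The naive version above takes exponential time; the memoized (top-down) and
# bottom-up versions below both run in O(n^2).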
def __lowerCAmelCase ( lowercase : int , lowercase : list ) -> Union[str, Any]:
"""simple docstring"""
_enforce_args(lowercase , lowercase )
snake_case : Union[str, Any] = [float("-inf" ) for _ in range(n + 1 )]
return _top_down_cut_rod_recursive(lowercase , lowercase , lowercase )
def __lowerCAmelCase ( lowercase : int , lowercase : list , lowercase : list ) -> Optional[int]:
"""simple docstring"""
if max_rev[n] >= 0:
return max_rev[n]
elif n == 0:
return 0
else:
snake_case : Tuple = float("-inf" )
for i in range(1 , n + 1 ):
snake_case : Tuple = max(
lowercase , prices[i - 1] + _top_down_cut_rod_recursive(n - i , lowercase , lowercase ) , )
snake_case : Union[str, Any] = max_revenue
return max_rev[n]
def __lowerCAmelCase ( lowercase : int , lowercase : list ) -> int:
"""simple docstring"""
_enforce_args(lowercase , lowercase )
    # length(max_rev) = n + 1, to accommodate the revenue obtainable from a rod of
    # length 0.
snake_case : Dict = [float("-inf" ) for _ in range(n + 1 )]
snake_case : Optional[Any] = 0
for i in range(1 , n + 1 ):
snake_case : int = max_rev[i]
for j in range(1 , i + 1 ):
snake_case : Optional[int] = max(lowercase , prices[j - 1] + max_rev[i - j] )
snake_case : List[Any] = max_revenue_i
return max_rev[n]
def __lowerCAmelCase ( lowercase : int , lowercase : list ) -> Union[str, Any]:
"""simple docstring"""
if n < 0:
snake_case : int = F'n must be greater than or equal to 0. Got n = {n}'
raise ValueError(lowercase )
if n > len(lowercase ):
snake_case : int = (
"Each integral piece of rod must have a corresponding price. "
F'Got n = {n} but length of prices = {len(lowercase )}'
)
raise ValueError(lowercase )
def __lowerCAmelCase ( ) -> Union[str, Any]:
"""simple docstring"""
snake_case : int = [6, 10, 12, 15, 20, 23]
snake_case : Dict = len(lowercase )
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1, resulting in a revenue of 6 * 6 = 36.
snake_case : Any = 36
snake_case : Optional[Any] = top_down_cut_rod(lowercase , lowercase )
snake_case : Any = bottom_up_cut_rod(lowercase , lowercase )
snake_case : Optional[Any] = naive_cut_rod_recursive(lowercase , lowercase )
assert expected_max_revenue == max_rev_top_down
assert max_rev_top_down == max_rev_bottom_up
assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
| 203 |
"""simple docstring"""
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
__snake_case = """hf-internal-testing/tiny-random-bert"""
__snake_case = os.path.join(TRANSFORMERS_CACHE, """models--hf-internal-testing--tiny-random-bert""")
__snake_case = """9b8c223d42b2188cb49d29af482996f9d0f3e5a6"""
class _lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase ( self ) -> str:
'''simple docstring'''
snake_case : int = cached_file(UpperCamelCase__ , UpperCamelCase__ )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(UpperCamelCase__ ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) ) )
with open(os.path.join(UpperCamelCase__ , "refs" , "main" ) ) as f:
snake_case : Dict = f.read()
self.assertEqual(UpperCamelCase__ , os.path.join(UpperCamelCase__ , "snapshots" , UpperCamelCase__ , UpperCamelCase__ ) )
self.assertTrue(os.path.isfile(UpperCamelCase__ ) )
# File is cached at the same place the second time.
snake_case : List[str] = cached_file(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
# Using a specific revision to test the full commit hash.
snake_case : Any = cached_file(UpperCamelCase__ , UpperCamelCase__ , revision="9b8c223" )
self.assertEqual(UpperCamelCase__ , os.path.join(UpperCamelCase__ , "snapshots" , UpperCamelCase__ , UpperCamelCase__ ) )
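    # The tests below exercise the error paths: invalid repo identifiers,
    # invalid revisions, missing files (with and without raised exceptions),
    # and a mocked HTTP 500 response to simulate connection problems.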
def lowerCamelCase ( self ) -> Any:
'''simple docstring'''
with self.assertRaisesRegex(UpperCamelCase__ , "is not a valid model identifier" ):
snake_case : Optional[Any] = cached_file("tiny-random-bert" , UpperCamelCase__ )
with self.assertRaisesRegex(UpperCamelCase__ , "is not a valid git identifier" ):
snake_case : Optional[Any] = cached_file(UpperCamelCase__ , UpperCamelCase__ , revision="aaaa" )
with self.assertRaisesRegex(UpperCamelCase__ , "does not appear to have a file named" ):
snake_case : List[Any] = cached_file(UpperCamelCase__ , "conf" )
def lowerCamelCase ( self ) -> Tuple:
'''simple docstring'''
with self.assertRaisesRegex(UpperCamelCase__ , "does not appear to have a file named" ):
snake_case : Tuple = cached_file(UpperCamelCase__ , "conf" )
with open(os.path.join(UpperCamelCase__ , "refs" , "main" ) ) as f:
snake_case : Any = f.read()
self.assertTrue(os.path.isfile(os.path.join(UpperCamelCase__ , ".no_exist" , UpperCamelCase__ , "conf" ) ) )
snake_case : Optional[Any] = cached_file(UpperCamelCase__ , "conf" , _raise_exceptions_for_missing_entries=UpperCamelCase__ )
self.assertIsNone(UpperCamelCase__ )
snake_case : Any = cached_file(UpperCamelCase__ , "conf" , local_files_only=UpperCamelCase__ , _raise_exceptions_for_missing_entries=UpperCamelCase__ )
self.assertIsNone(UpperCamelCase__ )
snake_case : Any = mock.Mock()
snake_case : List[Any] = 500
snake_case : int = {}
snake_case : Optional[int] = HTTPError
snake_case : Tuple = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.Session.request" , return_value=UpperCamelCase__ ) as mock_head:
snake_case : Tuple = cached_file(UpperCamelCase__ , "conf" , _raise_exceptions_for_connection_errors=UpperCamelCase__ )
self.assertIsNone(UpperCamelCase__ )
            # This checks that we did call the fake head request
mock_head.assert_called()
def lowerCamelCase ( self ) -> Any:
'''simple docstring'''
self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only" , UpperCamelCase__ ) )
self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only" , UpperCamelCase__ ) )
self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only" , UpperCamelCase__ ) )
def lowerCamelCase ( self ) -> str:
'''simple docstring'''
self.assertIsNone(get_file_from_repo("bert-base-cased" , "ahah.txt" ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(UpperCamelCase__ , "is not a valid model identifier" ):
get_file_from_repo("bert-base-case" , UpperCamelCase__ )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(UpperCamelCase__ , "is not a valid git identifier" ):
get_file_from_repo("bert-base-cased" , UpperCamelCase__ , revision="ahaha" )
snake_case : int = get_file_from_repo("bert-base-cased" , UpperCamelCase__ )
# The name is the cached name which is not very easy to test, so instead we load the content.
snake_case : str = json.loads(open(UpperCamelCase__ , "r" ).read() )
self.assertEqual(config["hidden_size"] , 768 )
def lowerCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case : int = Path(UpperCamelCase__ ) / "a.txt"
filename.touch()
self.assertEqual(get_file_from_repo(UpperCamelCase__ , "a.txt" ) , str(UpperCamelCase__ ) )
self.assertIsNone(get_file_from_repo(UpperCamelCase__ , "b.txt" ) )
| 203 | 1 |
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class _lowerCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
a_ : Tuple =(DDPMScheduler,)
def UpperCamelCase_ ( self : Optional[int] , **UpperCamelCase : Tuple ):
'''simple docstring'''
_snake_case : Dict = {
'num_train_timesteps': 10_00,
'beta_start': 0.00_01,
'beta_end': 0.02,
'beta_schedule': 'linear',
'variance_type': 'fixed_small',
'clip_sample': True,
}
config.update(**UpperCamelCase )
return config
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
for timesteps in [1, 5, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=UpperCamelCase )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=UpperCamelCase , beta_end=UpperCamelCase )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=UpperCamelCase )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=UpperCamelCase )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=UpperCamelCase )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
self.check_over_configs(thresholding=UpperCamelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=UpperCamelCase , prediction_type=UpperCamelCase , sample_max_value=UpperCamelCase , )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCamelCase )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
for t in [0, 5_00, 9_99]:
self.check_over_forward(time_step=UpperCamelCase )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_snake_case : Dict = self.scheduler_classes[0]
_snake_case : Union[str, Any] = self.get_scheduler_config()
_snake_case : Tuple = scheduler_class(**UpperCamelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.0_09_79 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.02 ) ) < 1e-5
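    # The full-loop tests below run the reverse diffusion process end to end
    # with a dummy model and compare the summed/mean absolute values of the
    # final sample against reference numbers for both the epsilon and
    # v-prediction parameterizations.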
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.scheduler_classes[0]
_snake_case : Optional[Any] = self.get_scheduler_config()
_snake_case : int = scheduler_class(**UpperCamelCase )
_snake_case : Tuple = len(UpperCamelCase )
_snake_case : Dict = self.dummy_model()
_snake_case : Optional[int] = self.dummy_sample_deter
_snake_case : Optional[Any] = torch.manual_seed(0 )
for t in reversed(range(UpperCamelCase ) ):
# 1. predict noise residual
_snake_case : Tuple = model(UpperCamelCase , UpperCamelCase )
# 2. predict previous mean of sample x_t-1
_snake_case : Tuple = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase , generator=UpperCamelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_snake_case : Dict = pred_prev_sample
_snake_case : int = torch.sum(torch.abs(UpperCamelCase ) )
_snake_case : Union[str, Any] = torch.mean(torch.abs(UpperCamelCase ) )
assert abs(result_sum.item() - 2_58.96_06 ) < 1e-2
assert abs(result_mean.item() - 0.33_72 ) < 1e-3
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : List[str] = self.scheduler_classes[0]
_snake_case : List[Any] = self.get_scheduler_config(prediction_type='v_prediction' )
_snake_case : str = scheduler_class(**UpperCamelCase )
_snake_case : Union[str, Any] = len(UpperCamelCase )
_snake_case : Tuple = self.dummy_model()
_snake_case : Tuple = self.dummy_sample_deter
_snake_case : Optional[Any] = torch.manual_seed(0 )
for t in reversed(range(UpperCamelCase ) ):
# 1. predict noise residual
_snake_case : int = model(UpperCamelCase , UpperCamelCase )
# 2. predict previous mean of sample x_t-1
_snake_case : Optional[int] = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase , generator=UpperCamelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_snake_case : int = pred_prev_sample
_snake_case : str = torch.sum(torch.abs(UpperCamelCase ) )
_snake_case : Optional[int] = torch.mean(torch.abs(UpperCamelCase ) )
assert abs(result_sum.item() - 2_02.02_96 ) < 1e-2
assert abs(result_mean.item() - 0.26_31 ) < 1e-3
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_snake_case : Optional[Any] = self.scheduler_classes[0]
_snake_case : List[Any] = self.get_scheduler_config()
_snake_case : Optional[int] = scheduler_class(**UpperCamelCase )
_snake_case : int = [1_00, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=UpperCamelCase )
_snake_case : str = scheduler.timesteps
for i, timestep in enumerate(UpperCamelCase ):
if i == len(UpperCamelCase ) - 1:
_snake_case : Dict = -1
else:
_snake_case : List[str] = timesteps[i + 1]
_snake_case : Tuple = scheduler.previous_timestep(UpperCamelCase )
_snake_case : Union[str, Any] = prev_t.item()
self.assertEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case : int = self.scheduler_classes[0]
_snake_case : Any = self.get_scheduler_config()
_snake_case : Optional[int] = scheduler_class(**UpperCamelCase )
_snake_case : int = [1_00, 87, 50, 51, 0]
with self.assertRaises(UpperCamelCase , msg='`custom_timesteps` must be in descending order.' ):
scheduler.set_timesteps(timesteps=UpperCamelCase )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : Any = self.scheduler_classes[0]
_snake_case : Optional[Any] = self.get_scheduler_config()
_snake_case : Optional[Any] = scheduler_class(**UpperCamelCase )
_snake_case : Union[str, Any] = [1_00, 87, 50, 1, 0]
_snake_case : Any = len(UpperCamelCase )
with self.assertRaises(UpperCamelCase , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ):
scheduler.set_timesteps(num_inference_steps=UpperCamelCase , timesteps=UpperCamelCase )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_snake_case : Tuple = self.scheduler_classes[0]
_snake_case : Dict = self.get_scheduler_config()
_snake_case : Optional[Any] = scheduler_class(**UpperCamelCase )
_snake_case : Optional[Any] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            UpperCamelCase , msg=f'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}' , ):
scheduler.set_timesteps(timesteps=UpperCamelCase )
| 260 |
from ..utils import DummyObject, requires_backends
class _lowerCAmelCase ( metaclass=UpperCAmelCase_ ):
'''simple docstring'''
a_ : Optional[int] =["""speech"""]
def __init__( self : Optional[int] , *UpperCamelCase : int , **UpperCamelCase : str ):
'''simple docstring'''
requires_backends(self , ['speech'] )
class _lowerCAmelCase ( metaclass=UpperCAmelCase_ ):
'''simple docstring'''
a_ : Optional[Any] =["""speech"""]
def __init__( self : Any , *UpperCamelCase : Any , **UpperCamelCase : List[Any] ):
'''simple docstring'''
requires_backends(self , ['speech'] )
| 260 | 1 |
'''simple docstring'''
def __lowerCamelCase ( A__ = 10**9 ) -> int:
"""simple docstring"""
UpperCamelCase = 1
UpperCamelCase = 2
UpperCamelCase = 0
UpperCamelCase = 0
UpperCamelCase = 0
while perimeter <= max_perimeter:
perimeters_sum += perimeter
prev_value += 2 * value
value += prev_value
UpperCamelCase = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
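# This iterates what appears to be a Pell-type recurrence: on each step the
# alternating `2 * value +/- 2` expression yields the next qualifying
# perimeter, and every perimeter up to the limit is accumulated.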
if __name__ == "__main__":
print(f'''{solution() = }''')
| 28 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 281 | 0 |
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase ( __UpperCamelCase, unittest.TestCase ):
UpperCAmelCase__ = FunnelTokenizer
UpperCAmelCase__ = FunnelTokenizerFast
UpperCAmelCase__ = True
UpperCAmelCase__ = True
def A_ ( self : Any ) -> Dict:
super().setUp()
lowerCamelCase__ : Tuple = [
'<unk>',
'<cls>',
'<sep>',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
lowerCamelCase__ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def A_ ( self : List[Any] , **UpperCAmelCase : Any ) -> Tuple:
return FunnelTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def A_ ( self : Tuple , **UpperCAmelCase : Tuple ) -> List[str]:
return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def A_ ( self : Optional[int] , UpperCAmelCase : int ) -> str:
lowerCamelCase__ : Optional[Any] = 'UNwant\u00E9d,running'
lowerCamelCase__ : str = 'unwanted, running'
return input_text, output_text
def A_ ( self : Dict ) -> str:
lowerCamelCase__ : Union[str, Any] = self.tokenizer_class(self.vocab_file )
lowerCamelCase__ : List[str] = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(UpperCAmelCase , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [7, 4, 5, 10, 8, 9] )
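    # In the test above, tokens prefixed with `##` are WordPiece continuation
    # pieces, and the expected ids index into the toy vocabulary written out in
    # setUp.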
def A_ ( self : int ) -> str:
lowerCamelCase__ : List[Any] = self.get_tokenizers(do_lower_case=UpperCAmelCase )
for tokenizer in tokenizers:
lowerCamelCase__ : Optional[int] = tokenizer('UNwant\u00E9d,running' )
lowerCamelCase__ : List[str] = len(inputs['input_ids'] ) - 1
self.assertListEqual(inputs['token_type_ids'] , [2] + [0] * sentence_len )
lowerCamelCase__ : Tuple = tokenizer('UNwant\u00E9d,running' , 'UNwant\u00E9d,running' )
self.assertListEqual(inputs['token_type_ids'] , [2] + [0] * sentence_len + [1] * sentence_len )
| 45 |
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
_UpperCAmelCase : Tuple = [
"""openmmlab/upernet-convnext-tiny""",
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
_UpperCAmelCase : List[str] = """UperNetConfig"""
class lowerCAmelCase ( nn.Module ):
def __init__( self : int , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : Union[int, Tuple[int, int]] , UpperCAmelCase : Union[int, Tuple[int, int], str] = 0 , UpperCAmelCase : bool = False , UpperCAmelCase : Union[int, Tuple[int, int]] = 1 , ) -> None:
super().__init__()
lowerCamelCase__ : Any = nn.Convad(
in_channels=UpperCAmelCase , out_channels=UpperCAmelCase , kernel_size=UpperCAmelCase , padding=UpperCAmelCase , bias=UpperCAmelCase , dilation=UpperCAmelCase , )
lowerCamelCase__ : str = nn.BatchNormad(UpperCAmelCase )
lowerCamelCase__ : Tuple = nn.ReLU()
def A_ ( self : Tuple , UpperCAmelCase : torch.Tensor ) -> torch.Tensor:
lowerCamelCase__ : Tuple = self.conv(UpperCAmelCase )
lowerCamelCase__ : int = self.batch_norm(UpperCAmelCase )
lowerCamelCase__ : List[Any] = self.activation(UpperCAmelCase )
return output
class lowerCAmelCase ( nn.Module ):
def __init__( self : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : int ) -> None:
super().__init__()
lowerCamelCase__ : int = [
nn.AdaptiveAvgPoolad(UpperCAmelCase ),
UperNetConvModule(UpperCAmelCase , UpperCAmelCase , kernel_size=1 ),
]
for i, layer in enumerate(self.layers ):
self.add_module(str(UpperCAmelCase ) , UpperCAmelCase )
def A_ ( self : Union[str, Any] , UpperCAmelCase : torch.Tensor ) -> torch.Tensor:
lowerCamelCase__ : Dict = input
for layer in self.layers:
lowerCamelCase__ : Tuple = layer(UpperCAmelCase )
return hidden_state
class lowerCAmelCase ( nn.Module ):
def __init__( self : Tuple , UpperCAmelCase : Tuple[int, ...] , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : bool ) -> None:
super().__init__()
lowerCamelCase__ : int = pool_scales
lowerCamelCase__ : Tuple = align_corners
lowerCamelCase__ : Union[str, Any] = in_channels
lowerCamelCase__ : List[Any] = channels
lowerCamelCase__ : Tuple = []
for i, pool_scale in enumerate(UpperCAmelCase ):
lowerCamelCase__ : Dict = UperNetPyramidPoolingBlock(pool_scale=UpperCAmelCase , in_channels=UpperCAmelCase , channels=UpperCAmelCase )
self.blocks.append(UpperCAmelCase )
self.add_module(str(UpperCAmelCase ) , UpperCAmelCase )
def A_ ( self : Optional[int] , UpperCAmelCase : torch.Tensor ) -> List[torch.Tensor]:
lowerCamelCase__ : Tuple = []
for ppm in self.blocks:
lowerCamelCase__ : Union[str, Any] = ppm(UpperCAmelCase )
lowerCamelCase__ : Optional[Any] = nn.functional.interpolate(
UpperCAmelCase , size=x.size()[2:] , mode='bilinear' , align_corners=self.align_corners )
ppm_outs.append(UpperCAmelCase )
return ppm_outs
class lowerCAmelCase ( nn.Module ):
def __init__( self : str , UpperCAmelCase : int , UpperCAmelCase : Any ) -> int:
super().__init__()
lowerCamelCase__ : Tuple = config
lowerCamelCase__ : Optional[Any] = config.pool_scales # e.g. (1, 2, 3, 6)
lowerCamelCase__ : List[Any] = in_channels
lowerCamelCase__ : Optional[int] = config.hidden_size
lowerCamelCase__ : Dict = False
lowerCamelCase__ : List[Any] = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
# PSP Module
lowerCamelCase__ : Tuple = UperNetPyramidPoolingModule(
self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , )
lowerCamelCase__ : int = UperNetConvModule(
self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
# FPN Module
lowerCamelCase__ : str = nn.ModuleList()
lowerCamelCase__ : str = nn.ModuleList()
for in_channels in self.in_channels[:-1]: # skip the top layer
lowerCamelCase__ : str = UperNetConvModule(UpperCAmelCase , self.channels , kernel_size=1 )
lowerCamelCase__ : int = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 )
self.lateral_convs.append(UpperCAmelCase )
self.fpn_convs.append(UpperCAmelCase )
lowerCamelCase__ : List[Any] = UperNetConvModule(
len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
def A_ ( self : Tuple ) -> List[Any]:
self.apply(self._init_weights )
def A_ ( self : Tuple , UpperCAmelCase : Dict ) -> str:
if isinstance(UpperCAmelCase , nn.Convad ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def A_ ( self : Optional[int] , UpperCAmelCase : Optional[int] ) -> Optional[int]:
lowerCamelCase__ : str = inputs[-1]
lowerCamelCase__ : List[str] = [x]
psp_outs.extend(self.psp_modules(UpperCAmelCase ) )
lowerCamelCase__ : Tuple = torch.cat(UpperCAmelCase , dim=1 )
lowerCamelCase__ : Optional[Any] = self.bottleneck(UpperCAmelCase )
return output
def A_ ( self : str , UpperCAmelCase : torch.Tensor ) -> torch.Tensor:
# build laterals
lowerCamelCase__ : Union[str, Any] = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
laterals.append(self.psp_forward(UpperCAmelCase ) )
# build top-down path
lowerCamelCase__ : Tuple = len(UpperCAmelCase )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
lowerCamelCase__ : Optional[Any] = laterals[i - 1].shape[2:]
lowerCamelCase__ : Any = laterals[i - 1] + nn.functional.interpolate(
laterals[i] , size=UpperCAmelCase , mode='bilinear' , align_corners=self.align_corners )
# build outputs
lowerCamelCase__ : Any = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
# append psp feature
fpn_outs.append(laterals[-1] )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
lowerCamelCase__ : Optional[Any] = nn.functional.interpolate(
fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode='bilinear' , align_corners=self.align_corners )
lowerCamelCase__ : Dict = torch.cat(UpperCAmelCase , dim=1 )
lowerCamelCase__ : List[str] = self.fpn_bottleneck(UpperCAmelCase )
lowerCamelCase__ : int = self.classifier(UpperCAmelCase )
return output
class lowerCAmelCase ( nn.Module ):
def __init__( self : Optional[int] , UpperCAmelCase : Optional[int] , UpperCAmelCase : int = 2 , UpperCAmelCase : int = 3 , UpperCAmelCase : Union[int, Tuple[int, int]] = 1 ) -> None:
super().__init__()
lowerCamelCase__ : Any = config
lowerCamelCase__ : Optional[Any] = config.auxiliary_in_channels
lowerCamelCase__ : str = config.auxiliary_channels
lowerCamelCase__ : Optional[Any] = config.auxiliary_num_convs
lowerCamelCase__ : str = config.auxiliary_concat_input
lowerCamelCase__ : List[Any] = in_index
lowerCamelCase__ : List[str] = (kernel_size // 2) * dilation
lowerCamelCase__ : Optional[int] = []
convs.append(
UperNetConvModule(
self.in_channels , self.channels , kernel_size=UpperCAmelCase , padding=UpperCAmelCase , dilation=UpperCAmelCase ) )
for i in range(self.num_convs - 1 ):
convs.append(
UperNetConvModule(
self.channels , self.channels , kernel_size=UpperCAmelCase , padding=UpperCAmelCase , dilation=UpperCAmelCase ) )
if self.num_convs == 0:
lowerCamelCase__ : Optional[Any] = nn.Identity()
else:
lowerCamelCase__ : Optional[Any] = nn.Sequential(*UpperCAmelCase )
if self.concat_input:
lowerCamelCase__ : Any = UperNetConvModule(
self.in_channels + self.channels , self.channels , kernel_size=UpperCAmelCase , padding=kernel_size // 2 )
lowerCamelCase__ : Dict = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
def A_ ( self : Tuple ) -> Tuple:
self.apply(self._init_weights )
def A_ ( self : Union[str, Any] , UpperCAmelCase : List[Any] ) -> List[str]:
if isinstance(UpperCAmelCase , nn.Convad ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def A_ ( self : Tuple , UpperCAmelCase : torch.Tensor ) -> torch.Tensor:
# just take the relevant feature maps
lowerCamelCase__ : str = encoder_hidden_states[self.in_index]
lowerCamelCase__ : Union[str, Any] = self.convs(UpperCAmelCase )
if self.concat_input:
lowerCamelCase__ : Optional[Any] = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) )
lowerCamelCase__ : Optional[int] = self.classifier(UpperCAmelCase )
return output
class lowerCAmelCase ( __UpperCamelCase ):
UpperCAmelCase__ = UperNetConfig
UpperCAmelCase__ = """pixel_values"""
UpperCAmelCase__ = True
def A_ ( self : Optional[Any] , UpperCAmelCase : Union[str, Any] ) -> Optional[int]:
if isinstance(UpperCAmelCase , UpperCAmelCase ):
module.backbone.init_weights()
module.decode_head.init_weights()
module.auxiliary_head.init_weights()
def A_ ( self : str ) -> Tuple:
self.backbone.init_weights()
self.decode_head.init_weights()
self.auxiliary_head.init_weights()
def A_ ( self : Any , UpperCAmelCase : Dict , UpperCAmelCase : Optional[int]=False ) -> str:
if isinstance(UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase__ : Any = value
_UpperCAmelCase : List[Any] = R"""
Parameters:
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
_UpperCAmelCase : Union[str, Any] = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
[`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See
`attentions` under returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under
returned tensors for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"""UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.""", __UpperCamelCase, )
class lowerCAmelCase ( __UpperCamelCase ):
def __init__( self : Optional[int] , UpperCAmelCase : Union[str, Any] ) -> Dict:
super().__init__(UpperCAmelCase )
lowerCamelCase__ : List[str] = AutoBackbone.from_config(config.backbone_config )
# Semantic segmentation head(s)
lowerCamelCase__ : List[Any] = UperNetHead(UpperCAmelCase , in_channels=self.backbone.channels )
lowerCamelCase__ : int = UperNetFCNHead(UpperCAmelCase ) if config.use_auxiliary_head else None
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format('batch_size, sequence_length' ) )
@replace_return_docstrings(output_type=UpperCAmelCase , config_class=_CONFIG_FOR_DOC )
def A_ ( self : Union[str, Any] , UpperCAmelCase : Optional[torch.Tensor] = None , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : Optional[torch.Tensor] = None , UpperCAmelCase : Optional[bool] = None , ) -> Union[tuple, SemanticSegmenterOutput]:
lowerCamelCase__ : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
lowerCamelCase__ : int = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCamelCase__ : str = output_attentions if output_attentions is not None else self.config.output_attentions
lowerCamelCase__ : Optional[Any] = self.backbone.forward_with_filtered_kwargs(
UpperCAmelCase , output_hidden_states=UpperCAmelCase , output_attentions=UpperCAmelCase )
lowerCamelCase__ : List[str] = outputs.feature_maps
lowerCamelCase__ : Optional[int] = self.decode_head(UpperCAmelCase )
lowerCamelCase__ : Optional[Any] = nn.functional.interpolate(UpperCAmelCase , size=pixel_values.shape[2:] , mode='bilinear' , align_corners=UpperCAmelCase )
lowerCamelCase__ : List[str] = None
if self.auxiliary_head is not None:
lowerCamelCase__ : List[Any] = self.auxiliary_head(UpperCAmelCase )
lowerCamelCase__ : Union[str, Any] = nn.functional.interpolate(
UpperCAmelCase , size=pixel_values.shape[2:] , mode='bilinear' , align_corners=UpperCAmelCase )
lowerCamelCase__ : List[str] = None
if labels is not None:
if self.config.num_labels == 1:
raise ValueError('The number of labels should be greater than one' )
else:
# compute weighted loss
lowerCamelCase__ : Optional[Any] = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
lowerCamelCase__ : str = loss_fct(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase__ : Tuple = loss_fct(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase__ : Tuple = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
if not return_dict:
if output_hidden_states:
lowerCamelCase__ : List[str] = (logits,) + outputs[1:]
else:
lowerCamelCase__ : int = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SemanticSegmenterOutput(
loss=UpperCAmelCase , logits=UpperCAmelCase , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
| 45 | 1 |
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def UpperCAmelCase_ ( __snake_case ) -> int:
"""simple docstring"""
return (data["data"], data["target"])
def UpperCAmelCase_ ( __snake_case , __snake_case ) -> Any:
"""simple docstring"""
_lowercase =XGBClassifier()
classifier.fit(_a , _a )
return classifier
def UpperCAmelCase_ ( ) -> Optional[int]:
"""simple docstring"""
_lowercase =load_iris()
_lowercase , _lowercase =data_handling(_a )
_lowercase , _lowercase , _lowercase , _lowercase =train_test_split(
_a , _a , test_size=0.25 )
_lowercase =iris['''target_names''']
# Create an XGBoost Classifier from the training data
_lowercase =xgboost(_a , _a )
# Display the confusion matrix of the classifier with both training and test sets
ConfusionMatrixDisplay.from_estimator(
_a , _a , _a , display_labels=_a , cmap='''Blues''' , normalize='''true''' , )
plt.title('''Normalized Confusion Matrix - IRIS Dataset''' )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 5 |
'''simple docstring'''
def snake_case_ (_a : str , _a : str ):
UpperCAmelCase = len(_a ) + 1
UpperCAmelCase = len(_a ) + 1
# dp is a 2d matrix where dp[i][j] denotes whether prefix string of
# length i of input_string matches with prefix string of length j of
# given pattern.
# "dp" stands for dynamic programming.
UpperCAmelCase = [[0 for i in range(_a )] for j in range(_a )]
# since string of zero length match pattern of zero length
UpperCAmelCase = 1
# since pattern of zero length will never match with string of non-zero length
for i in range(1 , _a ):
UpperCAmelCase = 0
# since string of zero length will match with pattern where there
# is at least one * alternatively
for j in range(1 , _a ):
UpperCAmelCase = dp[0][j - 2] if pattern[j - 1] == '''*''' else 0
# now using bottom-up approach to find for all remaining lengths
for i in range(1 , _a ):
for j in range(1 , _a ):
if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
UpperCAmelCase = dp[i - 1][j - 1]
elif pattern[j - 1] == "*":
if dp[i][j - 2] == 1:
UpperCAmelCase = 1
elif pattern[j - 2] in (input_string[i - 1], "."):
UpperCAmelCase = dp[i - 1][j]
else:
UpperCAmelCase = 0
else:
UpperCAmelCase = 0
return bool(dp[-1][-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
A ='aab'
A ='c*a*b'
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(f"""{input_string} matches the given pattern {pattern}""")
else:
print(f"""{input_string} does not match with the given pattern {pattern}""")
| 34 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
def __lowerCAmelCase (_UpperCamelCase ):
__lowerCAmelCase : List[str] = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
__lowerCAmelCase : int = [144, 192, 240]
__lowerCAmelCase : Union[str, Any] = [16, 32, 64, 96, 128, 160, 640]
elif "mobilevit_xs" in mobilevit_name:
__lowerCAmelCase : Union[str, Any] = [96, 120, 144]
__lowerCAmelCase : Optional[int] = [16, 32, 48, 64, 80, 96, 384]
elif "mobilevit_xxs" in mobilevit_name:
__lowerCAmelCase : List[str] = [64, 80, 96]
__lowerCAmelCase : Any = [16, 16, 24, 48, 64, 80, 320]
__lowerCAmelCase : int = 0.05
__lowerCAmelCase : List[str] = 2.0
if mobilevit_name.startswith('deeplabv3_' ):
__lowerCAmelCase : Optional[int] = 512
__lowerCAmelCase : Optional[int] = 16
__lowerCAmelCase : List[Any] = 21
__lowerCAmelCase : Optional[int] = 'pascal-voc-id2label.json'
else:
__lowerCAmelCase : int = 1000
__lowerCAmelCase : Dict = 'imagenet-1k-id2label.json'
__lowerCAmelCase : Optional[int] = 'huggingface/label-files'
__lowerCAmelCase : Tuple = json.load(open(hf_hub_download(_UpperCamelCase , _UpperCamelCase , repo_type='dataset' ) , 'r' ) )
__lowerCAmelCase : Any = {int(_UpperCamelCase ): v for k, v in idalabel.items()}
__lowerCAmelCase : Union[str, Any] = idalabel
__lowerCAmelCase : Any = {v: k for k, v in idalabel.items()}
return config
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase=False ):
for i in range(1 , 6 ):
if F"layer_{i}." in name:
__lowerCAmelCase : Tuple = name.replace(F"layer_{i}." , F"encoder.layer.{i - 1}." )
if "conv_1." in name:
__lowerCAmelCase : Optional[Any] = name.replace('conv_1.' , 'conv_stem.' )
if ".block." in name:
__lowerCAmelCase : List[str] = name.replace('.block.' , '.' )
if "exp_1x1" in name:
__lowerCAmelCase : List[str] = name.replace('exp_1x1' , 'expand_1x1' )
if "red_1x1" in name:
__lowerCAmelCase : str = name.replace('red_1x1' , 'reduce_1x1' )
if ".local_rep.conv_3x3." in name:
__lowerCAmelCase : Optional[int] = name.replace('.local_rep.conv_3x3.' , '.conv_kxk.' )
if ".local_rep.conv_1x1." in name:
__lowerCAmelCase : Any = name.replace('.local_rep.conv_1x1.' , '.conv_1x1.' )
if ".norm." in name:
__lowerCAmelCase : Dict = name.replace('.norm.' , '.normalization.' )
if ".conv." in name:
__lowerCAmelCase : Dict = name.replace('.conv.' , '.convolution.' )
if ".conv_proj." in name:
__lowerCAmelCase : Union[str, Any] = name.replace('.conv_proj.' , '.conv_projection.' )
for i in range(0 , 2 ):
for j in range(0 , 4 ):
if F".{i}.{j}." in name:
__lowerCAmelCase : Dict = name.replace(F".{i}.{j}." , F".{i}.layer.{j}." )
for i in range(2 , 6 ):
for j in range(0 , 4 ):
if F".{i}.{j}." in name:
__lowerCAmelCase : Tuple = name.replace(F".{i}.{j}." , F".{i}." )
if "expand_1x1" in name:
__lowerCAmelCase : Dict = name.replace('expand_1x1' , 'downsampling_layer.expand_1x1' )
if "conv_3x3" in name:
__lowerCAmelCase : Any = name.replace('conv_3x3' , 'downsampling_layer.conv_3x3' )
if "reduce_1x1" in name:
__lowerCAmelCase : Tuple = name.replace('reduce_1x1' , 'downsampling_layer.reduce_1x1' )
for i in range(2 , 5 ):
if F".global_rep.{i}.weight" in name:
__lowerCAmelCase : Tuple = name.replace(F".global_rep.{i}.weight" , '.layernorm.weight' )
if F".global_rep.{i}.bias" in name:
__lowerCAmelCase : Optional[Any] = name.replace(F".global_rep.{i}.bias" , '.layernorm.bias' )
if ".global_rep." in name:
__lowerCAmelCase : Optional[Any] = name.replace('.global_rep.' , '.transformer.' )
if ".pre_norm_mha.0." in name:
__lowerCAmelCase : Dict = name.replace('.pre_norm_mha.0.' , '.layernorm_before.' )
if ".pre_norm_mha.1.out_proj." in name:
__lowerCAmelCase : Any = name.replace('.pre_norm_mha.1.out_proj.' , '.attention.output.dense.' )
if ".pre_norm_ffn.0." in name:
__lowerCAmelCase : Optional[int] = name.replace('.pre_norm_ffn.0.' , '.layernorm_after.' )
if ".pre_norm_ffn.1." in name:
__lowerCAmelCase : Dict = name.replace('.pre_norm_ffn.1.' , '.intermediate.dense.' )
if ".pre_norm_ffn.4." in name:
__lowerCAmelCase : List[Any] = name.replace('.pre_norm_ffn.4.' , '.output.dense.' )
if ".transformer." in name:
__lowerCAmelCase : Tuple = name.replace('.transformer.' , '.transformer.layer.' )
if ".aspp_layer." in name:
__lowerCAmelCase : Optional[int] = name.replace('.aspp_layer.' , '.' )
if ".aspp_pool." in name:
__lowerCAmelCase : List[str] = name.replace('.aspp_pool.' , '.' )
if "seg_head." in name:
__lowerCAmelCase : Optional[Any] = name.replace('seg_head.' , 'segmentation_head.' )
if "segmentation_head.classifier.classifier." in name:
__lowerCAmelCase : int = name.replace('segmentation_head.classifier.classifier.' , 'segmentation_head.classifier.' )
if "classifier.fc." in name:
__lowerCAmelCase : Tuple = name.replace('classifier.fc.' , 'classifier.' )
elif (not base_model) and ("segmentation_head." not in name):
__lowerCAmelCase : Union[str, Any] = 'mobilevit.' + name
return name
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase=False ):
if base_model:
__lowerCAmelCase : Optional[Any] = ''
else:
__lowerCAmelCase : Dict = 'mobilevit.'
for key in orig_state_dict.copy().keys():
__lowerCAmelCase : Any = orig_state_dict.pop(_UpperCamelCase )
if key[:8] == "encoder.":
__lowerCAmelCase : Union[str, Any] = key[8:]
if "qkv" in key:
__lowerCAmelCase : List[Any] = key.split('.' )
__lowerCAmelCase : Union[str, Any] = int(key_split[0][6:] ) - 1
__lowerCAmelCase : List[str] = int(key_split[3] )
__lowerCAmelCase : Dict = model.get_submodule(F"{model_prefix}encoder.layer.{layer_num}" )
__lowerCAmelCase : str = layer.transformer.layer[transformer_num].attention.attention.all_head_size
__lowerCAmelCase : List[str] = (
F"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
)
if "weight" in key:
__lowerCAmelCase : Dict = val[:dim, :]
__lowerCAmelCase : Union[str, Any] = val[dim : dim * 2, :]
__lowerCAmelCase : Union[str, Any] = val[-dim:, :]
else:
__lowerCAmelCase : Tuple = val[:dim]
__lowerCAmelCase : Optional[int] = val[dim : dim * 2]
__lowerCAmelCase : Any = val[-dim:]
else:
__lowerCAmelCase : List[str] = val
return orig_state_dict
def __lowerCAmelCase ():
__lowerCAmelCase : str = 'http://images.cocodataset.org/val2017/000000039769.jpg'
__lowerCAmelCase : Any = Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw )
return im
@torch.no_grad()
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=False ):
__lowerCAmelCase : Dict = get_mobilevit_config(_UpperCamelCase )
# load original state_dict
__lowerCAmelCase : Dict = torch.load(_UpperCamelCase , map_location='cpu' )
# load 🤗 model
if mobilevit_name.startswith('deeplabv3_' ):
__lowerCAmelCase : List[str] = MobileViTForSemanticSegmentation(_UpperCamelCase ).eval()
else:
__lowerCAmelCase : str = MobileViTForImageClassification(_UpperCamelCase ).eval()
__lowerCAmelCase : Optional[Any] = convert_state_dict(_UpperCamelCase , _UpperCamelCase )
model.load_state_dict(_UpperCamelCase )
# Check outputs on an image, prepared by MobileViTImageProcessor
__lowerCAmelCase : Optional[Any] = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
__lowerCAmelCase : Dict = image_processor(images=prepare_img() , return_tensors='pt' )
__lowerCAmelCase : List[str] = model(**_UpperCamelCase )
__lowerCAmelCase : Union[str, Any] = outputs.logits
if mobilevit_name.startswith('deeplabv3_' ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
__lowerCAmelCase : Any = torch.tensor(
[
[[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
[[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
[[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
__lowerCAmelCase : List[Any] = torch.tensor(
[
[[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
[[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
[[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
__lowerCAmelCase : Union[str, Any] = torch.tensor(
[
[[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
[[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
[[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
] )
else:
raise ValueError(F"Unknown mobilevit_name: {mobilevit_name}" )
assert torch.allclose(logits[0, :3, :3, :3] , _UpperCamelCase , atol=1e-4 )
else:
assert logits.shape == (1, 1000)
if mobilevit_name == "mobilevit_s":
__lowerCAmelCase : Any = torch.tensor([-0.9866, 0.2392, -1.1241] )
elif mobilevit_name == "mobilevit_xs":
__lowerCAmelCase : Tuple = torch.tensor([-2.4761, -0.9399, -1.9587] )
elif mobilevit_name == "mobilevit_xxs":
__lowerCAmelCase : Optional[int] = torch.tensor([-1.9364, -1.2327, -0.4653] )
else:
raise ValueError(F"Unknown mobilevit_name: {mobilevit_name}" )
assert torch.allclose(logits[0, :3] , _UpperCamelCase , atol=1e-4 )
Path(_UpperCamelCase ).mkdir(exist_ok=_UpperCamelCase )
print(F"Saving model {mobilevit_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_UpperCamelCase )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(_UpperCamelCase )
if push_to_hub:
__lowerCAmelCase : List[Any] = {
'mobilevit_s': 'mobilevit-small',
'mobilevit_xs': 'mobilevit-x-small',
'mobilevit_xxs': 'mobilevit-xx-small',
'deeplabv3_mobilevit_s': 'deeplabv3-mobilevit-small',
'deeplabv3_mobilevit_xs': 'deeplabv3-mobilevit-x-small',
'deeplabv3_mobilevit_xxs': 'deeplabv3-mobilevit-xx-small',
}
print('Pushing to the hub...' )
__lowerCAmelCase : str = model_mapping[mobilevit_name]
image_processor.push_to_hub(_UpperCamelCase , organization='apple' )
model.push_to_hub(_UpperCamelCase , organization='apple' )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--mobilevit_name""",
default="""mobilevit_s""",
type=str,
help=(
"""Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"""
""" 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."""
),
)
parser.add_argument(
"""--checkpoint_path""", required=True, type=str, help="""Path to the original state dict (.pt file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
lowerCamelCase__ = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
) | 182 |
"""simple docstring"""
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels | 182 | 1 |
'''simple docstring'''
import math
def lowerCamelCase ( __lowerCamelCase : int ) ->bool:
_SCREAMING_SNAKE_CASE = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
return exponent == int(__lowerCamelCase )
def lowerCamelCase ( __lowerCamelCase : float = 1 / 1_2345 ) ->int:
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = 3
while True:
_SCREAMING_SNAKE_CASE = (integer**2 - 1) / 4
# if candidate is an integer, then there is a partition for k
if partition_candidate == int(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE = int(__lowerCamelCase )
total_partitions += 1
if check_partition_perfect(__lowerCamelCase ):
perfect_partitions += 1
if perfect_partitions > 0:
if perfect_partitions / total_partitions < max_proportion:
return int(__lowerCamelCase )
integer += 1
if __name__ == "__main__":
print(f"""{solution() = }""")
| 58 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class A ( A_ ):
UpperCamelCase_ : torch.FloatTensor
class A ( A_ , A_ ):
@register_to_config
def __init__(self , lowerCAmelCase = 3 , lowerCAmelCase = 3 , lowerCAmelCase = ("DownEncoderBlock2D",) , lowerCAmelCase = ("UpDecoderBlock2D",) , lowerCAmelCase = (6_4,) , lowerCAmelCase = 1 , lowerCAmelCase = "silu" , lowerCAmelCase = 3 , lowerCAmelCase = 3_2 , lowerCAmelCase = 2_5_6 , lowerCAmelCase = 3_2 , lowerCAmelCase = None , lowerCAmelCase = 0.1_82_15 , lowerCAmelCase = "group" , ):
super().__init__()
# pass init params to Encoder
__lowercase= Encoder(
in_channels=lowerCAmelCase , out_channels=lowerCAmelCase , down_block_types=lowerCAmelCase , block_out_channels=lowerCAmelCase , layers_per_block=lowerCAmelCase , act_fn=lowerCAmelCase , norm_num_groups=lowerCAmelCase , double_z=lowerCAmelCase , )
__lowercase= vq_embed_dim if vq_embed_dim is not None else latent_channels
__lowercase= nn.Convad(lowerCAmelCase , lowerCAmelCase , 1 )
__lowercase= VectorQuantizer(lowerCAmelCase , lowerCAmelCase , beta=0.25 , remap=lowerCAmelCase , sane_index_shape=lowerCAmelCase )
__lowercase= nn.Convad(lowerCAmelCase , lowerCAmelCase , 1 )
# pass init params to Decoder
__lowercase= Decoder(
in_channels=lowerCAmelCase , out_channels=lowerCAmelCase , up_block_types=lowerCAmelCase , block_out_channels=lowerCAmelCase , layers_per_block=lowerCAmelCase , act_fn=lowerCAmelCase , norm_num_groups=lowerCAmelCase , norm_type=lowerCAmelCase , )
@apply_forward_hook
def _A (self , lowerCAmelCase , lowerCAmelCase = True ):
__lowercase= self.encoder(lowerCAmelCase )
__lowercase= self.quant_conv(lowerCAmelCase )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=lowerCAmelCase )
@apply_forward_hook
def _A (self , lowerCAmelCase , lowerCAmelCase = False , lowerCAmelCase = True ):
# also go through quantization layer
if not force_not_quantize:
__lowercase, __lowercase, __lowercase= self.quantize(lowerCAmelCase )
else:
__lowercase= h
__lowercase= self.post_quant_conv(lowerCAmelCase )
__lowercase= self.decoder(lowerCAmelCase , quant if self.config.norm_type == 'spatial' else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase = True ):
__lowercase= sample
__lowercase= self.encode(lowerCAmelCase ).latents
__lowercase= self.decode(lowerCAmelCase ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCAmelCase )
| 295 | 0 |
"""simple docstring"""
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
a : str = 0
a : Any = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
a : Union[str, Any] = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
a : List[str] = tuple[int, int]
class __UpperCamelCase :
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ) -> int:
a : int = pos_x
a : str = pos_y
a : List[str] = (pos_y, pos_x)
a : str = goal_x
a : str = goal_y
a : List[str] = g_cost
a : Optional[Any] = parent
a : str = self.calculate_heuristic()
a : Tuple = self.g_cost + self.h_cost
def __a ( self ) -> List[str]:
a : Union[str, Any] = self.pos_x - self.goal_x
a : Dict = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(lowerCAmelCase__ ) + abs(lowerCAmelCase__ )
else:
return sqrt(dy**2 + dx**2 )
def __lt__( self , lowerCAmelCase__ ) -> Union[str, Any]:
return self.f_cost < other.f_cost
class __UpperCamelCase :
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[Any]:
a : List[str] = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , lowerCAmelCase__ )
a : str = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9999 , lowerCAmelCase__ )
a : Dict = [self.start]
a : List[Any] = []
a : List[str] = False
def __a ( self ) -> Optional[Any]:
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
a : str = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(lowerCAmelCase__ )
self.closed_nodes.append(lowerCAmelCase__ )
a : Dict = self.get_successors(lowerCAmelCase__ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(lowerCAmelCase__ )
else:
# retrieve the best current path
a : Dict = self.open_nodes.pop(self.open_nodes.index(lowerCAmelCase__ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(lowerCAmelCase__ )
else:
self.open_nodes.append(lowerCAmelCase__ )
return [self.start.pos]
def __a ( self , lowerCAmelCase__ ) -> Optional[Any]:
a : Union[str, Any] = []
for action in delta:
a : int = parent.pos_x + action[1]
a : Tuple = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowerCAmelCase__ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
lowerCAmelCase__ , lowerCAmelCase__ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , lowerCAmelCase__ , ) )
return successors
def __a ( self , lowerCAmelCase__ ) -> Dict:
a : Optional[Any] = node
a : Any = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
a : Tuple = current_node.parent
path.reverse()
return path
class __UpperCamelCase :
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
a : str = AStar(lowerCAmelCase__ , lowerCAmelCase__ )
a : List[Any] = AStar(lowerCAmelCase__ , lowerCAmelCase__ )
a : List[str] = False
def __a ( self ) -> str:
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
a : List[Any] = self.fwd_astar.open_nodes.pop(0 )
a : List[Any] = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
lowerCAmelCase__ , lowerCAmelCase__ )
self.fwd_astar.closed_nodes.append(lowerCAmelCase__ )
self.bwd_astar.closed_nodes.append(lowerCAmelCase__ )
a : str = current_bwd_node
a : Dict = current_fwd_node
a : int = {
self.fwd_astar: self.fwd_astar.get_successors(lowerCAmelCase__ ),
self.bwd_astar: self.bwd_astar.get_successors(lowerCAmelCase__ ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(lowerCAmelCase__ )
else:
# retrieve the best current path
a : Dict = astar.open_nodes.pop(
astar.open_nodes.index(lowerCAmelCase__ ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(lowerCAmelCase__ )
else:
astar.open_nodes.append(lowerCAmelCase__ )
return [self.fwd_astar.start.pos]
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[Any]:
a : Optional[int] = self.fwd_astar.retrace_path(lowerCAmelCase__ )
a : Optional[Any] = self.bwd_astar.retrace_path(lowerCAmelCase__ )
bwd_path.pop()
bwd_path.reverse()
a : Tuple = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
a : Any = (0, 0)
a : List[str] = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
a : List[str] = time.time()
a : int = AStar(init, goal)
a : int = a_star.search()
a : Any = time.time() - start_time
print(F'''AStar execution time = {end_time:f} seconds''')
a : Any = time.time()
a : Union[str, Any] = BidirectionalAStar(init, goal)
a : List[Any] = time.time() - bd_start_time
print(F'''BidirectionalAStar execution time = {bd_end_time:f} seconds''')
| 353 |
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
a : Dict = re.compile('''[^A-Za-z_0-9]''')
# parameters used in DuplicationIndex
a : List[str] = 10
a : Optional[int] = 256
def _SCREAMING_SNAKE_CASE ( _lowercase : List[str] ) ->Optional[MinHash]:
'''simple docstring'''
if len(_lowercase ) < MIN_NUM_TOKENS:
return None
a : Any = MinHash(num_perm=_lowercase )
for token in set(_lowercase ):
min_hash.update(token.encode() )
return min_hash
def _SCREAMING_SNAKE_CASE ( _lowercase : str ) ->Set[str]:
'''simple docstring'''
return {t for t in NON_ALPHA.split(_lowercase ) if len(t.strip() ) > 0}
class __UpperCamelCase :
def __init__( self , *,
lowerCAmelCase__ = 0.85 , ) -> Any:
a : Any = duplication_jaccard_threshold
a : Dict = NUM_PERM
a : Dict = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
a : List[str] = defaultdict(lowerCAmelCase__ )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> None:
a : Any = self._index.query(lowerCAmelCase__ )
if code_key in self._index.keys:
print(f"""Duplicate key {code_key}""" )
return
self._index.insert(lowerCAmelCase__ , lowerCAmelCase__ )
if len(lowerCAmelCase__ ) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(lowerCAmelCase__ )
break
else:
self._duplicate_clusters[close_duplicates[0]].add(lowerCAmelCase__ )
def __a ( self ) -> List[List[Dict]]:
a : Any = []
for base, duplicates in self._duplicate_clusters.items():
a : Any = [base] + list(lowerCAmelCase__ )
# reformat the cluster to be a list of dict
a : Optional[int] = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
duplicate_clusters.append(lowerCAmelCase__ )
return duplicate_clusters
def __a ( self , lowerCAmelCase__ ) -> None:
a : Optional[int] = self.get_duplicate_clusters()
with open(lowerCAmelCase__ , "w" ) as f:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( _lowercase : List[str] ) ->Optional[Any]:
'''simple docstring'''
a, a : Optional[Any] = element
a : Any = get_min_hash([t for t in NON_ALPHA.split(data["content"] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def _SCREAMING_SNAKE_CASE ( _lowercase : Type[Dataset] ) ->Optional[int]:
'''simple docstring'''
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash , ThreadedIterator(_lowercase , max_queue_size=1_0000 ) , chunksize=100 , ):
if data is not None:
yield data
def _SCREAMING_SNAKE_CASE ( _lowercase : Type[Dataset] , _lowercase : float ) ->Dict:
'''simple docstring'''
a : Optional[int] = DuplicationIndex(duplication_jaccard_threshold=_lowercase )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(_lowercase ) ) , max_queue_size=100 ) ):
di.add(_lowercase , _lowercase )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def _SCREAMING_SNAKE_CASE ( _lowercase : str , _lowercase : str ) ->float:
'''simple docstring'''
a : Any = get_tokens(_lowercase )
a : List[str] = get_tokens(_lowercase )
return len(tokensa & tokensa ) / len(tokensa | tokensa )
a : Dict = None
def _SCREAMING_SNAKE_CASE ( _lowercase : Tuple , _lowercase : Dict ) ->Tuple:
'''simple docstring'''
a : Union[str, Any] = []
for elementa in cluster:
a : List[Any] = _shared_dataset[elementa["base_index"]]["content"]
for elementa in extremes:
a : Optional[int] = _shared_dataset[elementa["base_index"]]["content"]
if jaccard_similarity(_lowercase , _lowercase ) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
a : Optional[Any] = 1
extremes.append(_lowercase )
return extremes
def _SCREAMING_SNAKE_CASE ( _lowercase : int , _lowercase : Any , _lowercase : Optional[Any] ) ->Any:
'''simple docstring'''
global _shared_dataset
a : Tuple = dataset
a : List[Any] = []
a : int = partial(_find_cluster_extremes_shared , jaccard_threshold=_lowercase )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
_lowercase , _lowercase , ) , total=len(_lowercase ) , ):
extremes_list.append(_lowercase )
return extremes_list
def _SCREAMING_SNAKE_CASE ( _lowercase : Type[Dataset] , _lowercase : float = 0.85 ) ->Tuple[Type[Dataset], List[List[Dict]]]:
'''simple docstring'''
a : str = make_duplicate_clusters(_lowercase , _lowercase )
a : List[Any] = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
a : List[Any] = {}
a : str = find_extremes(_lowercase , _lowercase , _lowercase )
for extremes in extremes_clusters:
for element in extremes:
a : Optional[Any] = element
a : Union[str, Any] = duplicate_indices - set(extreme_dict.keys() )
a : Union[str, Any] = dataset.filter(lambda _lowercase , _lowercase : idx not in remove_indices , with_indices=_lowercase )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
a : Union[str, Any] = element["base_index"] in extreme_dict
if element["is_extreme"]:
a : Optional[int] = extreme_dict[element["base_index"]]["copies"]
print(F"""Original dataset size: {len(_lowercase )}""" )
print(F"""Number of duplicate clusters: {len(_lowercase )}""" )
print(F"""Files in duplicate cluster: {len(_lowercase )}""" )
print(F"""Unique files in duplicate cluster: {len(_lowercase )}""" )
print(F"""Filtered dataset size: {len(_lowercase )}""" )
return ds_filter, duplicate_clusters
| 79 | 0 |
'''simple docstring'''
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 89 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
A: Optional[Any] = {
"configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
"tokenization_tapas": ["TapasTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: Optional[int] = [
"TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TapasForMaskedLM",
"TapasForQuestionAnswering",
"TapasForSequenceClassification",
"TapasModel",
"TapasPreTrainedModel",
"load_tf_weights_in_tapas",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: List[str] = [
"TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFTapasForMaskedLM",
"TFTapasForQuestionAnswering",
"TFTapasForSequenceClassification",
"TFTapasModel",
"TFTapasPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
A: Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 109 | 0 |
"""simple docstring"""
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
_A = """CompVis/stable-diffusion-v1-1"""
_A = """CompVis/stable-diffusion-v1-2"""
_A = """CompVis/stable-diffusion-v1-3"""
_A = """CompVis/stable-diffusion-v1-4"""
class lowerCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__(self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = True , ):
"""simple docstring"""
super()._init_()
UpperCAmelCase__ : int = StableDiffusionPipeline.from_pretrained(_lowerCamelCase )
UpperCAmelCase__ : str = StableDiffusionPipeline.from_pretrained(_lowerCamelCase )
UpperCAmelCase__ : List[Any] = StableDiffusionPipeline.from_pretrained(_lowerCamelCase )
UpperCAmelCase__ : Dict = StableDiffusionPipeline(
vae=_lowerCamelCase , text_encoder=_lowerCamelCase , tokenizer=_lowerCamelCase , unet=_lowerCamelCase , scheduler=_lowerCamelCase , safety_checker=_lowerCamelCase , feature_extractor=_lowerCamelCase , requires_safety_checker=_lowerCamelCase , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def _a (self ):
"""simple docstring"""
return {k: getattr(self , _lowerCamelCase ) for k in self.config.keys() if not k.startswith("""_""" )}
def _a (self , _lowerCamelCase = "auto" ):
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCAmelCase__ : Optional[int] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_lowerCamelCase )
def _a (self ):
"""simple docstring"""
self.enable_attention_slicing(_lowerCamelCase )
@torch.no_grad()
def _a (self , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 50 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ):
"""simple docstring"""
return self.pipea(
prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
@torch.no_grad()
def _a (self , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 50 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ):
"""simple docstring"""
return self.pipea(
prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
@torch.no_grad()
def _a (self , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 50 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ):
"""simple docstring"""
return self.pipea(
prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
@torch.no_grad()
def _a (self , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 50 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ):
"""simple docstring"""
return self.pipea(
prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
@torch.no_grad()
def _a (self , _lowerCamelCase , _lowerCamelCase = 512 , _lowerCamelCase = 512 , _lowerCamelCase = 50 , _lowerCamelCase = 7.5 , _lowerCamelCase = None , _lowerCamelCase = 1 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = "pil" , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 1 , **_lowerCamelCase , ):
"""simple docstring"""
UpperCAmelCase__ : Any = """cuda""" if torch.cuda.is_available() else """cpu"""
self.to(_lowerCamelCase )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" )
# Get first result from Stable Diffusion Checkpoint v1.1
UpperCAmelCase__ : Optional[int] = self.textaimg_sda_a(
prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
# Get first result from Stable Diffusion Checkpoint v1.2
UpperCAmelCase__ : List[Any] = self.textaimg_sda_a(
prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
# Get first result from Stable Diffusion Checkpoint v1.3
UpperCAmelCase__ : Optional[int] = self.textaimg_sda_a(
prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
# Get first result from Stable Diffusion Checkpoint v1.4
UpperCAmelCase__ : Any = self.textaimg_sda_a(
prompt=_lowerCamelCase , height=_lowerCamelCase , width=_lowerCamelCase , num_inference_steps=_lowerCamelCase , guidance_scale=_lowerCamelCase , negative_prompt=_lowerCamelCase , num_images_per_prompt=_lowerCamelCase , eta=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , output_type=_lowerCamelCase , return_dict=_lowerCamelCase , callback=_lowerCamelCase , callback_steps=_lowerCamelCase , **_lowerCamelCase , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
| 363 |
"""simple docstring"""
_A = range(2, 20 + 1)
_A = [10**k for k in range(ks[-1] + 1)]
_A = {}
def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> int:
UpperCAmelCase__ : List[str] = sum(a_i[j] for j in range(lowerCAmelCase , len(lowerCAmelCase ) ) )
UpperCAmelCase__ : str = sum(a_i[j] * base[j] for j in range(min(len(lowerCAmelCase ) , lowerCAmelCase ) ) )
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = 0, 0
UpperCAmelCase__ : Optional[Any] = n - i
UpperCAmelCase__ : Union[str, Any] = memo.get(lowerCAmelCase )
if sub_memo is not None:
UpperCAmelCase__ : Any = sub_memo.get(lowerCAmelCase )
if jumps is not None and len(lowerCAmelCase ) > 0:
# find and make the largest jump without going over
UpperCAmelCase__ : Optional[int] = -1
for _k in range(len(lowerCAmelCase ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
UpperCAmelCase__ : str = _k
break
if max_jump >= 0:
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = jumps[max_jump]
# since the difference between jumps is cached, add c
UpperCAmelCase__ : Any = diff + c
for j in range(min(lowerCAmelCase , len(lowerCAmelCase ) ) ):
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = divmod(lowerCAmelCase , 10 )
if new_c > 0:
add(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
else:
UpperCAmelCase__ : int = []
else:
UpperCAmelCase__ : Union[str, Any] = {c: []}
UpperCAmelCase__ : Union[str, Any] = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = next_term(lowerCAmelCase , k - 1 , i + dn , lowerCAmelCase )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = compute(lowerCAmelCase , lowerCAmelCase , i + dn , lowerCAmelCase )
diff += _diff
dn += terms_jumped
UpperCAmelCase__ : str = sub_memo[c]
# keep jumps sorted by # of terms skipped
UpperCAmelCase__ : Any = 0
while j < len(lowerCAmelCase ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(lowerCAmelCase , (diff, dn, k) )
return (diff, dn)
def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> List[Any]:
if i >= n:
return 0, i
if k > len(lowerCAmelCase ):
a_i.extend([0 for _ in range(k - len(lowerCAmelCase ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
UpperCAmelCase__ : Tuple = i
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = 0, 0, 0
for j in range(len(lowerCAmelCase ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
UpperCAmelCase__ : Dict = ds_c + ds_b
diff += addend
UpperCAmelCase__ : Tuple = 0
for j in range(lowerCAmelCase ):
UpperCAmelCase__ : Tuple = a_i[j] + addend
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = divmod(lowerCAmelCase , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
return diff, i - start_i
def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Optional[Any]:
for j in range(lowerCAmelCase , len(lowerCAmelCase ) ):
UpperCAmelCase__ : Optional[Any] = digits[j] + addend
if s >= 10:
UpperCAmelCase__ , UpperCAmelCase__ : Dict = divmod(lowerCAmelCase , 10 )
UpperCAmelCase__ : Any = addend // 10 + quotient
else:
UpperCAmelCase__ : Optional[Any] = s
UpperCAmelCase__ : Tuple = addend // 10
if addend == 0:
break
while addend > 0:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = divmod(lowerCAmelCase , 10 )
digits.append(lowerCAmelCase )
def a__ ( lowerCAmelCase = 10**15 ) -> int:
UpperCAmelCase__ : Optional[int] = [1]
UpperCAmelCase__ : Union[str, Any] = 1
UpperCAmelCase__ : Dict = 0
while True:
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = next_term(lowerCAmelCase , 20 , i + dn , lowerCAmelCase )
dn += terms_jumped
if dn == n - i:
break
UpperCAmelCase__ : Optional[int] = 0
for j in range(len(lowerCAmelCase ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(f'''{solution() = }''')
| 166 | 0 |
import math
from datetime import datetime, timedelta


def gauss_easter(year: int) -> datetime:
    """
    Calculate the date of Easter for a given year using Gauss's Easter
    algorithm (https://en.wikipedia.org/wiki/Date_of_Easter).
    """
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )


if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
| 340 |
from __future__ import annotations

from collections.abc import Iterable, Iterator
from dataclasses import dataclass

test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
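
# Added minimal usage sketch (assumes the classes above are in scope):
# merging two sorted lists just re-sorts the concatenation of their elements.
merged = merge_lists(SortedLinkedList([3, 1]), SortedLinkedList([2, 0]))
assert str(merged) == "0 -> 1 -> 2 -> 3"
assert len(merged) == 4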
| 340 | 1 |
'''simple docstring'''
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
def __init__( self : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[int]=2 , UpperCAmelCase__ : int=8 , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : Union[str, Any]=9_9 , UpperCAmelCase__ : Union[str, Any]=1_6 , UpperCAmelCase__ : Dict=5 , UpperCAmelCase__ : int=2 , UpperCAmelCase__ : Union[str, Any]=3_6 , UpperCAmelCase__ : str="gelu" , UpperCAmelCase__ : List[str]=0.0 , UpperCAmelCase__ : Tuple=0.0 , UpperCAmelCase__ : Any=5_1_2 , UpperCAmelCase__ : Any=1_6 , UpperCAmelCase__ : Optional[Any]=2 , UpperCAmelCase__ : List[Any]=0.02 , UpperCAmelCase__ : Tuple=3 , UpperCAmelCase__ : List[str]=4 , UpperCAmelCase__ : Optional[int]=None , ) -> Optional[Any]:
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = seq_length
lowerCAmelCase = is_training
lowerCAmelCase = use_input_mask
lowerCAmelCase = use_token_type_ids
lowerCAmelCase = use_labels
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = type_sequence_label_size
lowerCAmelCase = initializer_range
lowerCAmelCase = num_labels
lowerCAmelCase = num_choices
lowerCAmelCase = scope
    def prepare_config_and_inputs(self):
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = None
if self.use_input_mask:
lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase = None
if self.use_token_type_ids:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , )
    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def __UpperCAmelCase ( self : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict ) -> str:
lowerCAmelCase = MraModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ )
lowerCAmelCase = model(UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ )
lowerCAmelCase = model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase ( self : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : str , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : str , ) -> Tuple:
lowerCAmelCase = True
lowerCAmelCase = MraModel(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , )
lowerCAmelCase = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , )
lowerCAmelCase = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase ( self : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict ) -> Union[str, Any]:
lowerCAmelCase = MraForMaskedLM(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCAmelCase ( self : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[int] ) -> Optional[Any]:
lowerCAmelCase = MraForQuestionAnswering(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , start_positions=UpperCAmelCase__ , end_positions=UpperCAmelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCAmelCase ( self : int , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Dict ) -> List[str]:
lowerCAmelCase = self.num_labels
lowerCAmelCase = MraForSequenceClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCAmelCase ( self : Optional[int] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int ) -> Dict:
lowerCAmelCase = self.num_labels
lowerCAmelCase = MraForTokenClassification(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCAmelCase ( self : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[Any] ) -> int:
lowerCAmelCase = self.num_choices
lowerCAmelCase = MraForMultipleChoice(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):

    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()
def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
lowerCAmelCase = MraModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=3_7 )
def __UpperCAmelCase ( self : str ) -> List[str]:
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self : Tuple ) -> int:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCAmelCase = type
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def __UpperCAmelCase ( self : Any ) -> str:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase__ )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase__ )
def __UpperCAmelCase ( self : Optional[int] ) -> List[str]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase__ )
def __UpperCAmelCase ( self : Optional[int] ) -> int:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase__ )
def __UpperCAmelCase ( self : str ) -> Optional[int]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase__ )
@slow
def __UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase = MraModel.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
@unittest.skip(reason='MRA does not output attentions' )
def __UpperCAmelCase ( self : int ) -> Tuple:
return
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
@slow
def __UpperCAmelCase ( self : Optional[Any] ) -> int:
lowerCAmelCase = MraModel.from_pretrained('uw-madison/mra-base-512-4' )
lowerCAmelCase = torch.arange(2_5_6 ).unsqueeze(0 )
with torch.no_grad():
lowerCAmelCase = model(UpperCAmelCase__ )[0]
lowerCAmelCase = torch.Size((1, 2_5_6, 7_6_8) )
self.assertEqual(output.shape , UpperCAmelCase__ )
lowerCAmelCase = torch.tensor(
[[[-0.0_140, 0.0_830, -0.0_381], [0.1_546, 0.1_402, 0.0_220], [0.1_162, 0.0_851, 0.0_165]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase__ , atol=1E-4 ) )
@slow
def __UpperCAmelCase ( self : int ) -> Any:
lowerCAmelCase = MraForMaskedLM.from_pretrained('uw-madison/mra-base-512-4' )
lowerCAmelCase = torch.arange(2_5_6 ).unsqueeze(0 )
with torch.no_grad():
lowerCAmelCase = model(UpperCAmelCase__ )[0]
lowerCAmelCase = 5_0_2_6_5
lowerCAmelCase = torch.Size((1, 2_5_6, vocab_size) )
self.assertEqual(output.shape , UpperCAmelCase__ )
lowerCAmelCase = torch.tensor(
[[[9.2_595, -3.6_038, 11.8_819], [9.3_869, -3.2_693, 11.0_956], [11.8_524, -3.4_938, 13.1_210]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase__ , atol=1E-4 ) )
@slow
def __UpperCAmelCase ( self : Tuple ) -> Any:
lowerCAmelCase = MraForMaskedLM.from_pretrained('uw-madison/mra-base-4096-8-d3' )
lowerCAmelCase = torch.arange(4_0_9_6 ).unsqueeze(0 )
with torch.no_grad():
lowerCAmelCase = model(UpperCAmelCase__ )[0]
lowerCAmelCase = 5_0_2_6_5
lowerCAmelCase = torch.Size((1, 4_0_9_6, vocab_size) )
self.assertEqual(output.shape , UpperCAmelCase__ )
lowerCAmelCase = torch.tensor(
[[[5.4_789, -2.3_564, 7.5_064], [7.9_067, -1.3_369, 9.9_668], [9.0_712, -1.8_106, 7.0_380]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase__ , atol=1E-4 ) )
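
    # Added note (not part of the original file): to exercise the slow
    # integration checks above locally, the usual transformers convention is
    # something like the following; the test-file path assumes a standard
    # repository checkout, so adjust it to yours.
    #   RUN_SLOW=1 pytest tests/models/mra/test_modeling_mra.py -k "Integration"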
| 364 |
import logging
import os
from typing import List, TextIO, Union

from conllu import parse_incr

from utils_ner import InputExample, Split, TokenClassificationTask


logger = logging.getLogger(__name__)


class NER(TokenClassificationTask):
    def __init__(self, label_idx=-1):
        # in NER datasets, the last column is usually reserved for NER label
        self.label_idx = label_idx

    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(" ")
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
            if words:
                examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]


class Chunk(NER):
    def __init__(self):
        # in CONLL2003 dataset chunk column is second-to-last
        super().__init__(label_idx=-2)

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]


class POS(TokenClassificationTask):
    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                    guid_index += 1
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0)}) '
            out += "\n"
            writer.write(out)
            example_id += 1

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
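
# Added reference sketch (not part of the original file): the whitespace-
# separated CoNLL-2003-style input the NER reader above expects, with the
# token first, the NER tag in the last column, and blank lines between
# sentences. The values are illustrative only:
#
#   -DOCSTART- -X- -X- O
#
#   EU NNP B-NP B-ORG
#   rejects VBZ B-VP O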
| 55 | 0 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
lowerCamelCase_ = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''')
lowerCamelCase_ = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
lowerCamelCase_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
snake_case = field(
default='''cifar10''' , metadata={'''help''': '''Name of a dataset from the datasets package'''} )
snake_case = field(
default=snake_case_ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
snake_case = field(
default=snake_case_ , metadata={'''help''': '''The column name of the images in the files. If not set, will try to use \'image\' or \'img\'.'''} , )
snake_case = field(default=snake_case_ , metadata={'''help''': '''A folder containing the training data.'''} )
snake_case = field(default=snake_case_ , metadata={'''help''': '''A folder containing the validation data.'''} )
snake_case = field(
default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} )
snake_case = field(default=32 , metadata={'''help''': '''The size of the square patches to use for masking.'''} )
snake_case = field(
default=0.6 , metadata={'''help''': '''Percentage of patches to mask.'''} , )
snake_case = field(
default=snake_case_ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
snake_case = field(
default=snake_case_ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
_A = {}
if self.train_dir is not None:
_A = self.train_dir
if self.validation_dir is not None:
_A = self.validation_dir
_A = data_files if data_files else None
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
snake_case = field(
default=snake_case_ , metadata={
'''help''': (
'''The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a '''
'''checkpoint identifier on the hub. '''
'''Don\'t set if you want to train a model from scratch.'''
)
} , )
snake_case = field(
default=snake_case_ , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(snake_case_ )} , )
snake_case = field(
default=snake_case_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
snake_case = field(
default=snake_case_ , metadata={
'''help''': (
'''Override some existing default config settings when a model is trained from scratch. Example: '''
'''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
)
} , )
snake_case = field(
default=snake_case_ , metadata={'''help''': '''Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'''} , )
snake_case = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
snake_case = field(default=snake_case_ , metadata={'''help''': '''Name or path of preprocessor config.'''} )
snake_case = field(
default=snake_case_ , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
snake_case = field(
default=snake_case_ , metadata={
'''help''': (
'''The size (resolution) of each image. If not specified, will use `image_size` of the configuration.'''
)
} , )
snake_case = field(
default=snake_case_ , metadata={
'''help''': (
'''The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.'''
)
} , )
snake_case = field(
default=snake_case_ , metadata={'''help''': '''Stride to use for the encoder.'''} , )
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : Tuple , __UpperCAmelCase : Optional[int]=192 , __UpperCAmelCase : Dict=32 , __UpperCAmelCase : int=4 , __UpperCAmelCase : int=0.6 ):
'''simple docstring'''
_A = input_size
_A = mask_patch_size
_A = model_patch_size
_A = mask_ratio
if self.input_size % self.mask_patch_size != 0:
raise ValueError("Input size must be divisible by mask patch size" )
if self.mask_patch_size % self.model_patch_size != 0:
raise ValueError("Mask patch size must be divisible by model patch size" )
_A = self.input_size // self.mask_patch_size
_A = self.mask_patch_size // self.model_patch_size
_A = self.rand_size**2
_A = int(np.ceil(self.token_count * self.mask_ratio ) )
def __call__( self : Any ):
'''simple docstring'''
_A = np.random.permutation(self.token_count )[: self.mask_count]
_A = np.zeros(self.token_count , dtype=__UpperCAmelCase )
_A = 1
_A = mask.reshape((self.rand_size, self.rand_size) )
_A = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 )
return torch.tensor(mask.flatten() )
def __lowercase ( __lowercase ) -> str:
'''simple docstring'''
_A = torch.stack([example["pixel_values"] for example in examples] )
_A = torch.stack([example["mask"] for example in examples] )
return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def __lowercase ( ) -> Dict:
'''simple docstring'''
_A = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_A , _A , _A = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_A , _A , _A = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_mim" , __lowercase , __lowercase )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_A = training_args.get_process_log_level()
logger.setLevel(__lowercase )
transformers.utils.logging.set_verbosity(__lowercase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
_A = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_A = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Initialize our dataset.
_A = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
_A = None if "validation" in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , __lowercase ) and data_args.train_val_split > 0.0:
_A = ds["train"].train_test_split(data_args.train_val_split )
_A = split["train"]
_A = split["test"]
# Create config
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_A = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.config_name_or_path:
_A = AutoConfig.from_pretrained(model_args.config_name_or_path , **__lowercase )
elif model_args.model_name_or_path:
_A = AutoConfig.from_pretrained(model_args.model_name_or_path , **__lowercase )
else:
_A = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch." )
if model_args.config_overrides is not None:
logger.info(F'''Overriding config: {model_args.config_overrides}''' )
config.update_from_string(model_args.config_overrides )
logger.info(F'''New config: {config}''' )
# make sure the decoder_type is "simmim" (only relevant for BEiT)
if hasattr(__lowercase , "decoder_type" ):
_A = "simmim"
# adapt config
_A = model_args.image_size if model_args.image_size is not None else config.image_size
_A = model_args.patch_size if model_args.patch_size is not None else config.patch_size
_A = (
model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
)
config.update(
{
"image_size": model_args.image_size,
"patch_size": model_args.patch_size,
"encoder_stride": model_args.encoder_stride,
} )
# create image processor
if model_args.image_processor_name:
_A = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **__lowercase )
elif model_args.model_name_or_path:
_A = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **__lowercase )
else:
_A = {
conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
}
_A = IMAGE_PROCESSOR_TYPES[model_args.model_type]()
# create model
if model_args.model_name_or_path:
_A = AutoModelForMaskedImageModeling.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=__lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("Training new model from scratch" )
_A = AutoModelForMaskedImageModeling.from_config(__lowercase )
if training_args.do_train:
_A = ds["train"].column_names
else:
_A = ds["validation"].column_names
if data_args.image_column_name is not None:
_A = data_args.image_column_name
elif "image" in column_names:
_A = "image"
elif "img" in column_names:
_A = "img"
else:
_A = column_names[0]
# transformations as done in original SimMIM paper
# source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
_A = Compose(
[
Lambda(lambda __lowercase : img.convert("RGB" ) if img.mode != "RGB" else img ),
RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
# create mask generator
_A = MaskGenerator(
input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , )
def preprocess_images(__lowercase ):
_A = [transforms(__lowercase ) for image in examples[image_column_name]]
_A = [mask_generator() for i in range(len(examples[image_column_name] ) )]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("--do_train requires a train dataset" )
if data_args.max_train_samples is not None:
_A = ds["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(__lowercase )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("--do_eval requires a validation dataset" )
if data_args.max_eval_samples is not None:
_A = (
ds["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(__lowercase )
# Initialize our trainer
_A = Trainer(
model=__lowercase , args=__lowercase , train_dataset=ds["train"] if training_args.do_train else None , eval_dataset=ds["validation"] if training_args.do_eval else None , tokenizer=__lowercase , data_collator=__lowercase , )
# Training
if training_args.do_train:
_A = None
if training_args.resume_from_checkpoint is not None:
_A = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_A = last_checkpoint
_A = trainer.train(resume_from_checkpoint=__lowercase )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
_A = trainer.evaluate()
trainer.log_metrics("eval" , __lowercase )
trainer.save_metrics("eval" , __lowercase )
# Write model card and (optionally) push to hub
_A = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "masked-image-modeling",
"dataset": data_args.dataset_name,
"tags": ["masked-image-modeling"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**__lowercase )
else:
trainer.create_model_card(**__lowercase )
if __name__ == "__main__":
main()
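
# Added launch sketch (not part of the original file): a typical invocation
# of this script. The flag names come from the argument dataclasses above;
# the dataset, model type and output path are placeholders to adjust.
#   python run_mim.py \
#       --model_type vit \
#       --dataset_name cifar10 \
#       --output_dir ./simmim-outputs \
#       --do_train --do_eval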
| 79 |
import numpy as np


def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
) -> tuple[float, np.ndarray]:
    """
    Power iteration: given a real symmetric or complex Hermitian matrix and a
    starting vector, iteratively estimate the eigenvalue of largest magnitude
    and an associated eigenvector.
    """
    # Ensure the matrix is square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1

        if error <= error_tol or iterations >= max_iterations:
            convergence = True

        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)

    return lambda_, vector


def test_power_iteration() -> None:
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    test_power_iteration()
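
# Added minimal usage sketch (not part of the original file): the symmetric
# matrix below has eigenvalues 1 and 3, so power iteration should converge
# to the dominant eigenvalue 3.
example_matrix = np.array([[2.0, 1.0], [1.0, 2.0]])
example_value, example_vector = power_iteration(example_matrix, np.array([1.0, 0.0]))
assert abs(example_value - 3.0) <= 1e-6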
| 15 | 0 |
from math import asin, atan, cos, radians, sin, sqrt, tan

# WGS84 ellipsoid parameters (https://en.wikipedia.org/wiki/World_Geodetic_System)
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """
    Calculate the great-circle distance in metres between two points on a
    sphere, given their latitudes and longitudes
    (https://en.wikipedia.org/wiki/Haversine_formula).
    """
    # Equation parameters
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)

    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda

    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
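
# Added usage sketch (not part of the original file) with approximate
# coordinates for San Francisco and Yosemite Valley; the straight-line
# distance should come out at roughly 254 km.
SAN_FRANCISCO = (37.774856, -122.424227)
YOSEMITE = (37.864742, -119.537521)
print(f"{haversine_distance(*SAN_FRANCISCO, *YOSEMITE):0,.0f} meters")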
| 365 |
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester:
def __init__( self , _lowercase , _lowercase=13 , _lowercase=7 , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=99 , _lowercase=32 , _lowercase=5 , _lowercase=4 , _lowercase=4 , _lowercase="gelu" , _lowercase=0.0 , _lowercase=0.1 , _lowercase=True , _lowercase=512 , _lowercase=16 , _lowercase=2 , _lowercase=0.02 , _lowercase=3 , _lowercase=4 , _lowercase=None , )-> str:
UpperCamelCase_ = parent
UpperCamelCase_ = batch_size
UpperCamelCase_ = seq_length
UpperCamelCase_ = is_training
UpperCamelCase_ = use_input_mask
UpperCamelCase_ = use_token_type_ids
UpperCamelCase_ = use_labels
UpperCamelCase_ = vocab_size
UpperCamelCase_ = hidden_size
UpperCamelCase_ = num_hidden_layers
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = intermediate_multiple_size
UpperCamelCase_ = hidden_act
UpperCamelCase_ = hidden_dropout
UpperCamelCase_ = attention_dropout
UpperCamelCase_ = weight_tying
UpperCamelCase_ = max_position_embeddings
UpperCamelCase_ = type_vocab_size
UpperCamelCase_ = type_sequence_label_size
UpperCamelCase_ = initializer_range
UpperCamelCase_ = num_labels
UpperCamelCase_ = num_choices
UpperCamelCase_ = scope
def UpperCAmelCase_ ( self )-> int:
UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase_ = None
if self.use_input_mask:
UpperCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase_ = None
if self.use_labels:
UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase_ = self.get_config()
return config, input_ids, input_mask, token_labels
def UpperCAmelCase_ ( self )-> int:
return GPTNeoXJapaneseConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowercase , initializer_range=self.initializer_range , )
def UpperCAmelCase_ ( self )-> Dict:
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = self.prepare_config_and_inputs()
UpperCamelCase_ = True
return config, input_ids, input_mask, token_labels
def UpperCAmelCase_ ( self , _lowercase , _lowercase , _lowercase )-> Optional[Any]:
UpperCamelCase_ = GPTNeoXJapaneseModel(config=_lowercase )
model.to(_lowercase )
model.eval()
UpperCamelCase_ = model(_lowercase , attention_mask=_lowercase )
UpperCamelCase_ = model(_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self , _lowercase , _lowercase , _lowercase )-> Dict:
UpperCamelCase_ = True
UpperCamelCase_ = GPTNeoXJapaneseModel(_lowercase )
model.to(_lowercase )
model.eval()
UpperCamelCase_ = model(_lowercase , attention_mask=_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self , _lowercase , _lowercase , _lowercase , _lowercase )-> List[str]:
UpperCamelCase_ = GPTNeoXJapaneseForCausalLM(config=_lowercase )
model.to(_lowercase )
model.eval()
UpperCamelCase_ = model(_lowercase , attention_mask=_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase_ ( self , _lowercase , _lowercase , _lowercase )-> Dict:
UpperCamelCase_ = True
UpperCamelCase_ = GPTNeoXJapaneseForCausalLM(config=_lowercase )
model.to(_lowercase )
model.eval()
# first forward pass
UpperCamelCase_ = model(_lowercase , attention_mask=_lowercase , use_cache=_lowercase )
UpperCamelCase_ = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
UpperCamelCase_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCamelCase_ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
UpperCamelCase_ = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCamelCase_ = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCamelCase_ = model(_lowercase , attention_mask=_lowercase , output_hidden_states=_lowercase )
UpperCamelCase_ = output_from_no_past["hidden_states"][0]
UpperCamelCase_ = model(
_lowercase , attention_mask=_lowercase , past_key_values=_lowercase , output_hidden_states=_lowercase , )["hidden_states"][0]
# select random slice
UpperCamelCase_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCamelCase_ = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCamelCase_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_lowercase , _lowercase , atol=1e-3 ) )
def UpperCAmelCase_ ( self )-> Dict:
UpperCamelCase_ = self.prepare_config_and_inputs()
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = config_and_inputs
UpperCamelCase_ = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class GPTNeoXModelJapaneseTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):

    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False
def UpperCAmelCase_ ( self )-> List[Any]:
UpperCamelCase_ = GPTNeoXJapaneseModelTester(self )
UpperCamelCase_ = ConfigTester(self , config_class=_lowercase , hidden_size=37 )
def UpperCAmelCase_ ( self )-> Tuple:
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self )-> Optional[Any]:
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(_lowercase , _lowercase , _lowercase )
def UpperCAmelCase_ ( self )-> List[Any]:
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(_lowercase , _lowercase , _lowercase )
def UpperCAmelCase_ ( self )-> Any:
# This regression test was failing with PyTorch < 1.3
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_decoder()
UpperCamelCase_ = None
self.model_tester.create_and_check_model_as_decoder(_lowercase , _lowercase , _lowercase )
def UpperCAmelCase_ ( self )-> List[str]:
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(_lowercase , _lowercase , _lowercase )
def UpperCAmelCase_ ( self )-> Dict:
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*_lowercase )
@slow
def UpperCAmelCase_ ( self )-> Dict:
UpperCamelCase_ = "abeja/gpt-neox-japanese-2.7b"
UpperCamelCase_ = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]
UpperCamelCase_ = [
"データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
"100年後に必要とされる会社は、「人」が中心の会社です。",
"フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
"国境の長いトンネルを抜けると、そこは雪国だった。",
"美味しい日本食といえば、やっぱりお寿司ですよね。",
]
UpperCamelCase_ = GPTNeoXJapaneseTokenizer.from_pretrained(_lowercase )
UpperCamelCase_ = GPTNeoXJapaneseForCausalLM.from_pretrained(_lowercase )
UpperCamelCase_ = []
for prompt in prompts:
UpperCamelCase_ = tokenizer(_lowercase , return_tensors="pt" ).input_ids
UpperCamelCase_ = model.generate(_lowercase , max_length=50 )
UpperCamelCase_ = tokenizer.batch_decode(_lowercase , skip_special_tokens=_lowercase )
predicted_outputs += generated_string
self.assertListEqual(_lowercase , _lowercase )
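
    # Added standalone generation sketch (not part of the original file),
    # mirroring the slow test above. The model id comes from the test itself;
    # note the checkpoint download is large.
    #
    #   tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
    #   model = GPTNeoXJapaneseForCausalLM.from_pretrained("abeja/gpt-neox-japanese-2.7b")
    #   input_ids = tokenizer("データサイエンティストとは、", return_tensors="pt").input_ids
    #   print(tokenizer.batch_decode(model.generate(input_ids, max_length=50), skip_special_tokens=True)[0])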
| 60 | 0 |
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    """
    Find the sum of n terms in an arithmetic progression.

    >>> sum_of_series(1, 1, 10)
    55.0
    >>> sum_of_series(1, 10, 100)
    49600.0
    """
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total


def main() -> None:
    print(sum_of_series(1, 1, 10))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
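
# Added closed-form check (not part of the original file):
# (10 / 2) * (2 * 1 + 9 * 1) = 55.0, matching the direct sums below.
assert sum_of_series(1, 1, 10) == sum(range(1, 11))
assert sum_of_series(1, 10, 100) == sum(1 + 10 * i for i in range(100))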
| 95 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase):
def __init__( self : int , _lowerCamelCase : Tuple , _lowerCamelCase : Union[str, Any]=13 , _lowerCamelCase : Optional[Any]=3 , _lowerCamelCase : List[Any]=2_24 , _lowerCamelCase : Tuple=30 , _lowerCamelCase : List[str]=4_00 , _lowerCamelCase : Optional[Any]=True , _lowerCamelCase : Optional[int]=None , _lowerCamelCase : int=True , _lowerCamelCase : Any=[0.5, 0.5, 0.5] , _lowerCamelCase : Tuple=[0.5, 0.5, 0.5] , ):
"""simple docstring"""
A_ : int = size if size is not None else {'''height''': 18, '''width''': 18}
A_ : Optional[int] = parent
A_ : Any = batch_size
A_ : List[str] = num_channels
A_ : List[str] = image_size
A_ : List[Any] = min_resolution
A_ : str = max_resolution
A_ : Dict = do_resize
A_ : Dict = size
A_ : str = do_normalize
A_ : List[str] = image_mean
A_ : List[str] = image_std
def a_ ( self : Optional[Any] ):
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):

    image_processing_class = ViTImageProcessor if is_vision_available() else None
def a_ ( self : Dict ):
"""simple docstring"""
A_ : Union[str, Any] = EfficientFormerImageProcessorTester(self )
@property
def a_ ( self : List[Any] ):
"""simple docstring"""
return self.image_proc_tester.prepare_image_processor_dict()
def a_ ( self : List[Any] ):
"""simple docstring"""
A_ : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCamelCase , '''image_mean''' ) )
self.assertTrue(hasattr(_lowerCamelCase , '''image_std''' ) )
self.assertTrue(hasattr(_lowerCamelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(_lowerCamelCase , '''do_resize''' ) )
self.assertTrue(hasattr(_lowerCamelCase , '''size''' ) )
def a_ ( self : str ):
"""simple docstring"""
pass
def a_ ( self : Optional[Any] ):
"""simple docstring"""
A_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A_ : List[Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , Image.Image )
# Test not batched input
A_ : List[str] = image_processor(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
# Test batched
A_ : Dict = image_processor(_lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
def a_ ( self : List[Any] ):
"""simple docstring"""
A_ : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A_ : Dict = prepare_image_inputs(self.image_proc_tester , equal_resolution=_lowerCamelCase , numpify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , np.ndarray )
# Test not batched input
A_ : Any = image_processor(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
# Test batched
A_ : str = image_processor(_lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
def a_ ( self : str ):
"""simple docstring"""
A_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A_ : List[str] = prepare_image_inputs(self.image_proc_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , torch.Tensor )
# Test not batched input
A_ : str = image_processor(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
# Test batched
A_ : List[str] = image_processor(_lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
| 167 | 0 |
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class MobileBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):

    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = "google/mobilebert-uncased"
def lowerCAmelCase__ ( self : Optional[Any] ):
super().setUp()
UpperCamelCase_: Union[str, Any] = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
UpperCamelCase_: List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
UpperCamelCase_: int = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
def lowerCAmelCase__ ( self : Optional[Any] , snake_case_ : Tuple ):
UpperCamelCase_: Tuple = """UNwant\u00E9d,running"""
UpperCamelCase_: Optional[int] = """unwanted, running"""
return input_text, output_text
def lowerCAmelCase__ ( self : Tuple ):
UpperCamelCase_: Optional[int] = self.tokenizer_class(self.vocab_file )
UpperCamelCase_: Optional[Any] = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(_a , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , [9, 6, 7, 12, 10, 11] )
def lowerCAmelCase__ ( self : Dict ):
if not self.test_rust_tokenizer:
return
UpperCamelCase_: Union[str, Any] = self.get_tokenizer()
UpperCamelCase_: int = self.get_rust_tokenizer()
UpperCamelCase_: List[Any] = """UNwant\u00E9d,running"""
UpperCamelCase_: int = tokenizer.tokenize(_a )
UpperCamelCase_: List[Any] = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
UpperCamelCase_: str = tokenizer.encode(_a , add_special_tokens=_a )
UpperCamelCase_: List[str] = rust_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
UpperCamelCase_: List[Any] = self.get_rust_tokenizer()
UpperCamelCase_: List[str] = tokenizer.encode(_a )
UpperCamelCase_: Optional[int] = rust_tokenizer.encode(_a )
self.assertListEqual(_a , _a )
# With lower casing
UpperCamelCase_: int = self.get_tokenizer(do_lower_case=_a )
UpperCamelCase_: Dict = self.get_rust_tokenizer(do_lower_case=_a )
UpperCamelCase_: Optional[Any] = """UNwant\u00E9d,running"""
UpperCamelCase_: List[Any] = tokenizer.tokenize(_a )
UpperCamelCase_: Tuple = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
UpperCamelCase_: Tuple = tokenizer.encode(_a , add_special_tokens=_a )
UpperCamelCase_: Optional[int] = rust_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
UpperCamelCase_: Optional[Any] = self.get_rust_tokenizer()
UpperCamelCase_: int = tokenizer.encode(_a )
UpperCamelCase_: str = rust_tokenizer.encode(_a )
self.assertListEqual(_a , _a )
def lowerCAmelCase__ ( self : Any ):
UpperCamelCase_: List[str] = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , ["""ah""", """\u535A""", """\u63A8""", """zz"""] )
def lowerCAmelCase__ ( self : str ):
UpperCamelCase_: Optional[Any] = BasicTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_: Optional[Any] = BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] )
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_: Dict = BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_: int = BasicTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: Union[str, Any] = BasicTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def lowerCAmelCase__ ( self : Union[str, Any] ):
UpperCamelCase_: Any = BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def lowerCAmelCase__ ( self : Union[str, Any] ):
UpperCamelCase_: List[Any] = BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def lowerCAmelCase__ ( self : Tuple ):
UpperCamelCase_: Optional[Any] = BasicTokenizer(do_lower_case=_a , never_split=["""[UNK]"""] )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )
def lowerCAmelCase__ ( self : List[str] ):
vocab_tokens = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
vocab = {}
for i, token in enumerate(vocab_tokens ):
vocab[token] = i
tokenizer = WordpieceTokenizer(vocab=vocab , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] )
def lowerCAmelCase__ ( self : Optional[int] ):
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
def lowerCAmelCase__ ( self : str ):
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
def lowerCAmelCase__ ( self : int ):
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
def lowerCAmelCase__ ( self : Tuple ):
tokenizer = self.get_tokenizer()
rust_tokenizer = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(t ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
self.assertListEqual(
[rust_tokenizer.tokenize(t ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
@slow
def lowerCAmelCase__ ( self : List[Any] ):
tokenizer = self.tokenizer_class.from_pretrained("""google/mobilebert-uncased""" )
text = tokenizer.encode("""sequence builders""" , add_special_tokens=False )
text_a = tokenizer.encode("""multi-sequence build""" , add_special_tokens=False )
encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def lowerCAmelCase__ ( self : str ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
sentence = f'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
tokens = tokenizer_r.encode_plus(
sentence , return_attention_mask=False , return_token_type_ids=False , return_offsets_mapping=True , add_special_tokens=True , )
do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r , """do_lower_case""" ) else False
expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """Allen"""),
((21, 23), """##NL"""),
((23, 24), """##P"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """allen"""),
((21, 23), """##nl"""),
((23, 24), """##p"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""] )
def lowerCAmelCase__ ( self : Any ):
list_of_commun_chinese_char = ["""的""", """人""", """有"""]
text_with_chinese_char = """""".join(list_of_commun_chinese_char )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
kwargs["""tokenize_chinese_chars"""] = True
tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char , add_special_tokens=False )
ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char , add_special_tokens=False )
tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r )
tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(tokens_without_spe_char_p , list_of_commun_chinese_char )
self.assertListEqual(tokens_without_spe_char_r , list_of_commun_chinese_char )
kwargs["""tokenize_chinese_chars"""] = False
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char , add_special_tokens=False )
ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char , add_special_tokens=False )
tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r )
tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p )
# it is expected that only the first Chinese character is not preceded by "##".
expected_tokens = [
f'''##{token}''' if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char )
]
self.assertListEqual(tokens_without_spe_char_p , expected_tokens )
self.assertListEqual(tokens_without_spe_char_r , expected_tokens )
| 350 |
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _UpperCamelCase ( _A , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : Tuple = CTRLTokenizer
__UpperCamelCase : int = False
__UpperCamelCase : List[str] = False
def lowerCAmelCase__ ( self : List[str] ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
vocab = ["""adapt""", """re@@""", """a@@""", """apt""", """c@@""", """t""", """<unk>"""]
vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
merges = ["""#version: 0.2""", """a p""", """ap t</w>""", """r e""", """a d""", """ad apt</w>""", """"""]
self.special_tokens_map = {"""unk_token""": """<unk>"""}
self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(vocab_tokens ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(merges ) )
def lowerCAmelCase__ ( self : Optional[int] , **snake_case_ : int ):
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **snake_case_ )
def lowerCAmelCase__ ( self : Optional[int] , snake_case_ : List[str] ):
UpperCamelCase_: Dict = """adapt react readapt apt"""
UpperCamelCase_: List[str] = """adapt react readapt apt"""
return input_text, output_text
def lowerCAmelCase__ ( self : List[str] ):
tokenizer = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
text = """adapt react readapt apt"""
bpe_tokens = """adapt re@@ a@@ c@@ t re@@ adapt apt""".split()
tokens = tokenizer.tokenize(text )
self.assertListEqual(tokens , bpe_tokens )
input_tokens = tokens + [tokenizer.unk_token]
input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
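# Illustrative BPE trace (commentary, not part of the test): with the merge
# table above ranked "a p" < "ap t</w>" < "r e" < "a d" < "ad apt</w>",
# "react" starts as the characters r e a c t</w>; only the merge "r e"
# applies, giving re a c t</w>, i.e. the tokens re@@ a@@ c@@ t. "readapt"
# applies "a p", "ap t</w>", "r e", "a d", then "ad apt</w>" in rank order
# and ends as re@@ adapt, while "adapt" and "apt" merge into single vocab
# entries -- matching the expected "adapt re@@ a@@ c@@ t re@@ adapt apt".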
| 223 | 0 |
UNIVERSAL_GAS_CONSTANT = 8.314_462 # Unit - J mol-1 K-1
def pressure_of_gas_system(moles: float , kelvin: float , volume: float ) -> float:
"""simple docstring"""
if moles < 0 or kelvin < 0 or volume < 0:
raise ValueError("Invalid inputs. Enter positive value." )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def volume_of_gas_system(moles: float , kelvin: float , pressure: float ) -> float:
"""simple docstring"""
if moles < 0 or kelvin < 0 or pressure < 0:
raise ValueError("Invalid inputs. Enter positive value." )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
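# Minimal usage sketch (assumes the helper names restored above):
# pV = nRT, so 1 mol of an ideal gas at 300 K in 0.0224 m^3 exerts
# pressure_of_gas_system(1, 300, 0.0224) ~= 111_354 Pa, and feeding that
# pressure back in recovers the volume:
# volume_of_gas_system(1, 300, 111_354) ~= 0.0224 m^3.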
| 231 |
"""simple docstring"""
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = FunnelTokenizer
SCREAMING_SNAKE_CASE = FunnelTokenizerFast
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
def _SCREAMING_SNAKE_CASE ( self: Tuple) -> Optional[int]:
"""simple docstring"""
super().setUp()
vocab_tokens = [
"<unk>",
"<cls>",
"<sep>",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , **_SCREAMING_SNAKE_CASE: Union[str, Any]) -> int:
"""simple docstring"""
return FunnelTokenizer.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: Any , **_SCREAMING_SNAKE_CASE: Any) -> str:
"""simple docstring"""
return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE)
def _SCREAMING_SNAKE_CASE ( self: Tuple , _SCREAMING_SNAKE_CASE: str) -> Any:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = "UNwant\u00E9d,running"
__lowerCAmelCase : str = "unwanted, running"
return input_text, output_text
def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> List[str]:
"""simple docstring"""
tokenizer = self.tokenizer_class(self.vocab_file)
tokens = tokenizer.tokenize("UNwant\u00E9d,running")
self.assertListEqual(tokens , ["un", "##want", "##ed", ",", "runn", "##ing"])
self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens) , [7, 4, 5, 10, 8, 9])
def _SCREAMING_SNAKE_CASE ( self: List[str]) -> List[str]:
"""simple docstring"""
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
inputs = tokenizer("UNwant\u00E9d,running")
sentence_len = len(inputs["input_ids"]) - 1
self.assertListEqual(inputs["token_type_ids"] , [2] + [0] * sentence_len)
inputs = tokenizer("UNwant\u00E9d,running" , "UNwant\u00E9d,running")
self.assertListEqual(inputs["token_type_ids"] , [2] + [0] * sentence_len + [1] * sentence_len) | 269 | 0 |
"""simple docstring"""
import os
from collections.abc import Iterator
def good_file_paths(top_dir: str = "." ) -> Iterator[str]:
"""simple docstring"""
for dir_path, dir_names, filenames in os.walk(top_dir ):
dir_names[:] = [d for d in dir_names if d != """scripts""" and d[0] not in """._"""]
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(filename )[1] in (".py", ".ipynb"):
yield os.path.join(dir_path , filename ).lstrip("""./""" )
def md_prefix(i: int ) -> str:
"""simple docstring"""
return f'{i * " "}*' if i else "\n##"
def print_path(old_path: str , new_path: str ) -> str:
"""simple docstring"""
old_parts = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(old_parts ) or old_parts[i] != new_part) and new_part:
print(f'{md_prefix(i )} {new_part.replace("_" , " " ).title()}' )
return new_path
def print_directory_md(top_dir: str = "." ) -> None:
"""simple docstring"""
old_path = """"""
for filepath in sorted(good_file_paths(top_dir ) ):
filepath, filename = os.path.split(filepath )
if filepath != old_path:
old_path = print_path(old_path , filepath )
indent = (filepath.count(os.sep ) + 1) if filepath else 0
url = f'{filepath}/{filename}'.replace(""" """ , """%20""" )
filename = os.path.splitext(filename.replace("""_""" , """ """ ).title() )[0]
print(f'{md_prefix(indent )} [{filename}]({url})' )
if __name__ == "__main__":
print_directory_md(".")
| 77 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=SCREAMING_SNAKE_CASE )
class lowerCamelCase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowerCamelCase = field(default='''text-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
_lowerCamelCase = Features({'''text''': Value('''string''' )} )
_lowerCamelCase = Features({'''labels''': ClassLabel} )
_lowerCamelCase = "text"
_lowerCamelCase = "labels"
def UpperCamelCase__ ( self , features ) -> Dict:
if self.label_column not in features:
raise ValueError(f'Column {self.label_column} is not present in features.' )
if not isinstance(features[self.label_column] , ClassLabel ):
raise ValueError(f'Column {self.label_column} is not a ClassLabel.' )
task_template = copy.deepcopy(self )
label_schema = self.label_schema.copy()
label_schema['''labels'''] = features[self.label_column]
task_template.__dict__['''label_schema'''] = label_schema
return task_template
@property
def UpperCamelCase__ ( self ) -> Dict[str, str]:
return {
self.text_column: "text",
self.label_column: "labels",
}
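# Minimal usage sketch (assumes this is datasets' TextClassification task
# template, where the alignment method above is `align_with_features` in the
# library):
# from datasets import ClassLabel, Features, Value
# features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
# template = TextClassification(text_column="text", label_column="labels")
# aligned = template.align_with_features(features) # label_schema now carries the two names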
| 77 | 1 |
import re
import string
import numpy as np
import datasets
_SCREAMING_SNAKE_CASE = """
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
"""
_SCREAMING_SNAKE_CASE = """
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
ignore_numbers: Boolean, defaults to False. If true, removes all numbers before
comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
25.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
50.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
75.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results[\"exact_match\"], 1))
100.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]
>>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
33.3
"""
_SCREAMING_SNAKE_CASE = """
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE_ ( datasets.Metric ):
def _info( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , reference_urls=[] , )
def _compute( self , predictions , references , regexes_to_ignore=None , ignore_case=False , ignore_punctuation=False , ignore_numbers=False , ):
"""simple docstring"""
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
predictions = np.array([re.sub(s , '' , x ) for x in predictions] )
references = np.array([re.sub(s , '' , x ) for x in references] )
else:
predictions = np.asarray(predictions )
references = np.asarray(references )
if ignore_case:
predictions = np.char.lower(predictions )
references = np.char.lower(references )
if ignore_punctuation:
repl_table = string.punctuation.maketrans('' , '' , string.punctuation )
predictions = np.char.translate(predictions , table=repl_table )
references = np.char.translate(references , table=repl_table )
if ignore_numbers:
repl_table = string.digits.maketrans('' , '' , string.digits )
predictions = np.char.translate(predictions , table=repl_table )
references = np.char.translate(references , table=repl_table )
score_list = predictions == references
return {"exact_match": np.mean(score_list ) * 100}
| 327 |
import sys
N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def solution(n = N ):
largest_product = -sys.maxsize - 1
for i in range(len(n ) - 12 ):
product = 1
for j in range(13 ):
product *= int(n[i + j] )
if product > largest_product:
largest_product = product
return largest_product
if __name__ == "__main__":
print(F'''{solution() = }''')
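# A compact alternative sketch of the same scan (the name `solution_oneliner`
# is illustrative, not from the original source; math.prod needs Python 3.8+):
from math import prod
def solution_oneliner(n: str = N , span: int = 13 ) -> int:
# largest product over every contiguous window of `span` digits
return max(prod(int(d ) for d in n[i : i + span] ) for i in range(len(n ) - span + 1 ) )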
| 327 | 1 |
"""simple docstring"""
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm(main_process_only = True , *args , **kwargs ):
"""simple docstring"""
if not is_tqdm_available():
raise ImportError('''Accelerate\'s `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.''' )
disable = False
if main_process_only:
disable = PartialState().local_process_index != 0
return _tqdm(*args , **kwargs , disable=disable ) | 215 |
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example ):
"""simple docstring"""
output = {}
output['''input_ids'''] = tokenizer(example['''content'''] , truncation=False )['''input_ids''']
output['''ratio_char_token'''] = len(example['''content'''] ) / len(output['''input_ids'''] )
return output
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
t_start = time.time()
ds = load_dataset(args.dataset_name, split="""train""")
print(F'Dataset loaded in {time.time()-t_start:.2f}s')
t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"""repo_name""",
"""path""",
"""copies""",
"""size""",
"""content""",
"""license""",
"""hash""",
"""line_mean""",
"""line_max""",
"""alpha_frac""",
"""autogenerated""",
],
)
print(F'Dataset tokenized in {time.time()-t_start:.2f}s')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'Data pushed to the hub in {time.time()-t_start:.2f}s') | 215 | 1 |
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class a_ (A__ , unittest.TestCase ):
__lowerCAmelCase : int = XLNetTokenizer
__lowerCAmelCase : Optional[int] = XLNetTokenizerFast
__lowerCAmelCase : Optional[int] = True
__lowerCAmelCase : int = True
def __UpperCamelCase ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
tokenizer = XLNetTokenizer(SAMPLE_VOCAB , keep_accents=True )
tokenizer.sanitize_special_tokens()
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[int] = """<s>"""
_lowerCAmelCase : int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__A ) , __A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__A ) , __A )
def __UpperCamelCase ( self ):
vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<unk>""" )
self.assertEqual(vocab_keys[1] , """<s>""" )
self.assertEqual(vocab_keys[-1] , """<eod>""" )
self.assertEqual(len(vocab_keys ) , 1_0_0_6 )
def __UpperCamelCase ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0 )
def __UpperCamelCase ( self ):
tokenizer = XLNetTokenizer(SAMPLE_VOCAB , keep_accents=True )
tokens = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(tokens , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] )
tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
ids = tokenizer.convert_tokens_to_ids(tokens )
self.assertListEqual(ids , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] )
back_tokens = tokenizer.convert_ids_to_tokens(ids )
self.assertListEqual(
back_tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def __UpperCamelCase ( self ):
tokenizer = XLNetTokenizer(SAMPLE_VOCAB , do_lower_case=True )
tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
tokens , [
SPIECE_UNDERLINE + """""",
"""i""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] , )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""▁he""", """ll""", """o"""] )
def __UpperCamelCase ( self ):
tokenizer = XLNetTokenizer(SAMPLE_VOCAB , do_lower_case=False )
tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] , )
@slow
def __UpperCamelCase ( self ):
tokenizer = XLNetTokenizer.from_pretrained("""xlnet-base-cased""" )
text = tokenizer.encode("""sequence builders""" , add_special_tokens=False )
text_a = tokenizer.encode("""multi-sequence build""" , add_special_tokens=False )
encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
def __UpperCamelCase ( self ):
# fmt: off
_lowerCAmelCase : Union[str, Any] = {"""input_ids""": [[1_7, 2_1_4_4_2, 2_7_0, 1_7, 1_0, 1_4_6_4_5, 3_1_8, 3_4, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 7_7_5_2, 2_2_0_1_8, 2_3, 2_1, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 3_3_5_2, 1_4_4_3_1, 1_3, 5_5_0_0, 1_1, 1_1_7_6, 5_8_0, 1_3, 1_6_8_1_9, 4_7_9_7, 2_3, 1_7, 1_0, 1_7_1_3_5, 6_5_8, 1_9, 4_5_7, 7_9_3_2, 1_3, 1_8_4, 1_9, 3_1_5_4, 1_7_1_3_5, 6_4_6_8, 1_9, 1_4_0_4, 1_2_2_6_9, 1_9, 4_2_2_9, 5_3_5_6, 1_6_2_6_4, 4_6, 1_9, 1_7, 2_0_5_4_5, 1_0_3_9_5, 9, 9, 9, 1_1, 2_8, 6_4_2_1, 9_5_3_1, 2_0_7_2_9, 1_7, 1_0, 3_5_3, 1_7_0_2_2, 1_1, 2_1, 6_4_2_1, 9_5_3_1, 1_6_9_4_9, 1_7, 1_0, 1_1_5_0_9, 7_5_3, 1_1, 3_3, 9_5, 2_4_2_1, 7_3_8_5, 9_5_6, 1_4_4_3_1, 2_6_2_6, 2_5, 8_4_2, 7_3_8_5, 4_8_3_6, 2_1, 1_4_2_9, 2_2_7_2, 9_8_5_5, 3_1_2_0, 1_6_1, 2_4_7_3_8, 1_9, 1_3_2_0_3, 6_5_8, 2_1_8, 7_8_7, 2_1, 4_3_0, 1_8_4_8_2, 8_4_7, 2_6_3_7, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2_2, 2_2_1_7_8, 2_7, 1_0_6_4, 2_2, 9_5_6, 1_3, 1_1_1_0_1, 1_4_2_9, 5_8_5_4, 2_4_3_1_3, 1_8_9_5_3, 4_0, 4_2_2, 2_4_3_6_6, 6_8, 1_7_5_8, 3_7, 1_0_4_8_3, 1_4_2_5_7, 3_1, 2_0_7, 2_6_3, 2_1, 2_0_3, 3_7_7_3, 2_5, 7_1, 9_7_3_5, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2, 2_0_4_9, 3_4_4_2, 1_7, 1_3_8_9_4, 3_3_8_0, 2_3, 9_5, 1_8, 1_7_6_3_4, 2_2_8_8, 9, 4, 3]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__A , model_name="""xlnet-base-cased""" , revision="""c841166438c31ec7ca9a106dee7bb312b73ae511""" , )
| 309 |
"""simple docstring"""
def solution(n: int = 1_0 ) -> str:
'''simple docstring'''
if not isinstance(n , int ) or n < 0:
raise ValueError("""Invalid input""" )
modulus = 1_0**n
number = 2_8_4_3_3 * (pow(2 , 7_8_3_0_4_5_7 , modulus )) + 1
return str(number % modulus )
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"""{solution(10) = }""")
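# Why pow's third argument matters (commentary): pow(2, 7_830_457, modulus)
# performs modular exponentiation, roughly log2(7_830_457) ~ 23 squarings on
# n-digit numbers, whereas 2**7_830_457 would materialise a ~2.36-million-digit
# integer before the final "%". Both routes agree:
# solution(10) == str((2_8_4_3_3 * 2**7_8_3_0_4_5_7 + 1) % 1_0**1_0) # slow path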
| 84 | 0 |
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module ):
'''simple docstring'''
if is_torch_version("<" , "2.0.0" ) or not hasattr(torch , "_dynamo" ):
return False
return isinstance(module , torch._dynamo.eval_frame.OptimizedModule )
def extract_model_from_parallel(model , keep_fp32_wrapper = True ):
'''simple docstring'''
options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
is_compiled = is_compiled_module(model )
if is_compiled:
compiled_model = model
model = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(model , options ):
model = model.module
if not keep_fp32_wrapper:
forward = getattr(model , "forward" )
original_forward = model.__dict__.pop("_original_forward" , None )
if original_forward is not None:
while hasattr(forward , "__wrapped__" ):
forward = forward.__wrapped__
if forward == original_forward:
break
model.forward = forward
if getattr(model , "_converted_to_transformer_engine" , False ):
convert_model(model , to_transformer_engine=False )
if is_compiled:
compiled_model._orig_mod = model
model = compiled_model
return model
def wait_for_everyone():
'''simple docstring'''
PartialState().wait_for_everyone()
def save(obj , f ):
'''simple docstring'''
if PartialState().distributed_type == DistributedType.TPU:
xm.save(obj , f )
elif PartialState().local_process_index == 0:
torch.save(obj , f )
@contextmanager
def patch_environment(**kwargs ):
'''simple docstring'''
for key, value in kwargs.items():
os.environ[key.upper()] = str(value )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def get_pretty_name(obj ):
'''simple docstring'''
if not hasattr(obj , "__qualname__" ) and not hasattr(obj , "__name__" ):
obj = getattr(obj , "__class__" , obj )
if hasattr(obj , "__qualname__" ):
return obj.__qualname__
if hasattr(obj , "__name__" ):
return obj.__name__
return str(obj )
def merge_dicts(source , destination ):
'''simple docstring'''
for key, value in source.items():
if isinstance(value , dict ):
node = destination.setdefault(key , {} )
merge_dicts(value , node )
else:
destination[key] = value
return destination
def is_port_in_use(port = None ):
'''simple docstring'''
if port is None:
port = 29_500
with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
return s.connect_ex(("localhost", port) ) == 0
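# Minimal usage sketch for the context manager above (assumes the restored
# accelerate-style name `patch_environment`):
# with patch_environment(omp_num_threads=1, cuda_visible_devices="0"):
#     ... # os.environ["OMP_NUM_THREADS"] == "1" only inside this block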
| 300 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model , ckpt_dir , model_name ):
'''simple docstring'''
tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
var_map = (
("layer.", "layer_"),
("word_embeddings.weight", "word_embeddings"),
("position_embeddings.weight", "position_embeddings"),
("token_type_embeddings.weight", "token_type_embeddings"),
(".", "/"),
("LayerNorm/weight", "LayerNorm/gamma"),
("LayerNorm/bias", "LayerNorm/beta"),
("weight", "kernel"),
)
if not os.path.isdir(ckpt_dir ):
os.makedirs(ckpt_dir )
state_dict = model.state_dict()
def to_tf_var_name(name ):
for patt, repl in iter(var_map ):
name = name.replace(patt , repl )
return F"bert/{name}"
def create_tf_var(tensor , name , session ):
tf_dtype = tf.dtypes.as_dtype(tensor.dtype )
tf_var = tf.get_variable(dtype=tf_dtype , shape=tensor.shape , name=name , initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(tf_var )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
tf_name = to_tf_var_name(var_name )
torch_tensor = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
torch_tensor = torch_tensor.T
tf_var = create_tf_var(tensor=torch_tensor , name=tf_name , session=session )
tf.keras.backend.set_value(tf_var , torch_tensor )
tf_weight = session.run(tf_var )
print(F"Successfully created {tf_name}: {np.allclose(tf_weight , torch_tensor )}" )
saver = tf.train.Saver(tf.trainable_variables() )
saver.save(session , os.path.join(ckpt_dir , model_name.replace("-" , "_" ) + ".ckpt" ) )
def main(raw_args=None ):
'''simple docstring'''
parser = argparse.ArgumentParser()
parser.add_argument("--model_name" , type=str , required=True , help="model name e.g. bert-base-uncased" )
parser.add_argument(
"--cache_dir" , type=str , default=None , required=False , help="Directory containing pytorch model" )
parser.add_argument("--pytorch_model_path" , type=str , required=True , help="/path/to/<pytorch-model-name>.bin" )
parser.add_argument("--tf_cache_dir" , type=str , required=True , help="Directory in which to save tensorflow model" )
args = parser.parse_args(raw_args )
model = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
convert_pytorch_checkpoint_to_tf(model=model , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
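# Example invocation (illustrative; the script filename and paths are placeholders):
# python convert_bert_pytorch_checkpoint_to_original_tf.py \
#     --model_name bert-base-uncased \
#     --pytorch_model_path ./pytorch_model.bin \
#     --tf_cache_dir ./tf_ckpt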
| 300 | 1 |
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class A ( __UpperCAmelCase ):
lowerCamelCase : Union[str, Any] = (DEISMultistepScheduler,)
lowerCamelCase : Optional[Any] = (("""num_inference_steps""", 25),)
def A__ ( self , **lowerCamelCase__ ) -> Optional[int]:
'''simple docstring'''
config = {
"""num_train_timesteps""": 1_000,
"""beta_start""": 0.00_01,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""solver_order""": 2,
}
config.update(**lowerCamelCase__ )
return config
def A__ ( self , lowerCamelCase__=0 , **lowerCamelCase__ ) -> List[Any]:
'''simple docstring'''
lowercase__ = dict(self.forward_default_kwargs )
lowercase__ = kwargs.pop("""num_inference_steps""" , lowerCamelCase__ )
lowercase__ = self.dummy_sample
lowercase__ = 0.1 * sample
lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowercase__ = self.get_scheduler_config(**lowerCamelCase__ )
lowercase__ = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residuals
lowercase__ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase__ )
lowercase__ = scheduler_class.from_pretrained(lowerCamelCase__ )
new_scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residuals
lowercase__ = dummy_past_residuals[: new_scheduler.config.solver_order]
lowercase__ , lowercase__ = sample, sample
for t in range(lowerCamelCase__ , time_step + scheduler.config.solver_order + 1 ):
lowercase__ = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
lowercase__ = new_scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def A__ ( self ) -> List[Any]:
'''simple docstring'''
pass
def A__ ( self , lowerCamelCase__=0 , **lowerCamelCase__ ) -> str:
'''simple docstring'''
lowercase__ = dict(self.forward_default_kwargs )
lowercase__ = kwargs.pop("""num_inference_steps""" , lowerCamelCase__ )
lowercase__ = self.dummy_sample
lowercase__ = 0.1 * sample
lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residuals (must be after setting timesteps)
lowercase__ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase__ )
lowercase__ = scheduler_class.from_pretrained(lowerCamelCase__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residual (must be after setting timesteps)
lowercase__ = dummy_past_residuals[: new_scheduler.config.solver_order]
lowercase__ = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
lowercase__ = new_scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def A__ ( self , lowerCamelCase__=None , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
if scheduler is None:
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config(**lowerCamelCase__ )
lowercase__ = scheduler_class(**lowerCamelCase__ )
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config(**lowerCamelCase__ )
lowercase__ = scheduler_class(**lowerCamelCase__ )
lowercase__ = 10
lowercase__ = self.dummy_model()
lowercase__ = self.dummy_sample_deter
scheduler.set_timesteps(lowerCamelCase__ )
for i, t in enumerate(scheduler.timesteps ):
lowercase__ = model(lowerCamelCase__ , lowerCamelCase__ )
lowercase__ = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ).prev_sample
return sample
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
lowercase__ = dict(self.forward_default_kwargs )
lowercase__ = kwargs.pop("""num_inference_steps""" , lowerCamelCase__ )
for scheduler_class in self.scheduler_classes:
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**lowerCamelCase__ )
lowercase__ = self.dummy_sample
lowercase__ = 0.1 * sample
if num_inference_steps is not None and hasattr(lowerCamelCase__ , """set_timesteps""" ):
scheduler.set_timesteps(lowerCamelCase__ )
elif num_inference_steps is not None and not hasattr(lowerCamelCase__ , """set_timesteps""" ):
lowercase__ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.10]
lowercase__ = dummy_past_residuals[: scheduler.config.solver_order]
lowercase__ = scheduler.timesteps[5]
lowercase__ = scheduler.timesteps[6]
lowercase__ = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
lowercase__ = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
lowercase__ = DEISMultistepScheduler(**self.get_scheduler_config() )
lowercase__ = self.full_loop(scheduler=lowerCamelCase__ )
lowercase__ = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_mean.item() - 0.2_39_16 ) < 1e-3
lowercase__ = DPMSolverSinglestepScheduler.from_config(scheduler.config )
lowercase__ = DPMSolverMultistepScheduler.from_config(scheduler.config )
lowercase__ = UniPCMultistepScheduler.from_config(scheduler.config )
lowercase__ = DEISMultistepScheduler.from_config(scheduler.config )
lowercase__ = self.full_loop(scheduler=lowerCamelCase__ )
lowercase__ = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_mean.item() - 0.2_39_16 ) < 1e-3
def A__ ( self ) -> List[str]:
'''simple docstring'''
for timesteps in [25, 50, 100, 999, 1_000]:
self.check_over_configs(num_train_timesteps=lowerCamelCase__ )
def A__ ( self ) -> Any:
'''simple docstring'''
self.check_over_configs(thresholding=lowerCamelCase__ )
for order in [1, 2, 3]:
for solver_type in ["logrho"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=lowerCamelCase__ , prediction_type=lowerCamelCase__ , sample_max_value=lowerCamelCase__ , algorithm_type="""deis""" , solver_order=lowerCamelCase__ , solver_type=lowerCamelCase__ , )
def A__ ( self ) -> List[Any]:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCamelCase__ )
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=lowerCamelCase__ , solver_type=lowerCamelCase__ , prediction_type=lowerCamelCase__ , algorithm_type=lowerCamelCase__ , )
lowercase__ = self.full_loop(
solver_order=lowerCamelCase__ , solver_type=lowerCamelCase__ , prediction_type=lowerCamelCase__ , algorithm_type=lowerCamelCase__ , )
assert not torch.isnan(lowerCamelCase__ ).any(), "Samples have nan numbers"
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
self.check_over_configs(lower_order_final=lowerCamelCase__ )
self.check_over_configs(lower_order_final=lowerCamelCase__ )
def A__ ( self ) -> Dict:
'''simple docstring'''
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]:
self.check_over_forward(num_inference_steps=lowerCamelCase__ , time_step=0 )
def A__ ( self ) -> List[Any]:
'''simple docstring'''
lowercase__ = self.full_loop()
lowercase__ = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_mean.item() - 0.2_39_16 ) < 1e-3
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
lowercase__ = self.full_loop(prediction_type="""v_prediction""" )
lowercase__ = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_mean.item() - 0.0_91 ) < 1e-3
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config(thresholding=lowerCamelCase__ , dynamic_thresholding_ratio=0 )
lowercase__ = scheduler_class(**lowerCamelCase__ )
lowercase__ = 10
lowercase__ = self.dummy_model()
lowercase__ = self.dummy_sample_deter.half()
scheduler.set_timesteps(lowerCamelCase__ )
for i, t in enumerate(scheduler.timesteps ):
lowercase__ = model(lowerCamelCase__ , lowerCamelCase__ )
lowercase__ = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ).prev_sample
assert sample.dtype == torch.floataa
| 164 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class A ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
lowerCamelCase : Union[str, Any] = StableDiffusionInstructPixaPixPipeline
lowerCamelCase : Dict = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width""", """cross_attention_kwargs"""}
lowerCamelCase : Dict = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
lowerCamelCase : List[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
lowerCamelCase : Any = IMAGE_TO_IMAGE_IMAGE_PARAMS
def A__ ( self ) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0 )
lowercase__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
lowercase__ = PNDMScheduler(skip_prk_steps=lowerCamelCase__ )
torch.manual_seed(0 )
lowercase__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
lowercase__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
lowercase__ = CLIPTextModel(lowerCamelCase__ )
lowercase__ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowercase__ = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def A__ ( self , lowerCamelCase__ , lowerCamelCase__=0 ) -> Optional[Any]:
'''simple docstring'''
lowercase__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
lowercase__ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase__ = Image.fromarray(np.uinta(lowerCamelCase__ ) ).convert("""RGB""" )
if str(lowerCamelCase__ ).startswith("""mps""" ):
lowercase__ = torch.manual_seed(lowerCamelCase__ )
else:
lowercase__ = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
lowercase__ = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""image_guidance_scale""": 1,
"""output_type""": """numpy""",
}
return inputs
def A__ ( self ) -> str:
'''simple docstring'''
lowercase__ = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase__ = self.get_dummy_components()
lowercase__ = StableDiffusionInstructPixaPixPipeline(**lowerCamelCase__ )
lowercase__ = sd_pipe.to(lowerCamelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
lowercase__ = self.get_dummy_inputs(lowerCamelCase__ )
lowercase__ = sd_pipe(**lowerCamelCase__ ).images
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowercase__ = np.array([0.75_26, 0.37_50, 0.45_47, 0.61_17, 0.58_66, 0.50_16, 0.43_27, 0.56_42, 0.48_15] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase__ = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase__ = self.get_dummy_components()
lowercase__ = StableDiffusionInstructPixaPixPipeline(**lowerCamelCase__ )
lowercase__ = sd_pipe.to(lowerCamelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
lowercase__ = self.get_dummy_inputs(lowerCamelCase__ )
lowercase__ = """french fries"""
lowercase__ = sd_pipe(**lowerCamelCase__ , negative_prompt=lowerCamelCase__ )
lowercase__ = output.images
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowercase__ = np.array([0.75_11, 0.36_42, 0.45_53, 0.62_36, 0.57_97, 0.50_13, 0.43_43, 0.56_11, 0.48_31] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase__ = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase__ = self.get_dummy_components()
lowercase__ = StableDiffusionInstructPixaPixPipeline(**lowerCamelCase__ )
lowercase__ = sd_pipe.to(lowerCamelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
lowercase__ = self.get_dummy_inputs(lowerCamelCase__ )
lowercase__ = [inputs["""prompt"""]] * 2
lowercase__ = np.array(inputs["""image"""] ).astype(np.floataa ) / 2_55.0
lowercase__ = torch.from_numpy(lowerCamelCase__ ).unsqueeze(0 ).to(lowerCamelCase__ )
lowercase__ = image / 2 + 0.5
lowercase__ = image.permute(0 , 3 , 1 , 2 )
lowercase__ = image.repeat(2 , 1 , 1 , 1 )
lowercase__ = sd_pipe(**lowerCamelCase__ ).images
lowercase__ = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
lowercase__ = np.array([0.58_12, 0.57_48, 0.52_22, 0.59_08, 0.56_95, 0.71_74, 0.68_04, 0.55_23, 0.55_79] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def A__ ( self ) -> Any:
'''simple docstring'''
lowercase__ = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase__ = self.get_dummy_components()
lowercase__ = EulerAncestralDiscreteScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" )
lowercase__ = StableDiffusionInstructPixaPixPipeline(**lowerCamelCase__ )
lowercase__ = sd_pipe.to(lowerCamelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
lowercase__ = self.get_dummy_inputs(lowerCamelCase__ )
lowercase__ = sd_pipe(**lowerCamelCase__ ).images
lowercase__ = image[0, -3:, -3:, -1]
slice = [round(x , 4 ) for x in image_slice.flatten().tolist()]
print(""",""".join([str(x ) for x in slice] ) )
assert image.shape == (1, 32, 32, 3)
lowercase__ = np.array([0.74_17, 0.38_42, 0.47_32, 0.57_76, 0.58_91, 0.51_39, 0.40_52, 0.56_73, 0.49_86] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def A__ ( self ) -> Tuple:
'''simple docstring'''
lowercase__ = self.get_dummy_components()
lowercase__ = StableDiffusionInstructPixaPixPipeline(**lowerCamelCase__ )
lowercase__ = VaeImageProcessor(do_resize=lowerCamelCase__ , do_normalize=lowerCamelCase__ )
lowercase__ = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
lowercase__ = pipe(**self.get_dummy_inputs_by_type(lowerCamelCase__ , input_image_type="""pt""" ) )[0]
lowercase__ = components["""vae"""]
lowercase__ = self.get_dummy_inputs_by_type(lowerCamelCase__ , input_image_type="""pt""" )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
lowercase__ = vae.encode(inputs[image_param] ).latent_dist.mode()
lowercase__ = pipe(**lowerCamelCase__ )[0]
lowercase__ = np.abs(out - out_latents_inputs ).max()
self.assertLess(lowerCamelCase__ , 1e-4 , """passing latents as image input generate different result from passing image""" )
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg")
        inputs = {
            "prompt": "turn him into a cyborg",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "image_guidance_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_pix2pix_default(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555])
        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_k_lms(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None)
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301])
        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_ddim(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None)
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753])
        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step, timestep, latents) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        inputs = self.get_inputs()
        _ = pipe(**inputs)
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.2 GB is allocated
        assert mem_bytes < 2.2 * 10**9
    def test_stable_diffusion_pix2pix_pipeline_multiple_of_8(self):
        inputs = self.get_inputs()
        # resize to resolution that is divisible by 8 but not 16 or 32
        inputs["image"] = inputs["image"].resize((504, 504))
        model_id = "timbrooks/instruct-pix2pix"
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            model_id, safety_checker=None,)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        output = pipe(**inputs)
        image = output.images[0]
        image_slice = image[255:258, 383:386, -1]
        assert image.shape == (504, 504, 3)
        expected_slice = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
| 164 | 1 |
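The tests in the block above all follow the same fingerprint pattern: seed a torch.Generator, run the pipeline, then compare a 3x3 corner slice of the output image against stored reference values within an absolute tolerance. Below is a minimal, self-contained sketch of that pattern with a random tensor standing in for the pipeline output; `fake_pipeline_output` and `check_slice` are illustrative names, not diffusers APIs.

import numpy as np
import torch

def fake_pipeline_output(seed: int = 0) -> np.ndarray:
    generator = torch.manual_seed(seed)
    # stand-in for pipe(**inputs).images: one 32x32 RGB image with values in [0, 1]
    return torch.rand((1, 32, 32, 3), generator=generator).numpy()

def check_slice(image: np.ndarray, expected_slice: np.ndarray, atol: float = 1e-3) -> None:
    # take the bottom-right 3x3 patch of the last channel, as the tests above do
    image_slice = image[0, -3:, -3:, -1]
    assert np.abs(image_slice.flatten() - expected_slice).max() < atol

image = fake_pipeline_output(seed=0)
check_slice(image, image[0, -3:, -3:, -1].flatten())  # trivially passes against itself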
from math import log2


def lowest_set_bit_index(a: int) -> int:
    """Return the index of the lowest set bit of `a` (0 for a == 0)."""
    if a < 0:
        raise ValueError("Input value must be a positive integer")
    elif isinstance(a, float):
        raise TypeError("Input value must be a 'int' type")
    return 0 if (a == 0) else int(log2(a & -a))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 367 |
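The `a & -a` expression above isolates the lowest set bit of `a` in two's complement, so `int(log2(a & -a))` counts the trailing zero bits exactly (powers of two are represented exactly in floats). A short sketch cross-checking that identity against a plain bit loop; the names here are mine:

from math import log2

def trailing_zeros(a: int) -> int:
    return 0 if a == 0 else int(log2(a & -a))

def trailing_zeros_loop(a: int) -> int:
    count = 0
    while a and a % 2 == 0:
        a //= 2
        count += 1
    return count

for n in range(1, 1000):
    assert trailing_zeros(n) == trailing_zeros_loop(n)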
from math import isclose, sqrt


def next_point(point_x: float, point_y: float, incoming_gradient: float) -> tuple[float, float, float]:
    # gradient of the normal at the point of incidence, then reflect the
    # incoming gradient across it using the double-angle identities
    normal_gradient = point_y / 4 / point_x
    s2 = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    c2 = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (s2 - c2 * incoming_gradient) / (c2 + s2 * incoming_gradient)
    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100
    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)
    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)
    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1
    return num_reflections


if __name__ == "__main__":
    print(f"{solution() = }")
| 279 | 0 |
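Every point the reflected beam visits must stay on the ellipse 4x^2 + y^2 = 100, which gives a cheap invariant for sanity-checking the solver above. A sketch assuming only the ellipse equation:

from math import isclose

def on_ellipse(x: float, y: float) -> bool:
    # 4x^2 + y^2 = 100 is the white-cell boundary used by the solver
    return isclose(4 * x * x + y * y, 100.0, rel_tol=1e-9)

assert on_ellipse(1.4, -9.6)   # the first impact point used by solution()
assert on_ellipse(0.0, 10.0)   # the top of the ellipse, near the exit gap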
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        attention_window=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window
        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2
        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            attention_window=self.attention_window,
            **self.config_updates,
        )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]], axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
        # append to next input_ids and attention_mask
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
def test_attention_outputs( self ):
'''simple docstring'''
__a , __a =self.model_tester.prepare_config_and_inputs_for_common()
__a =tf.zeros_like(inputs_dict['attention_mask'] )
__a =2
__a =tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['global_attention_mask'] , )
__a =True
__a =self.model_tester.seq_length
__a =self.model_tester.encoder_seq_length
def check_decoder_attentions_output(__snake_case ):
__a =outputs.decoder_attentions
self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(__snake_case ):
__a =[t.numpy() for t in outputs.encoder_attentions]
__a =[t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(__snake_case ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
__a =True
__a =False
__a =False
__a =model_class(__snake_case )
__a =model(self._prepare_for_class(__snake_case , __snake_case ) )
__a =len(__snake_case )
self.assertEqual(config.output_hidden_states , __snake_case )
check_encoder_attentions_output(__snake_case )
if self.is_encoder_decoder:
__a =model_class(__snake_case )
__a =model(self._prepare_for_class(__snake_case , __snake_case ) )
self.assertEqual(config.output_hidden_states , __snake_case )
check_decoder_attentions_output(__snake_case )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__a =True
__a =model_class(__snake_case )
__a =model(self._prepare_for_class(__snake_case , __snake_case ) )
self.assertEqual(config.output_hidden_states , __snake_case )
check_encoder_attentions_output(__snake_case )
# Check attention is always last and order is fine
__a =True
__a =True
__a =model_class(__snake_case )
__a =model(self._prepare_for_class(__snake_case , __snake_case ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__snake_case ) )
self.assertEqual(model.config.output_hidden_states , __snake_case )
check_encoder_attentions_output(__snake_case )
    @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing.")
    def test_saved_model_creation(self):
        pass
    def test_generate_with_headmasking(self):
        # TODO: Head-masking not yet implemented
        pass
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
TOLERANCE = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)
    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
| 218 |
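The LED tests above repeatedly build a global attention mask that marks the final token of each sequence as globally attending while the rest stay local. A minimal standalone sketch of that construction in plain TensorFlow, with hypothetical input ids:

import tensorflow as tf

input_ids = tf.constant([[5, 6, 7, 2], [8, 9, 2, 2]])
# zeros everywhere except a one on the last position of every row
global_attention_mask = tf.concat(
    [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]], axis=-1
)
print(global_attention_mask.numpy())  # [[0 0 0 1], [0 0 0 1]]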
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Optional[int] = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Optional[int] = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_llama"] = [
"LlamaForCausalLM",
"LlamaModel",
"LlamaPreTrainedModel",
"LlamaForSequenceClassification",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 218 | 1 |
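The module above defers heavy imports until an attribute is first accessed. Below is a simplified stand-in for the idea behind transformers' `_LazyModule`; it is a sketch of the mechanism, not the real implementation.

import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported class to the submodule that defines it
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, name: str):
        if name not in self._class_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {name!r}")
        # import the submodule only now, on first access
        module = importlib.import_module("." + self._class_to_module[name], self.__name__)
        return getattr(module, name)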
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON."""

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None


class Json(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = JsonConfig
    def _info(self):
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore.")
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table
    def _generate_tables(self, files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)
                # We keep only the field we are interested in
                dataset = dataset[self.config.field]
                # We accept two formats: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)
            # If the file has one json object per line
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
                                    )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.")
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors
                                ) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contains the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
                                ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
| 364 |
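The core trick in `_generate_tables` above is retrying `paj.read_json` with a doubled `block_size` whenever a JSON record straddles a block boundary. A compact sketch of that loop with an in-memory newline-delimited payload (assumes pyarrow is installed):

import io
import pyarrow as pa
import pyarrow.json as paj

payload = b'{"a": 1}\n{"a": 2}\n'
block_size = 16  # deliberately tiny to exercise the retry path
while True:
    try:
        table = paj.read_json(io.BytesIO(payload), read_options=paj.ReadOptions(block_size=block_size))
        break
    except (pa.ArrowInvalid, pa.ArrowNotImplementedError):
        block_size *= 2  # retry with a larger block
print(table.num_rows)  # 2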
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_megatron_bert""": ["""MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MegatronBertConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_megatron_bert"] = [
"""MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MegatronBertForCausalLM""",
"""MegatronBertForMaskedLM""",
"""MegatronBertForMultipleChoice""",
"""MegatronBertForNextSentencePrediction""",
"""MegatronBertForPreTraining""",
"""MegatronBertForQuestionAnswering""",
"""MegatronBertForSequenceClassification""",
"""MegatronBertForTokenClassification""",
"""MegatronBertModel""",
"""MegatronBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 193 | 0 |
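The try/except blocks above guard optional backends. A generic sketch of the same pattern using `importlib.util.find_spec` in place of transformers' `is_torch_available()`:

import importlib.util

def is_available(pkg: str) -> bool:
    # True when the package can be imported, without actually importing it
    return importlib.util.find_spec(pkg) is not None

_import_structure = {"configuration_x": ["XConfig"]}
if is_available("torch"):
    _import_structure["modeling_x"] = ["XModel"]
print(_import_structure)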
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)
model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}
def parse_args():
"""simple docstring"""
parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
parser.add_argument(
'''--validation_file''' , type=__A , default=__A , help='''A csv or a json file containing the validation data.''' )
parser.add_argument(
'''--max_length''' , type=__A , default=5 , help='''The maximum total input sequence length after tokenization.''' , )
parser.add_argument(
'''--num_beams''' , type=__A , default=__A , help=(
'''Number of beams to use for evaluation. This argument will be '''
'''passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'''
) , )
parser.add_argument(
'''--model_name_or_path''' , type=__A , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=__A , )
parser.add_argument(
'''--config_name''' , type=__A , default=__A , help='''Pretrained config name or path if not the same as model_name''' , )
parser.add_argument(
'''--device''' , type=__A , default='''cpu''' , help='''Device where the model will be run''' , )
parser.add_argument('''--output_file_path''' , type=__A , default=__A , help='''Where to store the final ONNX file.''' )
args = parser.parse_args()
return args
def A (__A : List[str] , __A : Optional[Any]="cpu" ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ = model_dict[model_name].from_pretrained(__A ).to(__A )
UpperCAmelCase_ = tokenizer_dict[model_name].from_pretrained(__A )
if model_name in ["facebook/bart-base"]:
UpperCAmelCase_ = 0
UpperCAmelCase_ = None
UpperCAmelCase_ = 0
return huggingface_model, tokenizer
def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device)

        summary_ids = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )

        torch.onnx.export(
            bart_script_model,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
            output_names=["output_ids"],
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            },
            example_outputs=summary_ids,
        )
        logger.info("Model exported to {}".format(onnx_file_path))

        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))
        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )
        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)
        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")
def main():
    args = parse_args()
    max_length = 5
    num_beams = 4
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO,)
    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()
    device = torch.device(args.device)
    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")
    model.to(device)
    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"
    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)
if __name__ == "__main__":
main()
| 51 |
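A hedged, tiny version of the export-then-validate round trip the script above performs, using a one-layer linear model instead of BART (assumes torch, numpy, and onnxruntime are installed; the file name "tiny.onnx" is arbitrary):

import numpy as np
import torch
import onnxruntime

model = torch.nn.Linear(4, 2)
model.eval()
dummy = torch.randn(1, 4)
# export the torch module to an ONNX graph
torch.onnx.export(model, (dummy,), "tiny.onnx", input_names=["x"], output_names=["y"])

# run the exported graph and compare against the torch output
sess = onnxruntime.InferenceSession("tiny.onnx", providers=["CPUExecutionProvider"])
ort_out = sess.run(None, {"x": dummy.numpy()})
with torch.no_grad():
    torch_out = model(dummy).numpy()
np.testing.assert_allclose(torch_out, ort_out[0], rtol=1e-3, atol=1e-5)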
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
snake_case_ : Dict = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Tuple = ["MBartTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : str = ["MBartTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_mbart"] = [
"MBART_PRETRAINED_MODEL_ARCHIVE_LIST",
"MBartForCausalLM",
"MBartForConditionalGeneration",
"MBartForQuestionAnswering",
"MBartForSequenceClassification",
"MBartModel",
"MBartPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_mbart"] = [
"TFMBartForConditionalGeneration",
"TFMBartModel",
"TFMBartPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_flax_mbart"] = [
"FlaxMBartForConditionalGeneration",
"FlaxMBartForQuestionAnswering",
"FlaxMBartForSequenceClassification",
"FlaxMBartModel",
"FlaxMBartPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 51 | 1 |
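The closing `sys.modules` assignment is what makes the lazy import take effect: after it runs, importing the package yields the proxy object instead of the original module. A sketch with a plain namespace standing in for `_LazyModule` (the module name here is hypothetical):

import sys
import types

proxy = types.SimpleNamespace(MBartConfig="stand-in")
sys.modules["fake_mbart_module"] = proxy  # what the final `else:` branch does

import fake_mbart_module  # the import system resolves this from sys.modules
print(fake_mbart_module.MBartConfig)  # stand-in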
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
__snake_case : Optional[str] = field(
default=None ,metadata={
"""help""": (
"""The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."""
)
} ,)
__snake_case : Optional[str] = field(
default=lowercase_ ,metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(lowercase_ )} ,)
__snake_case : Optional[str] = field(
default=lowercase_ ,metadata={
"""help""": (
"""Override some existing default config settings when a model is trained from scratch. Example: """
"""n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"""
)
} ,)
__snake_case : Optional[str] = field(
default=lowercase_ ,metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
__snake_case : Optional[str] = field(
default=lowercase_ ,metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
__snake_case : Optional[str] = field(
default=lowercase_ ,metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} ,)
__snake_case : bool = field(
default=lowercase_ ,metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} ,)
__snake_case : str = field(
default="""main""" ,metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} ,)
__snake_case : bool = field(
default=lowercase_ ,metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} ,)
def __post_init__( self ):
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
"""--config_overrides can't be used in combination with --config_name or --model_name_or_path""" )
@dataclass
class DataTrainingArguments:
__snake_case : Optional[str] = field(
default=lowercase_ ,metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} )
__snake_case : Optional[str] = field(
default=lowercase_ ,metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
__snake_case : Optional[str] = field(default=lowercase_ ,metadata={"""help""": """The input training data file (a text file)."""} )
__snake_case : Optional[str] = field(
default=lowercase_ ,metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} ,)
__snake_case : Optional[str] = field(
default=lowercase_ ,metadata={"""help""": """An optional input train ref data file for whole word masking in Chinese."""} ,)
__snake_case : Optional[str] = field(
default=lowercase_ ,metadata={"""help""": """An optional input validation ref data file for whole word masking in Chinese."""} ,)
__snake_case : bool = field(
default=lowercase_ ,metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
__snake_case : Optional[int] = field(
default=5 ,metadata={
"""help""": """The percentage of the train set used as validation set in case there's no validation split"""
} ,)
__snake_case : Optional[int] = field(
default=lowercase_ ,metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated. Default to the max input length of the model."""
)
} ,)
__snake_case : Optional[int] = field(
default=lowercase_ ,metadata={"""help""": """The number of processes to use for the preprocessing."""} ,)
__snake_case : float = field(
default=0.15 ,metadata={"""help""": """Ratio of tokens to mask for masked language modeling loss"""} )
__snake_case : bool = field(
default=lowercase_ ,metadata={
"""help""": (
"""Whether to pad all samples to `max_seq_length`. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch."""
)
} ,)
def __post_init__( self ):
if self.train_file is not None:
extension = self.train_file.split(""".""" )[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
extension = self.validation_file.split(""".""" )[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)
    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
def main():
'''simple docstring'''
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
lowerCAmelCase_ : Optional[Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCAmelCase_ : int = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
+ f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}' )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , lowercase__ )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
lowerCAmelCase_ : Tuple = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
if "validation" not in datasets.keys():
lowerCAmelCase_ : str = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=f'train[:{data_args.validation_split_percentage}%]' , )
lowerCAmelCase_ : List[Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=f'train[{data_args.validation_split_percentage}%:]' , )
else:
lowerCAmelCase_ : Tuple = {}
if data_args.train_file is not None:
lowerCAmelCase_ : List[str] = data_args.train_file
if data_args.validation_file is not None:
lowerCAmelCase_ : str = data_args.validation_file
lowerCAmelCase_ : Optional[Any] = data_args.train_file.split(""".""" )[-1]
if extension == "txt":
lowerCAmelCase_ : Tuple = """text"""
lowerCAmelCase_ : Tuple = load_dataset(lowercase__ , data_files=lowercase__ )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCAmelCase_ : Any = {
"""cache_dir""": model_args.cache_dir,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.config_name:
lowerCAmelCase_ : List[str] = AutoConfig.from_pretrained(model_args.config_name , **lowercase__ )
elif model_args.model_name_or_path:
lowerCAmelCase_ : int = AutoConfig.from_pretrained(model_args.model_name_or_path , **lowercase__ )
else:
lowerCAmelCase_ : Tuple = CONFIG_MAPPING[model_args.model_type]()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.config_overrides is not None:
logger.info(f'Overriding config: {model_args.config_overrides}' )
config.update_from_string(model_args.config_overrides )
logger.info(f'New config: {config}' )
lowerCAmelCase_ : Optional[int] = {
"""cache_dir""": model_args.cache_dir,
"""use_fast""": model_args.use_fast_tokenizer,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
lowerCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **lowercase__ )
elif model_args.model_name_or_path:
lowerCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **lowercase__ )
else:
raise ValueError(
"""You are instantiating a new tokenizer from scratch. This is not supported by this script."""
"""You can do it from another script, save it, and load it from here, using --tokenizer_name.""" )
if model_args.model_name_or_path:
lowerCAmelCase_ : Optional[int] = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowercase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("""Training new model from scratch""" )
lowerCAmelCase_ : int = AutoModelForMaskedLM.from_config(lowercase__ )
model.resize_token_embeddings(len(lowercase__ ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
lowerCAmelCase_ : Optional[Any] = datasets["""train"""].column_names
else:
lowerCAmelCase_ : List[str] = datasets["""validation"""].column_names
lowerCAmelCase_ : Optional[int] = """text""" if """text""" in column_names else column_names[0]
lowerCAmelCase_ : Any = """max_length""" if data_args.pad_to_max_length else False
def tokenize_function(lowercase__ : str ):
# Remove empty lines
lowerCAmelCase_ : List[str] = [line for line in examples["""text"""] if len(lowercase__ ) > 0 and not line.isspace()]
return tokenizer(examples["""text"""] , padding=lowercase__ , truncation=lowercase__ , max_length=data_args.max_seq_length )
lowerCAmelCase_ : Tuple = datasets.map(
lowercase__ , batched=lowercase__ , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
# Add the chinese references if provided
if data_args.train_ref_file is not None:
lowerCAmelCase_ : str = add_chinese_references(tokenized_datasets["""train"""] , data_args.train_ref_file )
if data_args.validation_ref_file is not None:
lowerCAmelCase_ : Tuple = add_chinese_references(
tokenized_datasets["""validation"""] , data_args.validation_ref_file )
# If we have ref files, need to avoid it removed by trainer
lowerCAmelCase_ : Optional[Any] = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
lowerCAmelCase_ : Any = False
# Data collator
# This one will take care of randomly masking the tokens.
lowerCAmelCase_ : Dict = DataCollatorForWholeWordMask(tokenizer=lowercase__ , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
lowerCAmelCase_ : str = Trainer(
model=lowercase__ , args=lowercase__ , train_dataset=tokenized_datasets["""train"""] if training_args.do_train else None , eval_dataset=tokenized_datasets["""validation"""] if training_args.do_eval else None , tokenizer=lowercase__ , data_collator=lowercase__ , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
lowerCAmelCase_ : Optional[Any] = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
lowerCAmelCase_ : int = model_args.model_name_or_path
else:
lowerCAmelCase_ : List[Any] = None
lowerCAmelCase_ : Optional[Any] = trainer.train(resume_from_checkpoint=lowercase__ )
trainer.save_model() # Saves the tokenizer too for easy upload
lowerCAmelCase_ : str = os.path.join(training_args.output_dir , """train_results.txt""" )
if trainer.is_world_process_zero():
with open(lowercase__ , """w""" ) as writer:
logger.info("""***** Train results *****""" )
for key, value in sorted(train_result.metrics.items() ):
logger.info(f' {key} = {value}' )
writer.write(f'{key} = {value}\n' )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) )
# Evaluation
lowerCAmelCase_ : Union[str, Any] = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
lowerCAmelCase_ : int = trainer.evaluate()
lowerCAmelCase_ : Any = math.exp(eval_output["""eval_loss"""] )
lowerCAmelCase_ : Union[str, Any] = perplexity
lowerCAmelCase_ : Dict = os.path.join(training_args.output_dir , """eval_results_mlm_wwm.txt""" )
if trainer.is_world_process_zero():
with open(lowercase__ , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key, value in sorted(results.items() ):
logger.info(f' {key} = {value}' )
writer.write(f'{key} = {value}\n' )
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 353 |
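A minimal sketch of the whole-word-masking collator the script wires into the `Trainer`. This is an illustration, not part of the script; it downloads the `bert-base-uncased` tokenizer on first run.

from transformers import AutoTokenizer, DataCollatorForWholeWordMask

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=0.15)
# the collator masks all sub-tokens of a randomly chosen word together
batch = collator([tokenizer("a whole word masking example")])
print(batch["input_ids"].shape, batch["labels"].shape)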
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    order = 2

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max
        # setable values
        self.num_inference_steps: int = None
        self.timesteps: np.IntTensor = None
        self.schedule: torch.FloatTensor = None  # sigma(t_i)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)

    def add_noise_to_input(
        self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None
    ) -> Tuple[torch.FloatTensor, float]:
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0
        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat

    def step(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        return_dict: bool = True,
    ):
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        sample_prev: torch.FloatTensor,
        derivative: torch.FloatTensor,
        return_dict: bool = True,
    ):
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def add_noise(self, original_samples, noise, timesteps):
        raise NotImplementedError()
| 28 | 0 |
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
return int((input_a, input_a).count(0 ) == 0 )
def UpperCamelCase ( ):
'''simple docstring'''
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 101 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ :str = logging.get_logger(__name__)
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
lowercase = '''huggingface/label-files'''
lowercase = '''imagenet-1k-id2label.json'''
lowercase = json.load(open(hf_hub_download(lowerCAmelCase__ , lowerCAmelCase__ , repo_type='''dataset''' ) , '''r''' ) )
lowercase = {int(lowerCAmelCase__ ): v for k, v in idalabel.items()}
lowercase = {v: k for k, v in idalabel.items()}
lowercase = '''std_conv''' if '''bit''' in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
lowercase = BitConfig(
conv_layer=lowerCAmelCase__ , num_labels=1000 , idalabel=lowerCAmelCase__ , labelaid=lowerCAmelCase__ , )
return config
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
if "stem.conv" in name:
lowercase = name.replace('''stem.conv''' , '''bit.embedder.convolution''' )
if "blocks" in name:
lowercase = name.replace('''blocks''' , '''layers''' )
if "head.fc" in name:
lowercase = name.replace('''head.fc''' , '''classifier.1''' )
if name.startswith('''norm''' ):
lowercase = '''bit.''' + name
if "bit" not in name and "classifier" not in name:
lowercase = '''bit.encoder.''' + name
return name
def UpperCamelCase ( ):
'''simple docstring'''
lowercase = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowercase = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw )
return im
@torch.no_grad()
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False ):
'''simple docstring'''
lowercase = get_config(lowerCAmelCase__ )
# load original model from timm
lowercase = create_model(lowerCAmelCase__ , pretrained=lowerCAmelCase__ )
timm_model.eval()
# load state_dict of original model
lowercase = timm_model.state_dict()
for key in state_dict.copy().keys():
lowercase = state_dict.pop(lowerCAmelCase__ )
lowercase = val.squeeze() if '''head''' in key else val
# load HuggingFace model
lowercase = BitForImageClassification(lowerCAmelCase__ )
model.eval()
model.load_state_dict(lowerCAmelCase__ )
# create image processor
lowercase = create_transform(**resolve_data_config({} , model=lowerCAmelCase__ ) )
lowercase = transform.transforms
lowercase = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
lowercase = BitImageProcessor(
do_resize=lowerCAmelCase__ , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=lowerCAmelCase__ , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=lowerCAmelCase__ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
lowercase = prepare_img()
lowercase = transform(lowerCAmelCase__ ).unsqueeze(0 )
lowercase = processor(lowerCAmelCase__ , return_tensors='''pt''' ).pixel_values
# verify pixel values
assert torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ )
# verify logits
with torch.no_grad():
lowercase = model(lowerCAmelCase__ )
lowercase = outputs.logits
print('''Logits:''' , logits[0, :3] )
print('''Predicted class:''' , model.config.idalabel[logits.argmax(-1 ).item()] )
lowercase = timm_model(lowerCAmelCase__ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(lowerCAmelCase__ , outputs.logits , atol=1E-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
Path(lowerCAmelCase__ ).mkdir(exist_ok=lowerCAmelCase__ )
print(f'Saving model {model_name} and processor to {pytorch_dump_folder_path}' )
model.save_pretrained(lowerCAmelCase__ )
processor.save_pretrained(lowerCAmelCase__ )
if push_to_hub:
print(f'Pushing model {model_name} and processor to the hub' )
model.push_to_hub(f'ybelkada/{model_name}' )
processor.push_to_hub(f'ybelkada/{model_name}' )
if __name__ == "__main__":
lowercase__ :List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="resnetv2_50x1_bitm",
type=str,
help="Name of the BiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model to the hub.",
)
lowercase__ :List[str] = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 101 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 134 |
from ..utils import DummyObject, requires_backends
class a (metaclass=_lowerCAmelCase ):
"""simple docstring"""
__UpperCAmelCase : int = ["speech"]
def __init__( self : List[Any] , *lowerCamelCase : List[Any] , **lowerCamelCase : Optional[Any] ) -> Dict:
requires_backends(self , ["speech"] )
class a (metaclass=_lowerCAmelCase ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = ["speech"]
def __init__( self : int , *lowerCamelCase : List[Any] , **lowerCamelCase : List[Any] ) -> Optional[int]:
requires_backends(self , ["speech"] )
| 134 | 1 |
"""simple docstring"""
def __magic_name__ ( lowercase ):
if length <= 0 or not isinstance(_A , _A ):
raise ValueError("""Length must be a positive integer.""" )
return [n * (2 * n - 1) for n in range(_A )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=1_0))
| 173 | '''simple docstring'''
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
__lowercase = {
'''susnato/ernie-m-base_pytorch''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json''',
'''susnato/ernie-m-large_pytorch''': '''https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json''',
}
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = '''ernie_m'''
UpperCAmelCase_ : Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self , __lowerCAmelCase = 250002 , __lowerCAmelCase = 768 , __lowerCAmelCase = 12 , __lowerCAmelCase = 12 , __lowerCAmelCase = 3072 , __lowerCAmelCase = "gelu" , __lowerCAmelCase = 0.1 , __lowerCAmelCase = 0.1 , __lowerCAmelCase = 514 , __lowerCAmelCase = 0.02 , __lowerCAmelCase = 1 , __lowerCAmelCase = 1E-0_5 , __lowerCAmelCase=None , __lowerCAmelCase=False , __lowerCAmelCase=0.0 , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(pad_token_id=__lowerCAmelCase , **__lowerCAmelCase)
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = initializer_range
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = classifier_dropout
lowerCAmelCase = is_decoder
lowerCAmelCase = act_dropout
| 272 | 0 |
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class __UpperCamelCase :
def __init__( self , lowerCAmelCase__ = "cpu" , lowerCAmelCase__ = "openai/clip-vit-large-patch14" ) -> None:
a : str = device
a : int = CLIPTokenizerFast.from_pretrained(lowerCAmelCase__ )
a : Union[str, Any] = [0.48_145_466, 0.4_578_275, 0.40_821_073]
a : Tuple = [0.26_862_954, 0.26_130_258, 0.27_577_711]
a : int = torchvision.transforms.Normalize(self.image_mean , self.image_std )
a : Optional[int] = torchvision.transforms.Resize(224 )
a : List[str] = torchvision.transforms.CenterCrop(224 )
def __a ( self , lowerCAmelCase__ ) -> Tuple:
a : List[str] = self.resize(lowerCAmelCase__ )
a : str = self.center_crop(lowerCAmelCase__ )
a : Optional[Any] = self.normalize(lowerCAmelCase__ )
return images
def __call__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , **lowerCAmelCase__ ) -> Any:
a : Union[str, Any] = self.tokenizer(text=lowerCAmelCase__ , **lowerCAmelCase__ )
a : int = self.preprocess_img(lowerCAmelCase__ )
a : int = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class __UpperCamelCase ( nn.Module ):
def __init__( self , lowerCAmelCase__=10 , lowerCAmelCase__=0.01 , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=False , lowerCAmelCase__=True , lowerCAmelCase__="image" , lowerCAmelCase__=True , lowerCAmelCase__=False , lowerCAmelCase__=False , lowerCAmelCase__=False , ) -> None:
super().__init__()
a : Any = None
a : Any = device if device else get_device()
if vqgan:
a : Dict = vqgan
else:
a : Dict = load_vqgan(self.device , conf_path=lowerCAmelCase__ , ckpt_path=lowerCAmelCase__ )
self.vqgan.eval()
if clip:
a : int = clip
else:
a : Union[str, Any] = CLIPModel.from_pretrained("openai/clip-vit-base-patch32" )
self.clip.to(self.device )
a : Any = ProcessorGradientFlow(device=self.device )
a : List[str] = iterations
a : List[Any] = lr
a : List[str] = log
a : Dict = make_grid
a : Union[str, Any] = return_val
a : Optional[int] = quantize
a : Any = self.vqgan.decoder.z_shape
def __a ( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=5 , lowerCAmelCase__=True ) -> int:
a : Dict = []
if output_path is None:
a : Union[str, Any] = "./animation.gif"
if input_path is None:
a : Dict = self.save_path
a : List[str] = sorted(glob(input_path + "/*" ) )
if not len(lowerCAmelCase__ ):
raise ValueError(
"No images found in save path, aborting (did you pass save_intermediate=True to the generate"
" function?)" )
if len(lowerCAmelCase__ ) == 1:
print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)" )
a : List[str] = total_duration / len(lowerCAmelCase__ )
a : List[str] = [frame_duration] * len(lowerCAmelCase__ )
if extend_frames:
a : Tuple = 1.5
a : List[str] = 3
for file_name in paths:
if file_name.endswith(".png" ):
images.append(imageio.imread(lowerCAmelCase__ ) )
imageio.mimsave(lowerCAmelCase__ , lowerCAmelCase__ , duration=lowerCAmelCase__ )
print(f"""gif saved to {output_path}""" )
def __a ( self , lowerCAmelCase__=None , lowerCAmelCase__=None ) -> Dict:
if not (path or img):
raise ValueError("Input either path or tensor" )
if img is not None:
raise NotImplementedError
a : Optional[int] = preprocess(Image.open(lowerCAmelCase__ ) , target_image_size=256 ).to(self.device )
a : Optional[int] = preprocess_vqgan(lowerCAmelCase__ )
a : Optional[int] = self.vqgan.encode(lowerCAmelCase__ )
return z
def __a ( self , lowerCAmelCase__ ) -> Dict:
a : str = self.latent.detach().requires_grad_()
a : Tuple = base_latent + transform_vector
if self.quantize:
a : str = self.vqgan.quantize(lowerCAmelCase__ )
else:
a : List[str] = trans_latent
return self.vqgan.decode(lowerCAmelCase__ )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None ) -> Tuple:
a : List[str] = self.clip_preprocessor(text=lowerCAmelCase__ , images=lowerCAmelCase__ , return_tensors="pt" , padding=lowerCAmelCase__ )
a : Optional[int] = self.clip(**lowerCAmelCase__ )
a : List[str] = clip_outputs.logits_per_image
if weights is not None:
a : Dict = similarity_logits * weights
return similarity_logits.sum()
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[str]:
a : str = self._get_clip_similarity(pos_prompts["prompts"] , lowerCAmelCase__ , weights=(1 / pos_prompts["weights"]) )
if neg_prompts:
a : List[str] = self._get_clip_similarity(neg_prompts["prompts"] , lowerCAmelCase__ , weights=neg_prompts["weights"] )
else:
a : str = torch.tensor([1] , device=self.device )
a : List[str] = -torch.log(lowerCAmelCase__ ) + torch.log(lowerCAmelCase__ )
return loss
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Any:
a : List[Any] = torch.randn_like(self.latent , requires_grad=lowerCAmelCase__ , device=self.device )
a : Optional[Any] = torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
a : Union[str, Any] = self._add_vector(lowerCAmelCase__ )
a : Tuple = loop_post_process(lowerCAmelCase__ )
a : Dict = self._get_CLIP_loss(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
print("CLIP loss" , lowerCAmelCase__ )
if self.log:
wandb.log({"CLIP Loss": clip_loss} )
clip_loss.backward(retain_graph=lowerCAmelCase__ )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[str]:
wandb.init(reinit=lowerCAmelCase__ , project="face-editor" )
wandb.config.update({"Positive Prompts": positive_prompts} )
wandb.config.update({"Negative Prompts": negative_prompts} )
wandb.config.update({"lr": self.lr, "iterations": self.iterations} )
if image_path:
a : List[Any] = Image.open(lowerCAmelCase__ )
a : str = image.resize((256, 256) )
wandb.log("Original Image" , wandb.Image(lowerCAmelCase__ ) )
def __a ( self , lowerCAmelCase__ ) -> Any:
if not prompts:
return []
a : List[Any] = []
a : Tuple = []
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
a : Optional[Any] = [prompt.strip() for prompt in prompts.split("|" )]
for prompt in prompts:
if isinstance(lowerCAmelCase__ , (tuple, list) ):
a : Dict = prompt[0]
a : str = float(prompt[1] )
elif ":" in prompt:
a : Tuple = prompt.split(":" )
a : List[str] = float(lowerCAmelCase__ )
else:
a : Dict = prompt
a : Dict = 1.0
processed_prompts.append(lowerCAmelCase__ )
weights.append(lowerCAmelCase__ )
return {
"prompts": processed_prompts,
"weights": torch.tensor(lowerCAmelCase__ , device=self.device ),
}
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=True , lowerCAmelCase__=False , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=None , ) -> Tuple:
if image_path:
a : Optional[Any] = self._get_latent(lowerCAmelCase__ )
else:
a : int = torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
assert pos_prompts, "You must provide at least one positive prompt."
a : List[str] = self.process_prompts(lowerCAmelCase__ )
a : Optional[Any] = self.process_prompts(lowerCAmelCase__ )
if save_final and save_path is None:
a : Union[str, Any] = os.path.join("./outputs/" , "_".join(pos_prompts["prompts"] ) )
if not os.path.exists(lowerCAmelCase__ ):
os.makedirs(lowerCAmelCase__ )
else:
a : Optional[int] = save_path + "_" + get_timestamp()
os.makedirs(lowerCAmelCase__ )
a : Optional[Any] = save_path
a : Optional[int] = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print("Original Image" )
show_pil(custom_to_pil(lowerCAmelCase__ ) )
a : Dict = loop_post_process(lowerCAmelCase__ )
for iter, transformed_img in enumerate(self._optimize_CLIP(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) ):
if show_intermediate:
show_pil(lowerCAmelCase__ )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , f"""iter_{iter:03d}.png""" ) )
if self.log:
wandb.log({"Image": wandb.Image(lowerCAmelCase__ )} )
if show_final:
show_pil(lowerCAmelCase__ )
if save_final:
transformed_img.save(os.path.join(self.save_path , f"""iter_{iter:03d}_final.png""" ) )
| 357 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def _SCREAMING_SNAKE_CASE ( ) ->List[str]:
'''simple docstring'''
a : Optional[Any] = ArgumentParser("Accelerate CLI tool" , usage="accelerate <command> [<args>]" , allow_abbrev=_lowercase )
a : Optional[Any] = parser.add_subparsers(help="accelerate command helpers" )
# Register commands
get_config_parser(subparsers=_lowercase )
env_command_parser(subparsers=_lowercase )
launch_command_parser(subparsers=_lowercase )
tpu_command_parser(subparsers=_lowercase )
test_command_parser(subparsers=_lowercase )
# Let's go
a : int = parser.parse_args()
if not hasattr(_lowercase , "func" ):
parser.print_help()
exit(1 )
# Run
args.func(_lowercase )
if __name__ == "__main__":
main()
| 79 | 0 |
"""simple docstring"""
from math import sqrt
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = 0
for i in range(1 , int(sqrt(_UpperCamelCase ) + 1 ) ):
if n % i == 0 and i != sqrt(_UpperCamelCase ):
total += i + n // i
elif i == sqrt(_UpperCamelCase ):
total += i
return total - n
def _lowerCamelCase ( _UpperCamelCase = 1_0000 ):
'''simple docstring'''
__lowerCAmelCase = sum(
i
for i in range(1 , _UpperCamelCase )
if sum_of_divisors(sum_of_divisors(_UpperCamelCase ) ) == i and sum_of_divisors(_UpperCamelCase ) != i )
return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 57 |
import torch
from torch import nn
class snake_case ( nn.Module ):
'''simple docstring'''
def __init__( self : int , lowerCAmelCase : Tuple , lowerCAmelCase : int , lowerCAmelCase : Any , lowerCAmelCase : Tuple , lowerCAmelCase : int=1 , lowerCAmelCase : List[Any]=False) -> str:
"""simple docstring"""
super().__init__()
_snake_case : List[str] = n_token
_snake_case : Any = d_embed
_snake_case : List[str] = d_proj
_snake_case : Optional[int] = cutoffs + [n_token]
_snake_case : Dict = [0] + self.cutoffs
_snake_case : Optional[Any] = div_val
_snake_case : Tuple = self.cutoffs[0]
_snake_case : List[str] = len(self.cutoffs) - 1
_snake_case : str = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
_snake_case : int = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed))
_snake_case : Any = nn.Parameter(torch.zeros(self.n_clusters))
_snake_case : Tuple = nn.ModuleList()
_snake_case : int = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs)):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(lowerCAmelCase , lowerCAmelCase)))
else:
self.out_projs.append(lowerCAmelCase)
self.out_layers.append(nn.Linear(lowerCAmelCase , lowerCAmelCase))
else:
for i in range(len(self.cutoffs)):
_snake_case , _snake_case : Any = self.cutoff_ends[i], self.cutoff_ends[i + 1]
_snake_case : Dict = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(lowerCAmelCase , lowerCAmelCase)))
self.out_layers.append(nn.Linear(lowerCAmelCase , r_idx - l_idx))
_snake_case : Tuple = keep_order
def UpperCamelCase_ ( self : List[str] , lowerCAmelCase : Any , lowerCAmelCase : Any , lowerCAmelCase : Dict , lowerCAmelCase : Optional[int]) -> List[str]:
"""simple docstring"""
if proj is None:
_snake_case : List[Any] = nn.functional.linear(lowerCAmelCase , lowerCAmelCase , bias=lowerCAmelCase)
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
_snake_case : List[str] = nn.functional.linear(lowerCAmelCase , proj.t().contiguous())
_snake_case : Optional[int] = nn.functional.linear(lowerCAmelCase , lowerCAmelCase , bias=lowerCAmelCase)
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def UpperCamelCase_ ( self : Optional[Any] , lowerCAmelCase : Dict , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : int=False) -> Tuple:
"""simple docstring"""
if labels is not None:
# Shift so that tokens < n predict n
_snake_case : List[str] = hidden[..., :-1, :].contiguous()
_snake_case : int = labels[..., 1:].contiguous()
_snake_case : int = hidden.view(-1 , hidden.size(-1))
_snake_case : str = labels.view(-1)
if hidden.size(0) != labels.size(0):
raise RuntimeError("""Input and labels should have the same size in the batch dimension.""")
else:
_snake_case : List[Any] = hidden.view(-1 , hidden.size(-1))
if self.n_clusters == 0:
_snake_case : int = self._compute_logit(lowerCAmelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0])
if labels is not None:
_snake_case : Optional[int] = labels != -100
_snake_case : Union[str, Any] = torch.zeros_like(lowerCAmelCase , dtype=hidden.dtype , device=hidden.device)
_snake_case : Union[str, Any] = (
-nn.functional.log_softmax(lowerCAmelCase , dim=-1)[mask].gather(1 , labels[mask].unsqueeze(1)).squeeze(1)
)
else:
_snake_case : Optional[int] = nn.functional.log_softmax(lowerCAmelCase , dim=-1)
else:
# construct weights and biases
_snake_case , _snake_case : Optional[int] = [], []
for i in range(len(self.cutoffs)):
if self.div_val == 1:
_snake_case , _snake_case : Any = self.cutoff_ends[i], self.cutoff_ends[i + 1]
_snake_case : Dict = self.out_layers[0].weight[l_idx:r_idx]
_snake_case : Tuple = self.out_layers[0].bias[l_idx:r_idx]
else:
_snake_case : Any = self.out_layers[i].weight
_snake_case : Optional[int] = self.out_layers[i].bias
if i == 0:
_snake_case : Dict = torch.cat([weight_i, self.cluster_weight] , dim=0)
_snake_case : List[str] = torch.cat([bias_i, self.cluster_bias] , dim=0)
weights.append(lowerCAmelCase)
biases.append(lowerCAmelCase)
_snake_case , _snake_case , _snake_case : List[Any] = weights[0], biases[0], self.out_projs[0]
_snake_case : List[str] = self._compute_logit(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)
_snake_case : Dict = nn.functional.log_softmax(lowerCAmelCase , dim=1)
if labels is None:
_snake_case : List[Any] = hidden.new_empty((head_logit.size(0), self.n_token))
else:
_snake_case : Optional[Any] = torch.zeros_like(lowerCAmelCase , dtype=hidden.dtype , device=hidden.device)
_snake_case : Optional[int] = 0
_snake_case : Union[str, Any] = [0] + self.cutoffs
for i in range(len(lowerCAmelCase) - 1):
_snake_case , _snake_case : Any = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
_snake_case : Optional[int] = (labels >= l_idx) & (labels < r_idx)
_snake_case : Dict = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
_snake_case : Dict = labels.index_select(0 , lowerCAmelCase) - l_idx
_snake_case : List[Any] = head_logprob.index_select(0 , lowerCAmelCase)
_snake_case : Dict = hidden.index_select(0 , lowerCAmelCase)
else:
_snake_case : Optional[Any] = hidden
if i == 0:
if labels is not None:
_snake_case : str = head_logprob_i.gather(1 , target_i[:, None]).squeeze(1)
else:
_snake_case : int = head_logprob[:, : self.cutoffs[0]]
else:
_snake_case , _snake_case , _snake_case : Dict = weights[i], biases[i], self.out_projs[i]
_snake_case : int = self._compute_logit(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)
_snake_case : List[str] = nn.functional.log_softmax(lowerCAmelCase , dim=1)
_snake_case : str = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
_snake_case : Dict = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None]).squeeze(1)
else:
_snake_case : Tuple = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
_snake_case : int = logprob_i
if labels is not None:
if (hasattr(self , """keep_order""") and self.keep_order) or keep_order:
out.index_copy_(0 , lowerCAmelCase , -logprob_i)
else:
out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
offset += logprob_i.size(0)
return out
def UpperCamelCase_ ( self : Union[str, Any] , lowerCAmelCase : Optional[int]) -> Tuple:
"""simple docstring"""
if self.n_clusters == 0:
_snake_case : Optional[Any] = self._compute_logit(lowerCAmelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0])
return nn.functional.log_softmax(lowerCAmelCase , dim=-1)
else:
# construct weights and biases
_snake_case , _snake_case : Optional[int] = [], []
for i in range(len(self.cutoffs)):
if self.div_val == 1:
_snake_case , _snake_case : Optional[Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
_snake_case : Optional[Any] = self.out_layers[0].weight[l_idx:r_idx]
_snake_case : Union[str, Any] = self.out_layers[0].bias[l_idx:r_idx]
else:
_snake_case : Tuple = self.out_layers[i].weight
_snake_case : Any = self.out_layers[i].bias
if i == 0:
_snake_case : Tuple = torch.cat([weight_i, self.cluster_weight] , dim=0)
_snake_case : Optional[Any] = torch.cat([bias_i, self.cluster_bias] , dim=0)
weights.append(lowerCAmelCase)
biases.append(lowerCAmelCase)
_snake_case , _snake_case , _snake_case : int = weights[0], biases[0], self.out_projs[0]
_snake_case : Union[str, Any] = self._compute_logit(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)
_snake_case : Any = hidden.new_empty((head_logit.size(0), self.n_token))
_snake_case : Optional[Any] = nn.functional.log_softmax(lowerCAmelCase , dim=1)
_snake_case : List[Any] = [0] + self.cutoffs
for i in range(len(lowerCAmelCase) - 1):
_snake_case , _snake_case : Any = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
_snake_case : Union[str, Any] = head_logprob[:, : self.cutoffs[0]]
else:
_snake_case , _snake_case , _snake_case : str = weights[i], biases[i], self.out_projs[i]
_snake_case : List[str] = self._compute_logit(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)
_snake_case : str = nn.functional.log_softmax(lowerCAmelCase , dim=1)
_snake_case : Dict = head_logprob[:, -i] + tail_logprob_i
_snake_case : Any = logprob_i
return out
| 317 | 0 |
"""simple docstring"""
from __future__ import annotations
from math import pi, sqrt
def __UpperCAmelCase ( snake_case_ : Union[str, Any] , snake_case_ : Dict ) -> Optional[Any]:
"""simple docstring"""
if inductance <= 0:
raise ValueError("""Inductance cannot be 0 or negative""" )
elif capacitance <= 0:
raise ValueError("""Capacitance cannot be 0 or negative""" )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod() | 353 |
"""simple docstring"""
from functools import reduce
SCREAMING_SNAKE_CASE : int = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def __UpperCAmelCase ( snake_case_ : str = N ) -> int:
"""simple docstring"""
return max(
# mypy cannot properly interpret reduce
int(reduce(lambda snake_case_ , snake_case_ : str(int(snake_case_ ) * int(snake_case_ ) ) , n[i : i + 13] ) )
for i in range(len(snake_case_ ) - 12 ) )
if __name__ == "__main__":
print(F'{solution() = }') | 317 | 0 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class __UpperCamelCase ( unittest.TestCase ):
def __init__( self, lowerCAmelCase, lowerCAmelCase=7, lowerCAmelCase=3, lowerCAmelCase=30, lowerCAmelCase=400, lowerCAmelCase=True, lowerCAmelCase=None, lowerCAmelCase=True, lowerCAmelCase=[0.5, 0.5, 0.5], lowerCAmelCase=[0.5, 0.5, 0.5], lowerCAmelCase=True, lowerCAmelCase=1 / 255, lowerCAmelCase=True, ):
"""simple docstring"""
lowerCamelCase_ =size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1_333}
lowerCamelCase_ =parent
lowerCamelCase_ =batch_size
lowerCamelCase_ =num_channels
lowerCamelCase_ =min_resolution
lowerCamelCase_ =max_resolution
lowerCamelCase_ =do_resize
lowerCamelCase_ =size
lowerCamelCase_ =do_normalize
lowerCamelCase_ =image_mean
lowerCamelCase_ =image_std
lowerCamelCase_ =do_rescale
lowerCamelCase_ =rescale_factor
lowerCamelCase_ =do_pad
def lowercase__ ( self ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=False ):
"""simple docstring"""
if not batched:
lowerCamelCase_ =image_inputs[0]
if isinstance(lowerCAmelCase, Image.Image ):
lowerCamelCase_, lowerCamelCase_ =image.size
else:
lowerCamelCase_, lowerCamelCase_ =image.shape[1], image.shape[2]
if w < h:
lowerCamelCase_ =int(self.size['''shortest_edge'''] * h / w )
lowerCamelCase_ =self.size['''shortest_edge''']
elif w > h:
lowerCamelCase_ =self.size['''shortest_edge''']
lowerCamelCase_ =int(self.size['''shortest_edge'''] * w / h )
else:
lowerCamelCase_ =self.size['''shortest_edge''']
lowerCamelCase_ =self.size['''shortest_edge''']
else:
lowerCamelCase_ =[]
for image in image_inputs:
lowerCamelCase_, lowerCamelCase_ =self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
lowerCamelCase_ =max(lowerCAmelCase, key=lambda lowerCAmelCase : item[0] )[0]
lowerCamelCase_ =max(lowerCAmelCase, key=lambda lowerCAmelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class __UpperCamelCase ( lowerCamelCase__ , unittest.TestCase ):
lowercase : List[str] =DetaImageProcessor if is_vision_available() else None
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =DetaImageProcessingTester(self )
@property
def lowercase__ ( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase, '''image_mean''' ) )
self.assertTrue(hasattr(lowerCAmelCase, '''image_std''' ) )
self.assertTrue(hasattr(lowerCAmelCase, '''do_normalize''' ) )
self.assertTrue(hasattr(lowerCAmelCase, '''do_resize''' ) )
self.assertTrue(hasattr(lowerCAmelCase, '''do_rescale''' ) )
self.assertTrue(hasattr(lowerCAmelCase, '''do_pad''' ) )
self.assertTrue(hasattr(lowerCAmelCase, '''size''' ) )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size, {'''shortest_edge''': 18, '''longest_edge''': 1_333} )
self.assertEqual(image_processor.do_pad, lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase_ =prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase, Image.Image )
# Test not batched input
lowerCamelCase_ =image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
lowerCamelCase_, lowerCamelCase_ =self.image_processor_tester.get_expected_values(lowerCAmelCase )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
lowerCamelCase_, lowerCamelCase_ =self.image_processor_tester.get_expected_values(lowerCAmelCase, batched=lowerCAmelCase )
lowerCamelCase_ =image_processing(lowerCAmelCase, return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase_ =prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCAmelCase, numpify=lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase, np.ndarray )
# Test not batched input
lowerCamelCase_ =image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
lowerCamelCase_, lowerCamelCase_ =self.image_processor_tester.get_expected_values(lowerCAmelCase )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
lowerCamelCase_ =image_processing(lowerCAmelCase, return_tensors='''pt''' ).pixel_values
lowerCamelCase_, lowerCamelCase_ =self.image_processor_tester.get_expected_values(lowerCAmelCase, batched=lowerCAmelCase )
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase_ =prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCAmelCase, torchify=lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase, torch.Tensor )
# Test not batched input
lowerCamelCase_ =image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
lowerCamelCase_, lowerCamelCase_ =self.image_processor_tester.get_expected_values(lowerCAmelCase )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
lowerCamelCase_ =image_processing(lowerCAmelCase, return_tensors='''pt''' ).pixel_values
lowerCamelCase_, lowerCamelCase_ =self.image_processor_tester.get_expected_values(lowerCAmelCase, batched=lowerCAmelCase )
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''', '''r''' ) as f:
lowerCamelCase_ =json.loads(f.read() )
lowerCamelCase_ ={'''image_id''': 39_769, '''annotations''': target}
# encode them
lowerCamelCase_ =DetaImageProcessor()
lowerCamelCase_ =image_processing(images=lowerCAmelCase, annotations=lowerCAmelCase, return_tensors='''pt''' )
# verify pixel values
lowerCamelCase_ =torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding['''pixel_values'''].shape, lowerCAmelCase )
lowerCamelCase_ =torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3], lowerCAmelCase, atol=1e-4 ) )
# verify area
lowerCamelCase_ =torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''], lowerCAmelCase ) )
# verify boxes
lowerCamelCase_ =torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape, lowerCAmelCase )
lowerCamelCase_ =torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0], lowerCAmelCase, atol=1e-3 ) )
# verify image_id
lowerCamelCase_ =torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''], lowerCAmelCase ) )
# verify is_crowd
lowerCamelCase_ =torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''], lowerCAmelCase ) )
# verify class_labels
lowerCamelCase_ =torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''], lowerCAmelCase ) )
# verify orig_size
lowerCamelCase_ =torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''], lowerCAmelCase ) )
# verify size
lowerCamelCase_ =torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''], lowerCAmelCase ) )
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''', '''r''' ) as f:
lowerCamelCase_ =json.loads(f.read() )
lowerCamelCase_ ={'''file_name''': '''000000039769.png''', '''image_id''': 39_769, '''segments_info''': target}
lowerCamelCase_ =pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
lowerCamelCase_ =DetaImageProcessor(format='''coco_panoptic''' )
lowerCamelCase_ =image_processing(images=lowerCAmelCase, annotations=lowerCAmelCase, masks_path=lowerCAmelCase, return_tensors='''pt''' )
# verify pixel values
lowerCamelCase_ =torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding['''pixel_values'''].shape, lowerCAmelCase )
lowerCamelCase_ =torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3], lowerCAmelCase, atol=1e-4 ) )
# verify area
lowerCamelCase_ =torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''], lowerCAmelCase ) )
# verify boxes
lowerCamelCase_ =torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape, lowerCAmelCase )
lowerCamelCase_ =torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0], lowerCAmelCase, atol=1e-3 ) )
# verify image_id
lowerCamelCase_ =torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''], lowerCAmelCase ) )
# verify is_crowd
lowerCamelCase_ =torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''], lowerCAmelCase ) )
# verify class_labels
lowerCamelCase_ =torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''], lowerCAmelCase ) )
# verify masks
lowerCamelCase_ =822_873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item(), lowerCAmelCase )
# verify orig_size
lowerCamelCase_ =torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''], lowerCAmelCase ) )
# verify size
lowerCamelCase_ =torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''], lowerCAmelCase ) )
| 75 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_SCREAMING_SNAKE_CASE : Tuple = {
"configuration_conditional_detr": [
"CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ConditionalDetrConfig",
"ConditionalDetrOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Union[str, Any] = ["ConditionalDetrFeatureExtractor"]
_SCREAMING_SNAKE_CASE : List[Any] = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Dict = [
"CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConditionalDetrForObjectDetection",
"ConditionalDetrForSegmentation",
"ConditionalDetrModel",
"ConditionalDetrPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 85 | 0 |
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = pd.read_csv("""sample_data.csv""", header=None)
SCREAMING_SNAKE_CASE__ = df.shape[:1][0]
# If you're using some other dataset input the target column
SCREAMING_SNAKE_CASE__ = df.iloc[:, 1:2]
SCREAMING_SNAKE_CASE__ = actual_data.values.reshape(len_data, 1)
SCREAMING_SNAKE_CASE__ = MinMaxScaler().fit_transform(actual_data)
SCREAMING_SNAKE_CASE__ = 1_0
SCREAMING_SNAKE_CASE__ = 5
SCREAMING_SNAKE_CASE__ = 2_0
SCREAMING_SNAKE_CASE__ = len_data - periods * look_back
SCREAMING_SNAKE_CASE__ = actual_data[:division]
SCREAMING_SNAKE_CASE__ = actual_data[division - look_back :]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = [], []
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = [], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
SCREAMING_SNAKE_CASE__ = np.array(train_x)
SCREAMING_SNAKE_CASE__ = np.array(test_x)
SCREAMING_SNAKE_CASE__ = np.array([list(i.ravel()) for i in train_y])
SCREAMING_SNAKE_CASE__ = np.array([list(i.ravel()) for i in test_y])
SCREAMING_SNAKE_CASE__ = Sequential()
model.add(LSTM(1_2_8, input_shape=(look_back, 1), return_sequences=True))
model.add(LSTM(6_4, input_shape=(1_2_8, 1)))
model.add(Dense(forward_days))
model.compile(loss="""mean_squared_error""", optimizer="""adam""")
SCREAMING_SNAKE_CASE__ = model.fit(
x_train, y_train, epochs=1_5_0, verbose=1, shuffle=True, batch_size=4
)
SCREAMING_SNAKE_CASE__ = model.predict(x_test)
| 353 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
# See all MVP models at https://huggingface.co/models?filter=mvp
SCREAMING_SNAKE_CASE__ = {
"""vocab_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json""",
},
"""added_tokens.json""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json""",
},
"""merges_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json""",
},
}
SCREAMING_SNAKE_CASE__ = {
"""RUCAIBox/mvp""": 1_0_2_4,
}
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = ["input_ids", "attention_mask"]
lowerCAmelCase__ = MvpTokenizer
def __init__( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase="replace" , UpperCAmelCase="<s>" , UpperCAmelCase="</s>" , UpperCAmelCase="</s>" , UpperCAmelCase="<s>" , UpperCAmelCase="<unk>" , UpperCAmelCase="<pad>" , UpperCAmelCase="<mask>" , UpperCAmelCase=False , UpperCAmelCase=True , **UpperCAmelCase , ) -> Optional[int]:
'''simple docstring'''
super().__init__(
UpperCAmelCase , UpperCAmelCase , tokenizer_file=UpperCAmelCase , errors=UpperCAmelCase , bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , sep_token=UpperCAmelCase , cls_token=UpperCAmelCase , unk_token=UpperCAmelCase , pad_token=UpperCAmelCase , mask_token=UpperCAmelCase , add_prefix_space=UpperCAmelCase , trim_offsets=UpperCAmelCase , **UpperCAmelCase , )
lowercase_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , UpperCAmelCase ) != add_prefix_space:
lowercase_ = getattr(UpperCAmelCase , pre_tok_state.pop("type" ) )
lowercase_ = add_prefix_space
lowercase_ = pre_tok_class(**UpperCAmelCase )
lowercase_ = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
lowercase_ = "post_processor"
lowercase_ = getattr(self.backend_tokenizer , UpperCAmelCase , UpperCAmelCase )
if tokenizer_component_instance:
lowercase_ = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
lowercase_ = tuple(state["sep"] )
if "cls" in state:
lowercase_ = tuple(state["cls"] )
lowercase_ = False
if state.get("add_prefix_space" , UpperCAmelCase ) != add_prefix_space:
lowercase_ = add_prefix_space
lowercase_ = True
if state.get("trim_offsets" , UpperCAmelCase ) != trim_offsets:
lowercase_ = trim_offsets
lowercase_ = True
if changes_to_apply:
lowercase_ = getattr(UpperCAmelCase , state.pop("type" ) )
lowercase_ = component_class(**UpperCAmelCase )
setattr(self.backend_tokenizer , UpperCAmelCase , UpperCAmelCase )
@property
def A__ ( self ) -> str:
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def A__ ( self , UpperCAmelCase ) -> int:
'''simple docstring'''
lowercase_ = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else value
lowercase_ = value
def A__ ( self , *UpperCAmelCase , **UpperCAmelCase ) -> BatchEncoding:
'''simple docstring'''
lowercase_ = kwargs.get("is_split_into_words" , UpperCAmelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*UpperCAmelCase , **UpperCAmelCase )
def A__ ( self , *UpperCAmelCase , **UpperCAmelCase ) -> BatchEncoding:
'''simple docstring'''
lowercase_ = kwargs.get("is_split_into_words" , UpperCAmelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs." )
return super()._encode_plus(*UpperCAmelCase , **UpperCAmelCase )
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None ) -> Tuple[str]:
'''simple docstring'''
lowercase_ = self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase )
return tuple(UpperCAmelCase )
def A__ ( self , UpperCAmelCase , UpperCAmelCase=None ) -> Tuple:
'''simple docstring'''
lowercase_ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None ) -> List[int]:
'''simple docstring'''
lowercase_ = [self.sep_token_id]
lowercase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 297 | 0 |
"""simple docstring"""
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
lowercase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
def __init__( self , lowercase , lowercase=768 ):
super().__init__(lowercase )
_lowerCamelCase : Any = proj_size
_lowerCamelCase : Dict = CLIPVisionModel(lowercase )
_lowerCamelCase : List[str] = PaintByExampleMapper(lowercase )
_lowerCamelCase : Optional[Any] = nn.LayerNorm(config.hidden_size )
_lowerCamelCase : int = nn.Linear(config.hidden_size , self.proj_size )
# uncondition for scaling
_lowerCamelCase : str = nn.Parameter(torch.randn((1, 1, self.proj_size) ) )
def A_ ( self , lowercase , lowercase=False ):
_lowerCamelCase : Union[str, Any] = self.model(pixel_values=lowercase )
_lowerCamelCase : int = clip_output.pooler_output
_lowerCamelCase : str = self.mapper(latent_states[:, None] )
_lowerCamelCase : List[Any] = self.final_layer_norm(lowercase )
_lowerCamelCase : Dict = self.proj_out(lowercase )
if return_uncond_vector:
return latent_states, self.uncond_vector
return latent_states
class lowerCAmelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self , lowercase ):
super().__init__()
_lowerCamelCase : Tuple = (config.num_hidden_layers + 1) // 5
_lowerCamelCase : int = config.hidden_size
_lowerCamelCase : Optional[Any] = 1
_lowerCamelCase : str = nn.ModuleList(
[
BasicTransformerBlock(lowercase , lowercase , lowercase , activation_fn='gelu' , attention_bias=lowercase )
for _ in range(lowercase )
] )
def A_ ( self , lowercase ):
for block in self.blocks:
_lowerCamelCase : Tuple = block(lowercase )
return hidden_states | 96 |
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class __lowerCAmelCase ( lowerCAmelCase__ ):
@slow
@require_torch
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = EncoderDecoderModel.from_encoder_decoder_pretrained('''prajjwal1/bert-tiny''' , '''prajjwal1/bert-tiny''' )
__lowerCamelCase = BertTokenizer.from_pretrained('''bert-base-uncased''' )
__lowerCamelCase = bertabert.config.encoder.vocab_size
__lowerCamelCase = tokenizer.sep_token_id
__lowerCamelCase = tokenizer.cls_token_id
__lowerCamelCase = 128
__lowerCamelCase = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''train[:1%]''' )
__lowerCamelCase = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''validation[:1%]''' )
__lowerCamelCase = train_dataset.select(range(32 ) )
__lowerCamelCase = val_dataset.select(range(16 ) )
__lowerCamelCase = 4
def _map_to_encoder_decoder_inputs(__UpperCAmelCase ):
# Tokenizer will automatically set [BOS] <text> [EOS]
__lowerCamelCase = tokenizer(batch['''article'''] , padding='''max_length''' , truncation=__UpperCAmelCase , max_length=512 )
__lowerCamelCase = tokenizer(batch['''highlights'''] , padding='''max_length''' , truncation=__UpperCAmelCase , max_length=128 )
__lowerCamelCase = inputs.input_ids
__lowerCamelCase = inputs.attention_mask
__lowerCamelCase = outputs.input_ids
__lowerCamelCase = outputs.input_ids.copy()
__lowerCamelCase = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['''labels''']
]
__lowerCamelCase = outputs.attention_mask
assert all(len(__UpperCAmelCase ) == 512 for x in inputs.input_ids )
assert all(len(__UpperCAmelCase ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(__UpperCAmelCase ):
__lowerCamelCase = pred.label_ids
__lowerCamelCase = pred.predictions
# all unnecessary tokens are removed
__lowerCamelCase = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
__lowerCamelCase = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
__lowerCamelCase = sum([int(pred_str[i] == label_str[i] ) for i in range(len(__UpperCAmelCase ) )] ) / len(__UpperCAmelCase )
return {"accuracy": accuracy}
# map train dataset
__lowerCamelCase = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=__UpperCAmelCase , batch_size=__UpperCAmelCase , remove_columns=['''article''', '''highlights'''] , )
train_dataset.set_format(
type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
# same for validation dataset
__lowerCamelCase = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=__UpperCAmelCase , batch_size=__UpperCAmelCase , remove_columns=['''article''', '''highlights'''] , )
val_dataset.set_format(
type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
__lowerCamelCase = self.get_auto_remove_tmp_dir()
__lowerCamelCase = SeqaSeqTrainingArguments(
output_dir=__UpperCAmelCase , per_device_train_batch_size=__UpperCAmelCase , per_device_eval_batch_size=__UpperCAmelCase , predict_with_generate=__UpperCAmelCase , evaluation_strategy='''steps''' , do_train=__UpperCAmelCase , do_eval=__UpperCAmelCase , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
__lowerCamelCase = SeqaSeqTrainer(
model=__UpperCAmelCase , args=__UpperCAmelCase , compute_metrics=_compute_metrics , train_dataset=__UpperCAmelCase , eval_dataset=__UpperCAmelCase , tokenizer=__UpperCAmelCase , )
# start training
trainer.train()
| 330 | 0 |
UNIVERSAL_GAS_CONSTANT = 8.3144598


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    else:
        return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
_A = 300
_A = 28
_A = rms_speed_of_molecule(temperature, molar_mass)
print(f"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
| 167 |
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
_A = logging.get_logger(__name__)
_A = TypeVar('''DatasetType''', Dataset, IterableDataset)
def interleave_datasets(
    datasets,
    probabilities=None,
    seed=None,
    info=None,
    split=None,
    stopping_strategy="first_exhausted",
):
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
def concatenate_datasets(
    dsets,
    info=None,
    split=None,
    axis=0,
):
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
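# Hedged usage sketch (added; the toy datasets below are illustrative, not from
# this module): with no probabilities, interleaving alternates examples until
# the first dataset is exhausted.
#
#   from datasets import Dataset
#   d1 = Dataset.from_dict({"a": [0, 1, 2]})
#   d2 = Dataset.from_dict({"a": [10, 11, 12]})
#   interleave_datasets([d1, d2])["a"]  # -> [0, 10, 1, 11, 2, 12]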
| 167 | 1 |
"""
Project Euler Problem 48: https://projecteuler.net/problem=48

Find the last ten digits of the series 1^1 + 2^2 + 3^3 + ... + 1000^1000.
"""


def solution():
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]
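def solution_with_modular_pow():
    """Hedged alternative (added, not in the original file): compute the same
    last ten digits with pow(i, i, 10**10), avoiding the full big integer."""
    modulus = 10**10
    return str(sum(pow(i, i, modulus) for i in range(1, 1001)) % modulus).zfill(10)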
if __name__ == "__main__":
print(solution())
| 139 |
ROMAN = [
(1000, "M"),
(900, "CM"),
(500, "D"),
(400, "CD"),
(100, "C"),
(90, "XC"),
(50, "L"),
(40, "XL"),
(10, "X"),
(9, "IX"),
(5, "V"),
(4, "IV"),
(1, "I"),
]
def roman_to_int(roman: str) -> int:
    """Convert a Roman numeral to an integer."""
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total
def int_to_roman(number: int) -> str:
    """Convert an integer to a Roman numeral."""
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)
if __name__ == "__main__":
import doctest
doctest.testmod()
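    # Hedged round-trip check (added, not in the original file):
    assert roman_to_int("MCMXCIV") == 1994
    assert int_to_roman(1994) == "MCMXCIV"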
| 227 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_roformer""": ["""ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RoFormerConfig""", """RoFormerOnnxConfig"""],
"""tokenization_roformer""": ["""RoFormerTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roformer"] = [
"""ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RoFormerForCausalLM""",
"""RoFormerForMaskedLM""",
"""RoFormerForMultipleChoice""",
"""RoFormerForQuestionAnswering""",
"""RoFormerForSequenceClassification""",
"""RoFormerForTokenClassification""",
"""RoFormerLayer""",
"""RoFormerModel""",
"""RoFormerPreTrainedModel""",
"""load_tf_weights_in_roformer""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
"""TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRoFormerForCausalLM""",
"""TFRoFormerForMaskedLM""",
"""TFRoFormerForMultipleChoice""",
"""TFRoFormerForQuestionAnswering""",
"""TFRoFormerForSequenceClassification""",
"""TFRoFormerForTokenClassification""",
"""TFRoFormerLayer""",
"""TFRoFormerModel""",
"""TFRoFormerPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roformer"] = [
"""FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FlaxRoFormerForMaskedLM""",
"""FlaxRoFormerForMultipleChoice""",
"""FlaxRoFormerForQuestionAnswering""",
"""FlaxRoFormerForSequenceClassification""",
"""FlaxRoFormerForTokenClassification""",
"""FlaxRoFormerModel""",
"""FlaxRoFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
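# Hedged usage note (added): with the _LazyModule pattern above, an import such
# as `from transformers import RoFormerModel` only loads the torch-backed
# submodule on first attribute access rather than at package import time.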
| 368 |
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = (
    subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode("utf-8").split()
)
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 157 | 0 |
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set[str]:
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored
G = {
'''A''': ['''B''', '''C''', '''D'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F'''],
'''D''': ['''B''', '''D'''],
'''E''': ['''B''', '''F'''],
'''F''': ['''C''', '''E''', '''G'''],
'''G''': ['''F'''],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, '''A'''))
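# Hedged BFS variant sketch (added; same graph shape assumed) - marking nodes
# when they are enqueued instead of when they are popped:
#
#   from collections import deque
#
#   def breadth_first_search(graph: dict, start: str) -> set[str]:
#       explored, queue = {start}, deque([start])
#       while queue:
#           v = queue.popleft()
#           for adj in graph[v]:
#               if adj not in explored:
#                   explored.add(adj)
#                   queue.append(adj)
#       return explored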
| 257 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
lowerCAmelCase__ : List[Any] =logging.get_logger(__name__)
def get_resize_output_image_size(input_image, output_size, keep_aspect_ratio, multiple) -> Tuple[int, int]:
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple
        if x < min_val:
            x = math.ceil(val / multiple) * multiple
        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size
    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width
    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height
    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)
    return (new_height, new_width)
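# Hedged worked example (added; the input values are illustrative): for a
# 480x640 (height x width) image with output_size=(384, 384),
# keep_aspect_ratio=True and multiple=32, the factor closer to 1 wins
# (384/480 = 0.8, from the height), so both sides scale by 0.8 and snap to
# multiples of 32, giving (384, 512).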
class DPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BILINEAR, keep_aspect_ratio=False, ensure_multiple_of=1, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=None, image_std=None, **kwargs):
'''simple docstring'''
super().__init__(**_A )
__SCREAMING_SNAKE_CASE = size if size is not None else {'height': 384, 'width': 384}
__SCREAMING_SNAKE_CASE = get_size_dict(_A )
__SCREAMING_SNAKE_CASE = do_resize
__SCREAMING_SNAKE_CASE = size
__SCREAMING_SNAKE_CASE = keep_aspect_ratio
__SCREAMING_SNAKE_CASE = ensure_multiple_of
__SCREAMING_SNAKE_CASE = resample
__SCREAMING_SNAKE_CASE = do_rescale
__SCREAMING_SNAKE_CASE = rescale_factor
__SCREAMING_SNAKE_CASE = do_normalize
__SCREAMING_SNAKE_CASE = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__SCREAMING_SNAKE_CASE = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image, size, keep_aspect_ratio=False, ensure_multiple_of=1, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = get_size_dict(_A )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
__SCREAMING_SNAKE_CASE = get_resize_output_image_size(
_A , output_size=(size['height'], size['width']) , keep_aspect_ratio=_A , multiple=_A , )
return resize(_A , size=_A , resample=_A , data_format=_A , **_A )
    def rescale(self, image, scale, data_format=None, **kwargs):
'''simple docstring'''
return rescale(_A , scale=_A , data_format=_A , **_A )
    def normalize(self, image, mean, std, data_format=None, **kwargs):
'''simple docstring'''
return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )
    def preprocess(self, images, do_resize=None, size=None, keep_aspect_ratio=None, ensure_multiple_of=None, resample=None, do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = do_resize if do_resize is not None else self.do_resize
__SCREAMING_SNAKE_CASE = size if size is not None else self.size
__SCREAMING_SNAKE_CASE = get_size_dict(_A )
__SCREAMING_SNAKE_CASE = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
__SCREAMING_SNAKE_CASE = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
__SCREAMING_SNAKE_CASE = resample if resample is not None else self.resample
__SCREAMING_SNAKE_CASE = do_rescale if do_rescale is not None else self.do_rescale
__SCREAMING_SNAKE_CASE = rescale_factor if rescale_factor is not None else self.rescale_factor
__SCREAMING_SNAKE_CASE = do_normalize if do_normalize is not None else self.do_normalize
__SCREAMING_SNAKE_CASE = image_mean if image_mean is not None else self.image_mean
__SCREAMING_SNAKE_CASE = image_std if image_std is not None else self.image_std
__SCREAMING_SNAKE_CASE = make_list_of_images(_A )
if not valid_images(_A ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None or resample is None:
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
__SCREAMING_SNAKE_CASE = [to_numpy_array(_A ) for image in images]
if do_resize:
__SCREAMING_SNAKE_CASE = [self.resize(image=_A , size=_A , resample=_A ) for image in images]
if do_rescale:
__SCREAMING_SNAKE_CASE = [self.rescale(image=_A , scale=_A ) for image in images]
if do_normalize:
__SCREAMING_SNAKE_CASE = [self.normalize(image=_A , mean=_A , std=_A ) for image in images]
__SCREAMING_SNAKE_CASE = [to_channel_dimension_format(_A , _A ) for image in images]
__SCREAMING_SNAKE_CASE = {'pixel_values': images}
return BatchFeature(data=_A , tensor_type=_A )
    def post_process_semantic_segmentation(self, outputs, target_sizes=None):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_A ) != len(_A ):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
if is_torch_tensor(_A ):
__SCREAMING_SNAKE_CASE = target_sizes.numpy()
__SCREAMING_SNAKE_CASE = []
for idx in range(len(_A ) ):
__SCREAMING_SNAKE_CASE = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=_A )
__SCREAMING_SNAKE_CASE = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(_A )
else:
__SCREAMING_SNAKE_CASE = logits.argmax(dim=1 )
__SCREAMING_SNAKE_CASE = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 257 | 1 |
"""simple docstring"""
import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
'''simple docstring'''
    @staticmethod
    def _should_log(main_process_only):
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)
    def log(self, level, msg, *args, **kwargs):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility."
            )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)

        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()
def get_logger(name: str, log_level: str = None):
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
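# Hedged usage sketch (added): rank-zero-only logging in a multi-process run.
#
#   logger = get_logger(__name__)
#   logger.info("logged once on the main process")                    # default
#   logger.debug("logged on every process", main_process_only=False)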
| 371 |
"""simple docstring"""
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
'''simple docstring'''
    XL_PREFIX = '''
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
'''
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
__a : List[Any] = None
if self.model.config.prefix is not None:
__a : Tuple = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
__a : Tuple = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
__a , __a , __a : Optional[int] = self._sanitize_parameters(prefix=_UpperCAmelCase , **self._forward_params )
__a : Dict = {**self._preprocess_params, **preprocess_params}
__a : str = {**self._forward_params, **forward_params}
    def _sanitize_parameters(self, return_full_text=None, return_tensors=None, return_text=None, return_type=None, clean_up_tokenization_spaces=None, prefix=None, handle_long_generation=None, stop_sequence=None, **generate_kwargs):
__a : str = {}
if prefix is not None:
__a : Tuple = prefix
if prefix:
__a : str = self.tokenizer(
_UpperCAmelCase , padding=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , return_tensors=self.framework )
__a : List[str] = prefix_inputs['''input_ids'''].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
f"""{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"""
''' [None, \'hole\']''' )
__a : str = handle_long_generation
preprocess_params.update(_UpperCAmelCase )
__a : int = generate_kwargs
__a : List[Any] = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('''`return_text` is mutually exclusive with `return_full_text`''' )
if return_tensors is not None:
raise ValueError('''`return_full_text` is mutually exclusive with `return_tensors`''' )
__a : Optional[Any] = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('''`return_text` is mutually exclusive with `return_tensors`''' )
__a : List[Any] = ReturnType.TENSORS
if return_type is not None:
__a : Union[str, Any] = return_type
if clean_up_tokenization_spaces is not None:
__a : Optional[int] = clean_up_tokenization_spaces
if stop_sequence is not None:
__a : Any = self.tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
if len(_UpperCAmelCase ) > 1:
warnings.warn(
'''Stopping on a multiple token sequence is not yet supported on transformers. The first token of'''
''' the stop sequence will be used as the stop sequence string in the interim.''' )
__a : List[str] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
    def _parse_and_tokenize(self, *args, **kwargs):
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({'''add_space_before_punct_symbol''': True} )
return super()._parse_and_tokenize(*_UpperCAmelCase , **_UpperCAmelCase )
    def __call__(self, text_inputs, **kwargs):
        return super().__call__(text_inputs, **kwargs)
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase="" , _UpperCAmelCase=None , **_UpperCAmelCase ):
__a : Tuple = self.tokenizer(
prefix + prompt_text , padding=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , return_tensors=self.framework )
__a : int = prompt_text
if handle_long_generation == "hole":
__a : str = inputs['''input_ids'''].shape[-1]
if "max_new_tokens" in generate_kwargs:
__a : Tuple = generate_kwargs['''max_new_tokens''']
else:
__a : List[Any] = generate_kwargs.get('''max_length''' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('''We cannot infer how many new tokens are expected''' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
__a : Dict = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
'''We cannot use `hole` to handle this generation the number of desired tokens exceeds the'''
''' models max length''' )
__a : int = inputs['''input_ids'''][:, -keep_length:]
if "attention_mask" in inputs:
__a : List[str] = inputs['''attention_mask'''][:, -keep_length:]
return inputs
    def _forward(self, model_inputs, **generate_kwargs):
__a : str = model_inputs['''input_ids''']
__a : Dict = model_inputs.get('''attention_mask''' , _UpperCAmelCase )
# Allow empty prompts
if input_ids.shape[1] == 0:
__a : str = None
__a : List[Any] = None
__a : Any = 1
else:
__a : List[Any] = input_ids.shape[0]
__a : str = model_inputs.pop('''prompt_text''' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
__a : List[str] = generate_kwargs.pop('''prefix_length''' , 0 )
if prefix_length > 0:
__a : str = '''max_new_tokens''' in generate_kwargs or (
'''generation_config''' in generate_kwargs
and generate_kwargs['''generation_config'''].max_new_tokens is not None
)
if not has_max_new_tokens:
__a : Tuple = generate_kwargs.get('''max_length''' ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
__a : Dict = '''min_new_tokens''' in generate_kwargs or (
'''generation_config''' in generate_kwargs
and generate_kwargs['''generation_config'''].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
__a : List[str] = self.model.generate(input_ids=_UpperCAmelCase , attention_mask=_UpperCAmelCase , **_UpperCAmelCase )
__a : int = generated_sequence.shape[0]
if self.framework == "pt":
__a : Union[str, Any] = generated_sequence.reshape(_UpperCAmelCase , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
__a : Dict = tf.reshape(_UpperCAmelCase , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
__a : Optional[Any] = model_outputs['''generated_sequence'''][0]
__a : List[str] = model_outputs['''input_ids''']
__a : Optional[Any] = model_outputs['''prompt_text''']
__a : str = generated_sequence.numpy().tolist()
__a : List[Any] = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
__a : Union[str, Any] = {'''generated_token_ids''': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
__a : List[str] = self.tokenizer.decode(
_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
__a : Dict = 0
else:
__a : Any = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase , ) )
if return_type == ReturnType.FULL_TEXT:
__a : Any = prompt_text + text[prompt_length:]
else:
__a : Any = text[prompt_length:]
__a : Dict = {'''generated_text''': all_text}
records.append(_UpperCAmelCase )
        return records
| 188 | 0 |
"""simple docstring"""
__all__ = [
'''VerificationMode''',
'''Version''',
'''disable_progress_bar''',
'''enable_progress_bar''',
'''is_progress_bar_enabled''',
'''experimental''',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 213 |
'''simple docstring'''
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef
DEPRECATION_WARNING = (
'''This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate '''
'''library. You can have a look at this example script for pointers: '''
'''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py'''
)
def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()
def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }
def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }
def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
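# Hedged usage sketch (added; the arrays are illustrative):
#
#   import numpy as np
#   glue_compute_metrics("mrpc", np.array([1, 0, 1]), np.array([1, 1, 1]))
#   # -> {"acc": 0.666..., "f1": 0.8, "acc_and_f1": 0.733...}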
| 22 | 0 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def UpperCAmelCase_( a__ , a__ , a__ , a__ , a__=True , a__="pt" ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = {'''add_prefix_space''': True} if isinstance(A__ , A__ ) and not line.startswith(''' ''' ) else {}
SCREAMING_SNAKE_CASE : Optional[Any] = padding_side
return tokenizer(
[line] , max_length=A__ , padding='''max_length''' if pad_to_max_length else None , truncation=A__ , return_tensors=A__ , add_special_tokens=A__ , **A__ , )
def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset(Dataset):
    def __init__(self, tokenizer, data_dir, max_source_length, max_target_length, type_path="train", n_obs=None, src_lang=None, tgt_lang=None, prefix=""):
super().__init__()
SCREAMING_SNAKE_CASE : Dict = Path(lowercase__ ).joinpath(type_path + '''.source''' )
SCREAMING_SNAKE_CASE : Optional[Any] = Path(lowercase__ ).joinpath(type_path + '''.target''' )
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_char_lens(self.src_file )
SCREAMING_SNAKE_CASE : Optional[int] = max_source_length
SCREAMING_SNAKE_CASE : str = max_target_length
assert min(self.src_lens ) > 0, F"""found empty line in {self.src_file}"""
SCREAMING_SNAKE_CASE : Dict = tokenizer
SCREAMING_SNAKE_CASE : int = prefix
if n_obs is not None:
SCREAMING_SNAKE_CASE : Dict = self.src_lens[:n_obs]
SCREAMING_SNAKE_CASE : Any = src_lang
SCREAMING_SNAKE_CASE : int = tgt_lang
def __len__( self ) ->List[Any]:
return len(self.src_lens )
def __getitem__( self , _lowerCamelCase ) ->Any:
SCREAMING_SNAKE_CASE : Dict = index + 1 # linecache starts at 1
SCREAMING_SNAKE_CASE : Tuple = self.prefix + linecache.getline(str(self.src_file ) , lowercase__ ).rstrip('''\n''' )
SCREAMING_SNAKE_CASE : int = linecache.getline(str(self.tgt_file ) , lowercase__ ).rstrip('''\n''' )
assert source_line, F"""empty source line for index {index}"""
assert tgt_line, F"""empty tgt line for index {index}"""
# Need to add eos token manually for T5
if isinstance(self.tokenizer , lowercase__ ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
SCREAMING_SNAKE_CASE : int = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , lowercase__ ) else self.tokenizer
)
SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer.generator if isinstance(self.tokenizer , lowercase__ ) else self.tokenizer
SCREAMING_SNAKE_CASE : Any = encode_line(lowercase__ , lowercase__ , self.max_source_length , '''right''' )
SCREAMING_SNAKE_CASE : int = encode_line(lowercase__ , lowercase__ , self.max_target_length , '''right''' )
SCREAMING_SNAKE_CASE : Optional[Any] = source_inputs['''input_ids'''].squeeze()
SCREAMING_SNAKE_CASE : Union[str, Any] = target_inputs['''input_ids'''].squeeze()
SCREAMING_SNAKE_CASE : List[Any] = source_inputs['''attention_mask'''].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]
    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
SCREAMING_SNAKE_CASE : Optional[int] = torch.stack([x['''input_ids'''] for x in batch] )
SCREAMING_SNAKE_CASE : str = torch.stack([x['''attention_mask'''] for x in batch] )
SCREAMING_SNAKE_CASE : Tuple = torch.stack([x['''decoder_input_ids'''] for x in batch] )
SCREAMING_SNAKE_CASE : List[Any] = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , lowercase__ )
else self.tokenizer.pad_token_id
)
SCREAMING_SNAKE_CASE : Union[str, Any] = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , lowercase__ )
else self.tokenizer.pad_token_id
)
SCREAMING_SNAKE_CASE : List[Any] = trim_batch(lowercase__ , lowercase__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = trim_batch(lowercase__ , lowercase__ , attention_mask=lowercase__ )
SCREAMING_SNAKE_CASE : List[str] = {
'''input_ids''': source_ids,
'''attention_mask''': source_mask,
'''decoder_input_ids''': y,
}
return batch
logger = getLogger(__name__)
def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to output_dir/git_log.json"""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns, reference_lns):
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
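# Hedged usage sketch (added; the strings are illustrative):
#
#   f1_score("The cat sat.", "A cat sat")         # token-level F1 after normalization
#   exact_match_score("The cat sat.", "cat sat")  # -> True (article/punctuation stripped)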
| 357 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deit"] = [
'''DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DeiTForImageClassification''',
'''DeiTForImageClassificationWithTeacher''',
'''DeiTForMaskedImageModeling''',
'''DeiTModel''',
'''DeiTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deit"] = [
'''TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDeiTForImageClassification''',
'''TFDeiTForImageClassificationWithTeacher''',
'''TFDeiTForMaskedImageModeling''',
'''TFDeiTModel''',
'''TFDeiTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 19 | 0 |
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'''linear''': get_linear_schedule_with_warmup,
'''cosine''': get_cosine_schedule_with_warmup,
'''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup,
'''polynomial''': get_polynomial_decay_schedule_with_warmup,
'''constant''': get_constant_schedule,
'''constant_w_warmup''': get_constant_schedule_with_warmup,
}
class Seq2SeqTrainer(Trainer):
    def __init__(self, config=None, data_args=None, *args, **kwargs):
'''simple docstring'''
super().__init__(*_A , **_A )
if config is None:
assert isinstance(self.model , _A ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
F''' {self.model.__class__}'''
)
_lowercase =self.model.config
else:
_lowercase =config
_lowercase =data_args
_lowercase =self.config.tgt_vocab_size if isinstance(self.config , _A ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
F'''The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'''
' padding..' )
if self.args.label_smoothing == 0:
_lowercase =torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
_lowercase =label_smoothed_nll_loss
    def create_optimizer_and_scheduler(self, num_training_steps: int):
'''simple docstring'''
if self.optimizer is None:
_lowercase =['bias', 'LayerNorm.weight']
_lowercase =[
{
'params': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
'weight_decay': self.args.weight_decay,
},
{
'params': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
'weight_decay': 0.0,
},
]
_lowercase =Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
_lowercase =Adafactor
_lowercase ={'scale_parameter': False, 'relative_step': False}
else:
_lowercase =AdamW
_lowercase ={
                "betas": (self.args.adam_beta1, self.args.adam_beta2),
'eps': self.args.adam_epsilon,
}
_lowercase =self.args.learning_rate
if self.sharded_ddp:
_lowercase =OSS(
params=_A , optim=_A , **_A , )
else:
_lowercase =optimizer_cls(_A , **_A )
if self.lr_scheduler is None:
_lowercase =self._get_lr_scheduler(_A )
else: # ignoring --lr_scheduler
logger.warning('scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.' )
    def _get_lr_scheduler(self, num_training_steps):
'''simple docstring'''
_lowercase =arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
_lowercase =schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
_lowercase =schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
else:
_lowercase =schedule_func(
self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=_A )
return scheduler
    def _get_train_sampler(self):
'''simple docstring'''
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
    def _compute_loss(self, model, inputs, labels):
'''simple docstring'''
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
_lowercase =model(**_A , use_cache=_A )[0]
_lowercase =self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
else:
# compute usual loss via models
_lowercase =model(**_A , labels=_A , use_cache=_A )[:2]
else:
# compute label smoothed loss
_lowercase =model(**_A , use_cache=_A )[0]
_lowercase =torch.nn.functional.log_softmax(_A , dim=-1 )
_lowercase =self.loss_fn(_A , _A , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
    def compute_loss(self, model, inputs):
'''simple docstring'''
_lowercase =inputs.pop('labels' )
_lowercase =self._compute_loss(_A , _A , _A )
return loss
    def prediction_step(self, model, inputs, prediction_loss_only, ignore_keys=None):
'''simple docstring'''
_lowercase =self._prepare_inputs(_A )
_lowercase ={
'max_length': self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
'num_beams': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
_lowercase =self.model.generate(
inputs['input_ids'] , attention_mask=inputs['attention_mask'] , **_A , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
_lowercase =self._pad_tensors_to_max_len(_A , gen_kwargs['max_length'] )
_lowercase =inputs.pop('labels' )
with torch.no_grad():
# compute loss on predict data
_lowercase =self._compute_loss(_A , _A , _A )
_lowercase =loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
_lowercase =generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
_lowercase =self._pad_tensors_to_max_len(_A , gen_kwargs['max_length'] )
return (loss, logits, labels)
    def _pad_tensors_to_max_len(self, tensor, max_length):
'''simple docstring'''
_lowercase =self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
'Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be'
F''' padded to `max_length`={max_length}''' )
_lowercase =pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
_lowercase =tensor
return padded_tensor
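# Hedged usage note (added): this subclass plugs into the standard Trainer API,
# e.g. Seq2SeqTrainer(config=config, data_args=data_args, model=model,
#      args=training_args, train_dataset=train_dataset).train()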
| 205 |
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
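# Hedged sanity check (added, not in the original file): the origin never
# escapes, so it maps to the maximum normalized distance.
#
#   assert get_distance(0, 0, 50) == 1.0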
def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)
def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))
def get_image(image_width: int = 800, image_height: int = 600, figure_center_x: float = -0.6, figure_center_y: float = 0, figure_width: float = 3.2, max_step: int = 50, use_distance_color_coding: bool = True) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 280 | 0 |
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format='''%(message)s''')
def column_reshape(input_array: np.ndarray) -> np.ndarray:
    """Reshape a row Numpy array into a column Numpy array."""
    return input_array.reshape((input_array.size, 1))
def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data, centered_data.T)
    return covariance_sum / features.shape[1]
def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    general_data_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
    return covariance_sum / features.shape[1]
def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    # Check if the features have been loaded
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in the reverse order (-1), and then takes only the first
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T, features)
        logging.info("Principal Component Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError
def linear_discriminant_analysis(features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int) -> np.ndarray:
    assert classes > dimensions
    # Check if features have been already loaded
    if features.any():
        _, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes),
        )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info("Linear Discriminant Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError
def test_linear_discriminant_analysis() -> None:
"""simple docstring"""
lowerCAmelCase__ : int = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
lowerCAmelCase__ : List[str] = np.array([0, 0, 0, 1, 1] )
lowerCAmelCase__ : Optional[int] = 2
lowerCAmelCase__ : str = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(_a ) as error_info:
lowerCAmelCase__ : Dict = linear_discriminant_analysis(
_a , _a , _a , _a )
if isinstance(_a , np.ndarray ):
raise AssertionError(
'''Did not raise AssertionError for dimensions > classes''' )
assert error_info.type is AssertionError
def test_principal_component_analysis() -> None:
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
lowerCAmelCase__ : str = 2
lowerCAmelCase__ : int = np.array([[6.92_82_03_23, 8.66_02_54_04, 10.39_23_04_85], [3.0, 3.0, 3.0]] )
with pytest.raises(_a ) as error_info:
lowerCAmelCase__ : str = principal_component_analysis(_a , _a )
if not np.allclose(_a , _a ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
| 211 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

_model_names = [
'''small''',
'''small-base''',
'''medium''',
'''medium-base''',
'''intermediate''',
'''intermediate-base''',
'''large''',
'''large-base''',
'''xlarge''',
'''xlarge-base''',
]
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json''',
'''funnel-transformer/small-base''': (
'''https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json''',
'''funnel-transformer/large-base''': (
'''https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json'''
),
},
}
lowerCamelCase = {f'''funnel-transformer/{name}''': 512 for name in _model_names}
lowerCamelCase = {f'''funnel-transformer/{name}''': {'''do_lower_case''': True} for name in _model_names}
class _a ( _lowercase):
_a : Tuple = VOCAB_FILES_NAMES
_a : Dict = PRETRAINED_VOCAB_FILES_MAP
_a : Dict = PRETRAINED_INIT_CONFIGURATION
_a : Union[str, Any] = FunnelTokenizer
_a : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a : int = 2
def __init__( self : List[Any] , _SCREAMING_SNAKE_CASE : str=None , _SCREAMING_SNAKE_CASE : str=None , _SCREAMING_SNAKE_CASE : Any=True , _SCREAMING_SNAKE_CASE : Any="<unk>" , _SCREAMING_SNAKE_CASE : Dict="<sep>" , _SCREAMING_SNAKE_CASE : Optional[int]="<pad>" , _SCREAMING_SNAKE_CASE : str="<cls>" , _SCREAMING_SNAKE_CASE : List[str]="<mask>" , _SCREAMING_SNAKE_CASE : Optional[int]="<s>" , _SCREAMING_SNAKE_CASE : Dict="</s>" , _SCREAMING_SNAKE_CASE : Any=True , _SCREAMING_SNAKE_CASE : Dict=True , _SCREAMING_SNAKE_CASE : Tuple=None , _SCREAMING_SNAKE_CASE : str="##" , **_SCREAMING_SNAKE_CASE : List[str] , )-> List[str]:
super().__init__(
_SCREAMING_SNAKE_CASE , tokenizer_file=_SCREAMING_SNAKE_CASE , do_lower_case=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , clean_text=_SCREAMING_SNAKE_CASE , tokenize_chinese_chars=_SCREAMING_SNAKE_CASE , strip_accents=_SCREAMING_SNAKE_CASE , wordpieces_prefix=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
lowerCAmelCase__ : Any = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , _SCREAMING_SNAKE_CASE ) != do_lower_case
or normalizer_state.get('''strip_accents''' , _SCREAMING_SNAKE_CASE ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , _SCREAMING_SNAKE_CASE ) != tokenize_chinese_chars
):
lowerCAmelCase__ : int = getattr(_SCREAMING_SNAKE_CASE , normalizer_state.pop('''type''' ) )
lowerCAmelCase__ : Dict = do_lower_case
lowerCAmelCase__ : str = strip_accents
lowerCAmelCase__ : Dict = tokenize_chinese_chars
lowerCAmelCase__ : str = normalizer_class(**_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : List[Any] = do_lower_case
def UpperCAmelCase__( self : Any , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Union[str, Any]=None )-> Optional[int]:
lowerCAmelCase__ : Tuple = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCAmelCase__( self : Optional[Any] , _SCREAMING_SNAKE_CASE : List[int] , _SCREAMING_SNAKE_CASE : Optional[List[int]] = None )-> List[int]:
lowerCAmelCase__ : str = [self.sep_token_id]
lowerCAmelCase__ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0]
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase__( self : str , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[str] = None )-> Tuple[str]:
lowerCAmelCase__ : Any = self._tokenizer.model.save(_SCREAMING_SNAKE_CASE , name=_SCREAMING_SNAKE_CASE )
return tuple(_SCREAMING_SNAKE_CASE )
| 211 | 1 |
from collections.abc import Sequence
def UpperCamelCase ( __lowerCamelCase : Sequence[float] , __lowerCamelCase : bool = False ):
if not arr:
return 0
snake_case : Tuple = 0 if allow_empty_subarrays else float("-inf" )
snake_case : int = 0.0
for num in arr:
snake_case : Dict = max(0 if allow_empty_subarrays else num , curr_sum + num )
snake_case : str = max(__lowerCamelCase , __lowerCamelCase )
return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
__lowerCamelCase = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F'{max_subarray_sum(nums) = }')
| 59 |
from __future__ import annotations
def lowerCAmelCase__( lowercase : str , lowercase : list[str] | None = None ) -> list[list[str]]:
__snake_case : List[str] = word_bank or []
# create a table
__snake_case : int = len(lowercase ) + 1
__snake_case : list[list[list[str]]] = []
for _ in range(lowercase ):
table.append([] )
# seed value
__snake_case : Optional[int] = [[]] # because empty string has empty combination
# iterate through the indices
for i in range(lowercase ):
# condition
if table[i] != []:
for word in word_bank:
# slice condition
if target[i : i + len(lowercase )] == word:
__snake_case : list[list[str]] = [
[word, *way] for way in table[i]
]
# adds the word to every combination the current position holds
# now,push that combination to the table[i+len(word)]
table[i + len(lowercase )] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(lowercase )]:
combination.reverse()
return table[len(lowercase )]
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)
| 326 | 0 |
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class snake_case_ :
def __init__( self : Tuple , _snake_case : Tuple , _snake_case : List[str]=13 , _snake_case : List[Any]=30 , _snake_case : str=2 , _snake_case : Dict=3 , _snake_case : List[Any]=True , _snake_case : Dict=True , _snake_case : List[str]=32 , _snake_case : Optional[Any]=5 , _snake_case : List[str]=4 , _snake_case : Tuple=37 , _snake_case : int="gelu" , _snake_case : int=0.1 , _snake_case : int=0.1 , _snake_case : Tuple=10 , _snake_case : List[Any]=0.02 , _snake_case : Optional[int]=None , )->List[Any]:
'''simple docstring'''
__lowerCAmelCase : Any = parent
__lowerCAmelCase : List[str] = batch_size
__lowerCAmelCase : Any = image_size
__lowerCAmelCase : Optional[Any] = patch_size
__lowerCAmelCase : Tuple = num_channels
__lowerCAmelCase : Optional[int] = is_training
__lowerCAmelCase : Any = use_labels
__lowerCAmelCase : int = hidden_size
__lowerCAmelCase : int = num_hidden_layers
__lowerCAmelCase : int = num_attention_heads
__lowerCAmelCase : int = intermediate_size
__lowerCAmelCase : List[str] = hidden_act
__lowerCAmelCase : Union[str, Any] = hidden_dropout_prob
__lowerCAmelCase : Any = attention_probs_dropout_prob
__lowerCAmelCase : str = type_sequence_label_size
__lowerCAmelCase : str = initializer_range
__lowerCAmelCase : Union[str, Any] = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__lowerCAmelCase : Dict = (image_size // patch_size) ** 2
__lowerCAmelCase : List[Any] = num_patches + 1
def UpperCAmelCase__ ( self : List[Any] )->List[Any]:
'''simple docstring'''
__lowerCAmelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCAmelCase : str = None
if self.use_labels:
__lowerCAmelCase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase : List[Any] = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase__ ( self : List[str] )->Union[str, Any]:
'''simple docstring'''
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self : Any , _snake_case : List[str] , _snake_case : Union[str, Any] , _snake_case : Dict )->List[Any]:
'''simple docstring'''
__lowerCAmelCase : Optional[Any] = ViTMSNModel(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowerCAmelCase : Dict = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self : Tuple , _snake_case : Tuple , _snake_case : Any , _snake_case : Union[str, Any] )->int:
'''simple docstring'''
__lowerCAmelCase : List[Any] = self.type_sequence_label_size
__lowerCAmelCase : Dict = ViTMSNForImageClassification(lowercase__ )
model.to(lowercase__ )
model.eval()
__lowerCAmelCase : List[Any] = model(lowercase__ , labels=lowercase__ )
print("""Pixel and labels shape: {pixel_values.shape}, {labels.shape}""" )
print("""Labels: {labels}""" )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__lowerCAmelCase : Union[str, Any] = 1
__lowerCAmelCase : int = ViTMSNForImageClassification(lowercase__ )
model.to(lowercase__ )
model.eval()
__lowerCAmelCase : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowerCAmelCase : Union[str, Any] = model(lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase__ ( self : List[Any] )->List[Any]:
'''simple docstring'''
__lowerCAmelCase : Optional[int] = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Dict = config_and_inputs
__lowerCAmelCase : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class snake_case_ ( lowerCamelCase__ ,lowerCamelCase__ ,unittest.TestCase ):
A_ = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
A_ = (
{'feature-extraction': ViTMSNModel, 'image-classification': ViTMSNForImageClassification}
if is_torch_available()
else {}
)
A_ = False
A_ = False
A_ = False
A_ = False
def UpperCAmelCase__ ( self : Optional[Any] )->List[str]:
'''simple docstring'''
__lowerCAmelCase : int = ViTMSNModelTester(self )
__lowerCAmelCase : Dict = ConfigTester(self , config_class=lowercase__ , has_text_modality=lowercase__ , hidden_size=37 )
def UpperCAmelCase__ ( self : Any )->str:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMSN does not use inputs_embeds""" )
def UpperCAmelCase__ ( self : Any )->int:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self : Any )->Tuple:
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase : Tuple = model_class(lowercase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__lowerCAmelCase : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase__ , nn.Linear ) )
def UpperCAmelCase__ ( self : int )->Any:
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase : Optional[int] = model_class(lowercase__ )
__lowerCAmelCase : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase : Any = [*signature.parameters.keys()]
__lowerCAmelCase : Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowercase__ )
def UpperCAmelCase__ ( self : Dict )->Any:
'''simple docstring'''
__lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__ )
def UpperCAmelCase__ ( self : List[Any] )->Dict:
'''simple docstring'''
__lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase__ )
@slow
def UpperCAmelCase__ ( self : int )->Tuple:
'''simple docstring'''
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase : int = ViTMSNModel.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
def _SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
__lowerCAmelCase : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class snake_case_ ( unittest.TestCase ):
@cached_property
def UpperCAmelCase__ ( self : str )->Any:
'''simple docstring'''
return ViTImageProcessor.from_pretrained("""facebook/vit-msn-small""" ) if is_vision_available() else None
@slow
def UpperCAmelCase__ ( self : str )->Tuple:
'''simple docstring'''
torch.manual_seed(2 )
__lowerCAmelCase : str = ViTMSNForImageClassification.from_pretrained("""facebook/vit-msn-small""" ).to(lowercase__ )
__lowerCAmelCase : str = self.default_image_processor
__lowerCAmelCase : Union[str, Any] = prepare_img()
__lowerCAmelCase : Union[str, Any] = image_processor(images=lowercase__ , return_tensors="""pt""" ).to(lowercase__ )
# forward pass
with torch.no_grad():
__lowerCAmelCase : Optional[Any] = model(**lowercase__ )
# verify the logits
__lowerCAmelCase : List[Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowercase__ )
__lowerCAmelCase : str = torch.tensor([-0.0_803, -0.4_454, -0.2_375] ).to(lowercase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase__ , atol=1E-4 ) ) | 358 |
from math import isqrt
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :int ) -> list[int]:
__lowerCAmelCase : Tuple = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Tuple = False
return [i for i in range(2 , SCREAMING_SNAKE_CASE ) if is_prime[i]]
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :int = 10**8 ) -> int:
__lowerCAmelCase : int = calculate_prime_numbers(max_number // 2 )
__lowerCAmelCase : List[Any] = 0
__lowerCAmelCase : List[str] = 0
__lowerCAmelCase : List[Any] = len(SCREAMING_SNAKE_CASE ) - 1
while left <= right:
while prime_numbers[left] * prime_numbers[right] >= max_number:
right -= 1
semiprimes_count += right - left + 1
left += 1
return semiprimes_count
if __name__ == "__main__":
print(f'''{solution() = }''') | 232 | 0 |
'''simple docstring'''
import os
# Precomputes a list of the 100 first triangular numbers
UpperCAmelCase : List[Any] = [int(0.5 * n * (n + 1)) for n in range(1, 1_0_1)]
def a__ ( ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = os.path.dirname(os.path.realpath(__snake_case ) )
__SCREAMING_SNAKE_CASE = os.path.join(__snake_case , """words.txt""" )
__SCREAMING_SNAKE_CASE = ''
with open(__snake_case ) as f:
__SCREAMING_SNAKE_CASE = f.readline()
__SCREAMING_SNAKE_CASE = [word.strip("""\"""" ) for word in words.strip("""\r\n""" ).split(""",""" )]
__SCREAMING_SNAKE_CASE = [
word
for word in [sum(ord(__snake_case ) - 64 for x in word ) for word in words]
if word in TRIANGULAR_NUMBERS
]
return len(__snake_case )
if __name__ == "__main__":
print(solution())
| 267 |
'''simple docstring'''
import math
def _lowerCAmelCase ( __snake_case : int ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(__snake_case ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def _lowerCAmelCase ( __snake_case : float = 0.1 ) -> int:
__A : Tuple = 3
__A : Optional[int] = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
primes += is_prime(__snake_case )
j += 2
return j
if __name__ == "__main__":
import doctest
doctest.testmod() | 190 | 0 |
"""simple docstring"""
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __a (UpperCamelCase_ , unittest.TestCase):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Any = LayoutLMTokenizer
_SCREAMING_SNAKE_CASE :Any = LayoutLMTokenizerFast
_SCREAMING_SNAKE_CASE :int = True
_SCREAMING_SNAKE_CASE :Optional[Any] = True
def _a ( self ) -> Any:
"""simple docstring"""
super().setUp()
SCREAMING_SNAKE_CASE__ : List[Any] = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
SCREAMING_SNAKE_CASE__ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def _a ( self , **_a ) -> List[Any]:
"""simple docstring"""
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **_a )
def _a ( self , _a ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = """UNwant\u00E9d,running"""
SCREAMING_SNAKE_CASE__ : Optional[int] = """unwanted, running"""
return input_text, output_text
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.tokenizer_class(self.vocab_file )
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(_a , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , [7, 4, 5, 10, 8, 9] )
def _a ( self ) -> List[Any]:
"""simple docstring"""
pass
| 353 |
"""simple docstring"""
class __a :
'''simple docstring'''
def __init__( self , _a ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = arr.split(""",""" )
def _a ( self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = [int(self.array[0] )] * len(self.array )
SCREAMING_SNAKE_CASE__ : List[str] = [int(self.array[0] )] * len(self.array )
for i in range(1 , len(self.array ) ):
SCREAMING_SNAKE_CASE__ : Dict = max(
int(self.array[i] ) + sum_value[i - 1] , int(self.array[i] ) )
SCREAMING_SNAKE_CASE__ : List[Any] = max(sum_value[i] , rear[i - 1] )
return rear[len(self.array ) - 1]
if __name__ == "__main__":
a :Optional[Any] = input("please input some numbers:")
a :Optional[Any] = SubArray(whole_array)
a :Optional[int] = array.solve_sub_array()
print(("the results is:", re))
| 56 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__: Dict = logging.get_logger(__name__)
A__: Dict = {
'''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''',
'''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''',
}
class A__ ( UpperCAmelCase__ ):
__UpperCamelCase : Union[str, Any] = "markuplm"
def __init__( self :Dict , SCREAMING_SNAKE_CASE :str=3_0_5_2_2 , SCREAMING_SNAKE_CASE :Dict=7_6_8 , SCREAMING_SNAKE_CASE :int=1_2 , SCREAMING_SNAKE_CASE :Any=1_2 , SCREAMING_SNAKE_CASE :List[Any]=3_0_7_2 , SCREAMING_SNAKE_CASE :Optional[int]="gelu" , SCREAMING_SNAKE_CASE :List[str]=0.1 , SCREAMING_SNAKE_CASE :List[Any]=0.1 , SCREAMING_SNAKE_CASE :int=5_1_2 , SCREAMING_SNAKE_CASE :Optional[int]=2 , SCREAMING_SNAKE_CASE :Any=0.02 , SCREAMING_SNAKE_CASE :str=1e-12 , SCREAMING_SNAKE_CASE :List[str]=0 , SCREAMING_SNAKE_CASE :Tuple=0 , SCREAMING_SNAKE_CASE :str=2 , SCREAMING_SNAKE_CASE :Union[str, Any]=2_5_6 , SCREAMING_SNAKE_CASE :Optional[Any]=1_0_2_4 , SCREAMING_SNAKE_CASE :Dict=2_1_6 , SCREAMING_SNAKE_CASE :Union[str, Any]=1_0_0_1 , SCREAMING_SNAKE_CASE :List[Any]=3_2 , SCREAMING_SNAKE_CASE :Optional[Any]=5_0 , SCREAMING_SNAKE_CASE :Union[str, Any]="absolute" , SCREAMING_SNAKE_CASE :int=True , SCREAMING_SNAKE_CASE :Any=None , **SCREAMING_SNAKE_CASE :Tuple , ) -> Tuple:
'''simple docstring'''
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE , bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
_a : str =vocab_size
_a : List[Any] =hidden_size
_a : Tuple =num_hidden_layers
_a : Dict =num_attention_heads
_a : int =hidden_act
_a : Union[str, Any] =intermediate_size
_a : str =hidden_dropout_prob
_a : List[Any] =attention_probs_dropout_prob
_a : Union[str, Any] =max_position_embeddings
_a : Any =type_vocab_size
_a : Any =initializer_range
_a : int =layer_norm_eps
_a : Any =position_embedding_type
_a : List[str] =use_cache
_a : Union[str, Any] =classifier_dropout
# additional properties
_a : Optional[int] =max_depth
_a : Optional[Any] =max_xpath_tag_unit_embeddings
_a : Optional[Any] =max_xpath_subs_unit_embeddings
_a : Dict =tag_pad_id
_a : str =subs_pad_id
_a : List[str] =xpath_unit_hidden_size
| 276 |
'''simple docstring'''
from __future__ import annotations
import requests
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : str ) -> dict:
_a : Any =F"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
return requests.get(_UpperCAmelCase ).json()
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int = 10 ) -> list[dict]:
_a : Union[str, Any] ="""https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"""
_a : int =requests.get(_UpperCAmelCase ).json()[:max_stories]
return [get_hackernews_story(_UpperCAmelCase ) for story_id in story_ids]
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int = 10 ) -> str:
_a : Union[str, Any] =hackernews_top_stories(_UpperCAmelCase )
return "\n".join("""* [{title}]({url})""".format(**_UpperCAmelCase ) for story in stories )
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 276 | 1 |
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class __A( a ):
snake_case_ = '''EncodecFeatureExtractor'''
snake_case_ = ('''T5Tokenizer''', '''T5TokenizerFast''')
def __init__( self , _snake_case , _snake_case ) -> Tuple:
'''simple docstring'''
super().__init__(_snake_case , _snake_case )
__a = self.feature_extractor
__a = False
def SCREAMING_SNAKE_CASE_ ( self , _snake_case=None , _snake_case=None , _snake_case=True ) -> Optional[int]:
'''simple docstring'''
return self.tokenizer.get_decoder_prompt_ids(task=_snake_case , language=_snake_case , no_timestamps=_snake_case )
def __call__( self , *_snake_case , **_snake_case ) -> Dict:
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*_snake_case , **_snake_case )
__a = kwargs.pop('''audio''' , _snake_case )
__a = kwargs.pop('''sampling_rate''' , _snake_case )
__a = kwargs.pop('''text''' , _snake_case )
if len(_snake_case ) > 0:
__a = args[0]
__a = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
if text is not None:
__a = self.tokenizer(_snake_case , **_snake_case )
if audio is not None:
__a = self.feature_extractor(_snake_case , *_snake_case , sampling_rate=_snake_case , **_snake_case )
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
__a = audio_inputs['''input_values''']
if "padding_mask" in audio_inputs:
__a = audio_inputs['''padding_mask''']
return inputs
def SCREAMING_SNAKE_CASE_ ( self , *_snake_case , **_snake_case ) -> Dict:
'''simple docstring'''
__a = kwargs.pop('''audio''' , _snake_case )
__a = kwargs.pop('''padding_mask''' , _snake_case )
if len(_snake_case ) > 0:
__a = args[0]
__a = args[1:]
if audio_values is not None:
return self._decode_audio(_snake_case , padding_mask=_snake_case )
else:
return self.tokenizer.batch_decode(*_snake_case , **_snake_case )
def SCREAMING_SNAKE_CASE_ ( self , *_snake_case , **_snake_case ) -> Union[str, Any]:
'''simple docstring'''
return self.tokenizer.decode(*_snake_case , **_snake_case )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case = None ) -> List[np.ndarray]:
'''simple docstring'''
__a = to_numpy(_snake_case )
__a , __a , __a = audio_values.shape
if padding_mask is None:
return list(_snake_case )
__a = to_numpy(_snake_case )
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
__a = seq_len - padding_mask.shape[-1]
__a = 1 - self.feature_extractor.padding_value
__a = np.pad(_snake_case , ((0, 0), (0, difference)) , '''constant''' , constant_values=_snake_case )
__a = audio_values.tolist()
for i in range(_snake_case ):
__a = np.asarray(audio_values[i] )[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
__a = sliced_audio.reshape(_snake_case , -1 )
return audio_values | 33 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
A : List[Any] = logging.get_logger(__name__)
A : Optional[Any] = {
'microsoft/table-transformer-detection': (
'https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'
),
}
class __A( a ):
snake_case_ = '''table-transformer'''
snake_case_ = ['''past_key_values''']
snake_case_ = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self , _snake_case=True , _snake_case=None , _snake_case=3 , _snake_case=100 , _snake_case=6 , _snake_case=2_048 , _snake_case=8 , _snake_case=6 , _snake_case=2_048 , _snake_case=8 , _snake_case=0.0 , _snake_case=0.0 , _snake_case=True , _snake_case="relu" , _snake_case=256 , _snake_case=0.1 , _snake_case=0.0 , _snake_case=0.0 , _snake_case=0.02 , _snake_case=1.0 , _snake_case=False , _snake_case="sine" , _snake_case="resnet50" , _snake_case=True , _snake_case=False , _snake_case=1 , _snake_case=5 , _snake_case=2 , _snake_case=1 , _snake_case=1 , _snake_case=5 , _snake_case=2 , _snake_case=0.1 , **_snake_case , ) -> int:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
__a = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(_snake_case , _snake_case ):
__a = backbone_config.get('''model_type''' )
__a = CONFIG_MAPPING[backbone_model_type]
__a = config_class.from_dict(_snake_case )
# set timm attributes to None
__a , __a , __a = None, None, None
__a = use_timm_backbone
__a = backbone_config
__a = num_channels
__a = num_queries
__a = d_model
__a = encoder_ffn_dim
__a = encoder_layers
__a = encoder_attention_heads
__a = decoder_ffn_dim
__a = decoder_layers
__a = decoder_attention_heads
__a = dropout
__a = attention_dropout
__a = activation_dropout
__a = activation_function
__a = init_std
__a = init_xavier_std
__a = encoder_layerdrop
__a = decoder_layerdrop
__a = encoder_layers
__a = auxiliary_loss
__a = position_embedding_type
__a = backbone
__a = use_pretrained_backbone
__a = dilation
# Hungarian matcher
__a = class_cost
__a = bbox_cost
__a = giou_cost
# Loss coefficients
__a = mask_loss_coefficient
__a = dice_loss_coefficient
__a = bbox_loss_coefficient
__a = giou_loss_coefficient
__a = eos_coefficient
super().__init__(is_encoder_decoder=_snake_case , **_snake_case )
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
return self.d_model
class __A( a ):
snake_case_ = version.parse('''1.11''' )
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
] )
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> float:
'''simple docstring'''
return 1E-5
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
return 12 | 33 | 1 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class a_ ( unittest.TestCase ):
def A__ ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = tempfile.mkdtemp()
UpperCamelCase = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
UpperCamelCase = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
"""image_std""": [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
}
UpperCamelCase = os.path.join(self.tmpdirname , _SCREAMING_SNAKE_CASE )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def A__ ( self , **_SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
return BertTokenizer.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE )
def A__ ( self , **_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
return BertTokenizerFast.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE )
def A__ ( self , **_SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> str:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def A__ ( self ) -> Any:
"""simple docstring"""
UpperCamelCase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
UpperCamelCase = [Image.fromarray(np.moveaxis(_SCREAMING_SNAKE_CASE , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def A__ ( self ) -> Any:
"""simple docstring"""
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = self.get_rust_tokenizer()
UpperCamelCase = self.get_image_processor()
UpperCamelCase = AlignProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
processor_slow.save_pretrained(self.tmpdirname )
UpperCamelCase = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=_SCREAMING_SNAKE_CASE )
UpperCamelCase = AlignProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
processor_fast.save_pretrained(self.tmpdirname )
UpperCamelCase = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _SCREAMING_SNAKE_CASE )
self.assertIsInstance(processor_fast.tokenizer , _SCREAMING_SNAKE_CASE )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _SCREAMING_SNAKE_CASE )
self.assertIsInstance(processor_fast.image_processor , _SCREAMING_SNAKE_CASE )
def A__ ( self ) -> str:
"""simple docstring"""
UpperCamelCase = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
UpperCamelCase = self.get_image_processor(do_normalize=_SCREAMING_SNAKE_CASE , padding_value=1.0 )
UpperCamelCase = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_SCREAMING_SNAKE_CASE , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _SCREAMING_SNAKE_CASE )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _SCREAMING_SNAKE_CASE )
def A__ ( self ) -> str:
"""simple docstring"""
UpperCamelCase = self.get_image_processor()
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = AlignProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
UpperCamelCase = self.prepare_image_inputs()
UpperCamelCase = image_processor(_SCREAMING_SNAKE_CASE , return_tensors="""np""" )
UpperCamelCase = processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def A__ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = self.get_image_processor()
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = AlignProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
UpperCamelCase = """lower newer"""
UpperCamelCase = processor(text=_SCREAMING_SNAKE_CASE )
UpperCamelCase = tokenizer(_SCREAMING_SNAKE_CASE , padding="""max_length""" , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def A__ ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = self.get_image_processor()
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = AlignProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
UpperCamelCase = """lower newer"""
UpperCamelCase = self.prepare_image_inputs()
UpperCamelCase = processor(text=_SCREAMING_SNAKE_CASE , images=_SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(_SCREAMING_SNAKE_CASE ):
processor()
def A__ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = self.get_image_processor()
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = AlignProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
UpperCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase = processor.batch_decode(_SCREAMING_SNAKE_CASE )
UpperCamelCase = tokenizer.batch_decode(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def A__ ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = self.get_image_processor()
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = AlignProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
UpperCamelCase = """lower newer"""
UpperCamelCase = self.prepare_image_inputs()
UpperCamelCase = processor(text=_SCREAMING_SNAKE_CASE , images=_SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 321 |
'''simple docstring'''
def lowercase__ ( __UpperCamelCase = 4000000 )-> int:
UpperCamelCase = []
UpperCamelCase ,UpperCamelCase = 0, 1
while b <= n:
if b % 2 == 0:
even_fibs.append(__UpperCamelCase )
UpperCamelCase ,UpperCamelCase = b, a + b
return sum(__UpperCamelCase )
if __name__ == "__main__":
print(f'{solution() = }')
| 321 | 1 |
"""simple docstring"""
import math
def __SCREAMING_SNAKE_CASE ( A_ , A_ = 0 , A_ = 0 ):
lowerCAmelCase__ : Tuple = end or len(A_ )
for i in range(A_ , A_ ):
lowerCAmelCase__ : Dict = i
lowerCAmelCase__ : List[Any] = array[i]
while temp_index != start and temp_index_value < array[temp_index - 1]:
lowerCAmelCase__ : str = array[temp_index - 1]
temp_index -= 1
lowerCAmelCase__ : int = temp_index_value
return array
def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ ): # Max Heap
lowerCAmelCase__ : List[str] = index
lowerCAmelCase__ : Any = 2 * index + 1 # Left Node
lowerCAmelCase__ : int = 2 * index + 2 # Right Node
if left_index < heap_size and array[largest] < array[left_index]:
lowerCAmelCase__ : Tuple = left_index
if right_index < heap_size and array[largest] < array[right_index]:
lowerCAmelCase__ : int = right_index
if largest != index:
lowerCAmelCase__ ,lowerCAmelCase__ : Dict = array[largest], array[index]
heapify(A_ , A_ , A_ )
def __SCREAMING_SNAKE_CASE ( A_ ):
lowerCAmelCase__ : Any = len(A_ )
for i in range(n // 2 , -1 , -1 ):
heapify(A_ , A_ , A_ )
for i in range(n - 1 , 0 , -1 ):
lowerCAmelCase__ ,lowerCAmelCase__ : List[str] = array[0], array[i]
heapify(A_ , 0 , A_ )
return array
def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ , A_ ):
if (array[first_index] > array[middle_index]) != (
array[first_index] > array[last_index]
):
return array[first_index]
elif (array[middle_index] > array[first_index]) != (
array[middle_index] > array[last_index]
):
return array[middle_index]
else:
return array[last_index]
def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ , A_ ):
lowerCAmelCase__ : str = low
lowerCAmelCase__ : Union[str, Any] = high
while True:
while array[i] < pivot:
i += 1
j -= 1
while pivot < array[j]:
j -= 1
if i >= j:
return i
lowerCAmelCase__ ,lowerCAmelCase__ : Union[str, Any] = array[j], array[i]
i += 1
def __SCREAMING_SNAKE_CASE ( A_ ):
if len(A_ ) == 0:
return array
lowerCAmelCase__ : int = 2 * math.ceil(math.loga(len(A_ ) ) )
lowerCAmelCase__ : Optional[Any] = 16
return intro_sort(A_ , 0 , len(A_ ) , A_ , A_ )
def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ , A_ , A_ ):
while end - start > size_threshold:
if max_depth == 0:
return heap_sort(A_ )
max_depth -= 1
lowerCAmelCase__ : List[str] = median_of_a(A_ , A_ , start + ((end - start) // 2) + 1 , end - 1 )
lowerCAmelCase__ : Union[str, Any] = partition(A_ , A_ , A_ , A_ )
intro_sort(A_ , A_ , A_ , A_ , A_ )
lowerCAmelCase__ : Optional[int] = p
return insertion_sort(A_ , A_ , A_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCamelCase : Optional[Any] = input('''Enter numbers separated by a comma : ''').strip()
__UpperCamelCase : Tuple = [float(item) for item in user_input.split(''',''')]
print(sort(unsorted))
| 74 |
"""simple docstring"""
import qiskit
def __SCREAMING_SNAKE_CASE ( A_ = 2 ):
lowerCAmelCase__ : int = qubits
# Using Aer's simulator
lowerCAmelCase__ : str = qiskit.Aer.get_backend('''aer_simulator''' )
# Creating a Quantum Circuit acting on the q register
lowerCAmelCase__ : str = qiskit.QuantumCircuit(A_ , A_ )
# Adding a H gate on qubit 0 (now q0 in superposition)
circuit.h(0 )
for i in range(1 , A_ ):
# Adding CX (CNOT) gate
circuit.cx(i - 1 , A_ )
# Mapping the quantum measurement to the classical bits
circuit.measure(list(range(A_ ) ) , list(range(A_ ) ) )
# Now measuring any one qubit would affect other qubits to collapse
# their super position and have same state as the measured one.
# Executing the circuit on the simulator
lowerCAmelCase__ : List[Any] = qiskit.execute(A_ , A_ , shots=10_00 )
return job.result().get_counts(A_ )
if __name__ == "__main__":
print(F'''Total count for various states are: {quantum_entanglement(3)}''')
| 74 | 1 |
def a ( A__ : list ) -> list:
"""simple docstring"""
_lowercase =len(snake_case__ )
for _ in range(snake_case__ ):
for i in range(_ % 2 , arr_size - 1 , 2 ):
if arr[i + 1] < arr[i]:
_lowercase , _lowercase =arr[i + 1], arr[i]
return arr
if __name__ == "__main__":
lowercase_ = list(range(1_0, 0, -1))
print(f"Original: {arr}. Sorted: {odd_even_transposition(arr)}")
| 205 |
"""simple docstring"""
from __future__ import annotations
def lowercase (snake_case__ : list[int] ) -> list[int]: # This function is recursive
'''simple docstring'''
lowerCAmelCase = len(snake_case__ )
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
lowerCAmelCase = array[0]
lowerCAmelCase = False
lowerCAmelCase = 1
lowerCAmelCase = []
while not is_found and i < array_length:
if array[i] < pivot:
lowerCAmelCase = True
lowerCAmelCase = [element for element in array[i:] if element >= array[i]]
lowerCAmelCase = longest_subsequence(snake_case__ )
if len(snake_case__ ) > len(snake_case__ ):
lowerCAmelCase = temp_array
else:
i += 1
lowerCAmelCase = [element for element in array[1:] if element >= pivot]
lowerCAmelCase = [pivot, *longest_subsequence(snake_case__ )]
if len(snake_case__ ) > len(snake_case__ ):
return temp_array
else:
return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
| 155 | 0 |
"""simple docstring"""
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
__A = logging.get_logger(__name__)
def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Optional[Any]:
try:
with open(_A , "rb" ) as flax_state_f:
__lowerCAmelCase: List[str] = from_bytes(_A , flax_state_f.read() )
except UnpicklingError as e:
try:
with open(_A ) as f:
if f.read().startswith("version" ):
raise OSError(
"You seem to have cloned a repository without having git-lfs installed. Please"
" install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
" folder you cloned." )
else:
raise ValueError from e
except (UnicodeDecodeError, ValueError):
raise EnvironmentError(F"Unable to convert {model_file} to Flax deserializable object. " )
return load_flax_weights_in_pytorch_model(_A , _A )
def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Optional[int]:
try:
import torch # noqa: F401
except ImportError:
logger.error(
"Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
" instructions." )
raise
# check if we have bf16 weights
__lowerCAmelCase: Tuple = flatten_dict(jax.tree_util.tree_map(lambda __SCREAMING_SNAKE_CASE : x.dtype == jnp.bfloataa , _A ) ).values()
if any(_A ):
# convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
"Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
"before loading those in PyTorch model." )
__lowerCAmelCase: int = jax.tree_util.tree_map(
lambda __SCREAMING_SNAKE_CASE : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , _A )
__lowerCAmelCase: str = ''
__lowerCAmelCase: Optional[int] = flatten_dict(_A , sep="." )
__lowerCAmelCase: Optional[Any] = pt_model.state_dict()
# keep track of unexpected & missing keys
__lowerCAmelCase: int = []
__lowerCAmelCase: Dict = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
__lowerCAmelCase: Union[str, Any] = flax_key_tuple.split("." )
if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
__lowerCAmelCase: List[Any] = flax_key_tuple_array[:-1] + ['weight']
__lowerCAmelCase: Optional[int] = jnp.transpose(_A , (3, 2, 0, 1) )
elif flax_key_tuple_array[-1] == "kernel":
__lowerCAmelCase: str = flax_key_tuple_array[:-1] + ['weight']
__lowerCAmelCase: int = flax_tensor.T
elif flax_key_tuple_array[-1] == "scale":
__lowerCAmelCase: List[str] = flax_key_tuple_array[:-1] + ['weight']
if "time_embedding" not in flax_key_tuple_array:
for i, flax_key_tuple_string in enumerate(_A ):
__lowerCAmelCase: Dict = (
flax_key_tuple_string.replace("_0" , ".0" )
.replace("_1" , ".1" )
.replace("_2" , ".2" )
.replace("_3" , ".3" )
.replace("_4" , ".4" )
.replace("_5" , ".5" )
.replace("_6" , ".6" )
.replace("_7" , ".7" )
.replace("_8" , ".8" )
.replace("_9" , ".9" )
)
__lowerCAmelCase: List[Any] = '.'.join(_A )
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
F"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}." )
else:
# add weight to pytorch dict
__lowerCAmelCase: List[Any] = np.asarray(_A ) if not isinstance(_A , np.ndarray ) else flax_tensor
__lowerCAmelCase: Union[str, Any] = torch.from_numpy(_A )
# remove from missing keys
missing_keys.remove(_A )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(_A )
pt_model.load_state_dict(_A )
# re-transform missing_keys to list
__lowerCAmelCase: Any = list(_A )
if len(_A ) > 0:
logger.warning(
"Some weights of the Flax model were not used when initializing the PyTorch model"
F" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
F" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
F" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
" FlaxBertForSequenceClassification model)." )
if len(_A ) > 0:
logger.warning(
F"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
F" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
" use it for predictions and inference." )
return pt_model
| 358 |
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 1_0**-1_0 ) -> float:
__lowerCAmelCase: Union[str, Any] = a
while True:
__lowerCAmelCase: Optional[int] = Decimal(__SCREAMING_SNAKE_CASE ) - (
Decimal(eval(__SCREAMING_SNAKE_CASE ) ) / Decimal(eval(str(diff(__SCREAMING_SNAKE_CASE ) ) ) ) # noqa: S307
)
# This number dictates the accuracy of the answer
if abs(eval(__SCREAMING_SNAKE_CASE ) ) < precision: # noqa: S307
return float(__SCREAMING_SNAKE_CASE )
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'''The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}''')
# Find root of polynomial
print(F'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}''')
# Find Square Root of 5
print(F'''The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}''')
# Exponential Roots
print(F'''The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}''')
| 108 | 0 |
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
UpperCAmelCase_ = {'LayoutLMv2Config', 'LayoutLMv3Config'}
@is_pipeline_test
class lowerCamelCase__( unittest.TestCase):
UpperCAmelCase__ : Tuple = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
UpperCAmelCase__ : Any = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
UpperCAmelCase__ : Tuple = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
UpperCAmelCase__ : List[str] = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def lowerCAmelCase__ ( self: Any , UpperCamelCase_: str , UpperCamelCase_: Tuple , UpperCamelCase_: str ):
__lowerCamelCase = ZeroShotClassificationPipeline(
model=A_ , tokenizer=A_ , candidate_labels=["""polics""", """health"""] )
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def lowerCAmelCase__ ( self: str , UpperCamelCase_: Dict , UpperCamelCase_: str ):
__lowerCamelCase = classifier("""Who are you voting for in 2020?""" , candidate_labels="""politics""" )
self.assertEqual(A_ , {"""sequence""": ANY(A_ ), """labels""": [ANY(A_ )], """scores""": [ANY(A_ )]} )
# No kwarg
__lowerCamelCase = classifier("""Who are you voting for in 2020?""" , ["""politics"""] )
self.assertEqual(A_ , {"""sequence""": ANY(A_ ), """labels""": [ANY(A_ )], """scores""": [ANY(A_ )]} )
__lowerCamelCase = classifier("""Who are you voting for in 2020?""" , candidate_labels=["""politics"""] )
self.assertEqual(A_ , {"""sequence""": ANY(A_ ), """labels""": [ANY(A_ )], """scores""": [ANY(A_ )]} )
__lowerCamelCase = classifier("""Who are you voting for in 2020?""" , candidate_labels="""politics, public health""" )
self.assertEqual(
A_ , {"""sequence""": ANY(A_ ), """labels""": [ANY(A_ ), ANY(A_ )], """scores""": [ANY(A_ ), ANY(A_ )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""] ) ) , 1.0 )
__lowerCamelCase = classifier("""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health"""] )
self.assertEqual(
A_ , {"""sequence""": ANY(A_ ), """labels""": [ANY(A_ ), ANY(A_ )], """scores""": [ANY(A_ ), ANY(A_ )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""] ) ) , 1.0 )
__lowerCamelCase = classifier(
"""Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template="""This text is about {}""" )
self.assertEqual(A_ , {"""sequence""": ANY(A_ ), """labels""": [ANY(A_ )], """scores""": [ANY(A_ )]} )
# https://github.com/huggingface/transformers/issues/13846
__lowerCamelCase = classifier(["""I am happy"""] , ["""positive""", """negative"""] )
self.assertEqual(
A_ , [
{"""sequence""": ANY(A_ ), """labels""": [ANY(A_ ), ANY(A_ )], """scores""": [ANY(A_ ), ANY(A_ )]}
for i in range(1 )
] , )
__lowerCamelCase = classifier(["""I am happy""", """I am sad"""] , ["""positive""", """negative"""] )
self.assertEqual(
A_ , [
{"""sequence""": ANY(A_ ), """labels""": [ANY(A_ ), ANY(A_ )], """scores""": [ANY(A_ ), ANY(A_ )]}
for i in range(2 )
] , )
with self.assertRaises(A_ ):
classifier("""""" , candidate_labels="""politics""" )
with self.assertRaises(A_ ):
classifier(A_ , candidate_labels="""politics""" )
with self.assertRaises(A_ ):
classifier("""Who are you voting for in 2020?""" , candidate_labels="""""" )
with self.assertRaises(A_ ):
classifier("""Who are you voting for in 2020?""" , candidate_labels=A_ )
with self.assertRaises(A_ ):
classifier(
"""Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template="""Not formatting template""" , )
with self.assertRaises(A_ ):
classifier(
"""Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template=A_ , )
self.run_entailment_id(A_ )
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: List[Any] ):
__lowerCamelCase = zero_shot_classifier.model.config
__lowerCamelCase = config.labelaid
__lowerCamelCase = zero_shot_classifier.entailment_id
__lowerCamelCase = {"""LABEL_0""": 0, """LABEL_1""": 1, """LABEL_2""": 2}
self.assertEqual(zero_shot_classifier.entailment_id , -1 )
__lowerCamelCase = {"""entailment""": 0, """neutral""": 1, """contradiction""": 2}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
__lowerCamelCase = {"""ENTAIL""": 0, """NON-ENTAIL""": 1}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
__lowerCamelCase = {"""ENTAIL""": 2, """NEUTRAL""": 1, """CONTR""": 0}
self.assertEqual(zero_shot_classifier.entailment_id , 2 )
__lowerCamelCase = original_labelaid
self.assertEqual(A_ , zero_shot_classifier.entailment_id )
@require_torch
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = pipeline(
"""zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""pt""" , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
"""Who are you voting for in 2020?""" * 1_00 , candidate_labels=["""politics""", """public health""", """science"""] )
@require_torch
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = pipeline(
"""zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""pt""" , )
__lowerCamelCase = zero_shot_classifier(
"""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] )
self.assertEqual(
nested_simplify(A_ ) , {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""science""", """public health""", """politics"""],
"""scores""": [0.333, 0.333, 0.333],
} , )
@require_tf
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = pipeline(
"""zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""tf""" , )
__lowerCamelCase = zero_shot_classifier(
"""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] )
self.assertEqual(
nested_simplify(A_ ) , {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""science""", """public health""", """politics"""],
"""scores""": [0.333, 0.333, 0.333],
} , )
@slow
@require_torch
def lowerCAmelCase__ ( self: int ):
__lowerCamelCase = pipeline("""zero-shot-classification""" , model="""roberta-large-mnli""" , framework="""pt""" )
__lowerCamelCase = zero_shot_classifier(
"""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] )
self.assertEqual(
nested_simplify(A_ ) , {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""politics""", """public health""", """science"""],
"""scores""": [0.976, 0.015, 0.009],
} , )
__lowerCamelCase = zero_shot_classifier(
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
""" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
""" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
""" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
""" machine translation tasks show these models to be superior in quality while being more parallelizable"""
""" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
""" English-to-German translation task, improving over the existing best results, including ensembles by"""
""" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
""" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
""" fraction of the training costs of the best models from the literature. We show that the Transformer"""
""" generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
""" large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=A_ , )
self.assertEqual(
nested_simplify(outputs ) , {
"""sequence""": (
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
""" networks in an encoder-decoder configuration. The best performing models also connect the"""
""" encoder and decoder through an attention mechanism. We propose a new simple network"""
""" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
""" and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
""" superior in quality while being more parallelizable and requiring significantly less time to"""
""" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
""" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
""" English-to-French translation task, our model establishes a new single-model state-of-the-art"""
""" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
""" costs of the best models from the literature. We show that the Transformer generalizes well to"""
""" other tasks by applying it successfully to English constituency parsing both with large and"""
""" limited training data."""
),
"""labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
"""scores""": [0.817, 0.713, 0.018, 0.018],
} , )
@slow
@require_tf
def test_large_model_tf(self ):
zero_shot_classifier = pipeline("""zero-shot-classification""" , model="""roberta-large-mnli""" , framework="""tf""" )
outputs = zero_shot_classifier(
"""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] )
self.assertEqual(
nested_simplify(outputs ) , {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""politics""", """public health""", """science"""],
"""scores""": [0.976, 0.015, 0.009],
} , )
outputs = zero_shot_classifier(
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
""" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
""" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
""" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
""" machine translation tasks show these models to be superior in quality while being more parallelizable"""
""" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
""" English-to-German translation task, improving over the existing best results, including ensembles by"""
""" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
""" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
""" fraction of the training costs of the best models from the literature. We show that the Transformer"""
""" generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
""" large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=A_ , )
self.assertEqual(
nested_simplify(outputs ) , {
"""sequence""": (
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
""" networks in an encoder-decoder configuration. The best performing models also connect the"""
""" encoder and decoder through an attention mechanism. We propose a new simple network"""
""" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
""" and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
""" superior in quality while being more parallelizable and requiring significantly less time to"""
""" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
""" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
""" English-to-French translation task, our model establishes a new single-model state-of-the-art"""
""" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
""" costs of the best models from the literature. We show that the Transformer generalizes well to"""
""" other tasks by applying it successfully to English constituency parsing both with large and"""
""" limited training data."""
),
"""labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
"""scores""": [0.817, 0.713, 0.018, 0.018],
} , )
| 12 |
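# Editor's note: a hedged usage sketch of the zero-shot pipeline exercised by
# the tests above; the checkpoint name is illustrative and scores approximate.
from transformers import pipeline

classifier = pipeline("zero-shot-classification", model="roberta-large-mnli")
result = classifier(
    "Who are you voting for in 2020?",
    candidate_labels=["politics", "public health", "science"],
)
print(result["labels"][0], round(result["scores"][0], 3))  # e.g. politics 0.976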
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_A = {'configuration_sew': ['SEW_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SEWConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
'SEW_PRETRAINED_MODEL_ARCHIVE_LIST',
'SEWForCTC',
'SEWForSequenceClassification',
'SEWModel',
'SEWPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
_A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 62 | 0 |
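# Editor's note: a short hedged sketch of what the lazy import structure above
# provides; `SEWConfig` resolves without torch, while `SEWModel` only triggers
# the torch-backed module import when first accessed.
from transformers import SEWConfig, SEWModel

config = SEWConfig()
model = SEWModel(config)  # randomly initialized; only now is torch required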
'''simple docstring'''
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    """simple docstring"""

    def __init__(self, short_edge_length, max_size=sys.maxsize):
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length

    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)

            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # 3, 0, 1)  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False
                ).squeeze(0)
            img_augs.append(img)

        return img_augs


class Preprocess:
    """simple docstring"""

    def __init__(self, cfg):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std

    def pad(self, images):
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im,
                [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]],
                value=self.pad_value,
            )
            for size, im in zip(image_sizes, images)
        ]

        return torch.stack(images), torch.tensor(image_sizes)

    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx


def _scale_box(boxes, scale_yx):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def _clip_box(tensor, box_size: Tuple[int, int]):
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
| 227 |
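# Editor's note: hedged usage sketch for ResizeShortestEdge above; the input
# shape is illustrative.
demo_aug = ResizeShortestEdge([600, 600], max_size=1000)  # fixed 600 px short edge
demo_imgs = [np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)]
print(demo_aug(demo_imgs)[0].shape)  # (600, 800, 3): short edge 600, long edge scaled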
'''simple docstring'''
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput(BaseOutput):
    """simple docstring"""

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class DDIMInverseScheduler(SchedulerMixin, ConfigMixin):
    """simple docstring"""

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        clip_sample: bool = True,
        set_alpha_to_zero: bool = True,
        steps_offset: int = 0,
        prediction_type: str = "epsilon",
        clip_sample_range: float = 1.0,
        **kwargs,
    ):
        if kwargs.get("set_alpha_to_one", None) is not None:
            deprecation_message = (
                "The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."
            )
            deprecate("set_alpha_to_one", "1.0.0", deprecation_message, standard_warn=False)
            set_alpha_to_zero = kwargs["set_alpha_to_one"]

        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # At every step in inverted ddim, we are looking into the next alphas_cumprod
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just output the predicted noise
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0) if set_alpha_to_zero else self.alphas_cumprod[-1]

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps).copy().astype(np.int64))

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
                f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
                f" maximal {self.config.num_train_timesteps} timesteps."
            )

        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round().copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
        self.timesteps += self.config.steps_offset

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        eta: float = 0.0,
        use_clipped_model_output: bool = False,
        variance_noise: Optional[torch.FloatTensor] = None,
        return_dict: bool = True,
    ) -> Union[DDIMSchedulerOutput, Tuple]:
        # 1. get previous step value (=t+1)
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps

        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )

        beta_prod_t = 1 - alpha_prod_t

        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t**0.5 * pred_original_sample) / beta_prod_t**0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
                " `v_prediction`"
            )

        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon

        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction

        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)

    def __len__(self):
        return self.config.num_train_timesteps
| 227 | 1 |
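# Editor's note: hedged sketch of driving the inverted-DDIM scheduler above;
# the random tensor stands in for a UNet noise prediction.
scheduler = DDIMInverseScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(50)

sample = torch.randn(1, 3, 64, 64)
for t in scheduler.timesteps:
    model_output = torch.randn_like(sample)  # placeholder for unet(sample, t)
    sample = scheduler.step(model_output, t, sample).prev_sample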
'''simple docstring'''
import unittest

from transformers import DonutProcessor


DONUT_PRETRAINED_MODEL_NAME = "naver-clova-ix/donut-base"


class DonutProcessorTest(unittest.TestCase):
    def setUp(self):
        self.processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME)

    def test_token2json(self):
        expected_json = {
            "name": "John Doe",
            "age": "99",
            "city": "Atlanta",
            "state": "GA",
            "zip": "30301",
            "phone": "123-4567",
            "nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
        }

        sequence = (
            "<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
            "<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
            "<s_nicknames><s_nickname>Johnny</s_nickname>"
            "<sep/><s_nickname>JD</s_nickname></s_nicknames>"
        )

        actual_json = self.processor.token2json(sequence)

        self.assertDictEqual(actual_json, expected_json)
| 93 |
UNIVERSAL_GAS_CONSTANT = 8.314_4598  # J / (mol * K)


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    else:
        return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # example
    temperature = 300  # K
    molar_mass = 0.028  # kg/mol for N2 (the original literal 28 was in g/mol)
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(f"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
| 76 | 0 |
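# Editor's note: worked check of v_rms = sqrt(3 * R * T / M) with the values
# above: sqrt(3 * 8.3144598 * 300 / 0.028) ≈ 517 m/s, the textbook figure for
# N2 at 300 K.
assert abs(rms_speed_of_molecule(300, 0.028) - 516.96) < 0.1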
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]

    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio


def ffmpeg_microphone(sampling_rate: int, chunk_length_s: float, format_for_conversion: str = "f32le"):
    ar = f"{sampling_rate}"
    ac = "1"
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"

    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item


def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[int] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = "f32le",
):
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s

    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]

    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item


def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item


def _ffmpeg_stream(ffmpeg_command, buflen: int):
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
| 292 |
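# Editor's note: hedged sketch of consuming the live-microphone generator above
# (needs ffmpeg on PATH and a capture device; parameters are illustrative).
for item in ffmpeg_microphone_live(sampling_rate=16000, chunk_length_s=5.0, stream_chunk_s=1.0):
    if not item["partial"]:
        audio = item["raw"]  # np.float32 waveform ready for an ASR model
        break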
def euclidean_gcd(a: int, b: int) -> int:
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main() -> None:
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")


if __name__ == "__main__":
    main()
| 292 | 1 |
"""simple docstring"""
import itertools
import string
from collections.abc import Generator, Iterable


def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    """
    Prepare the plaintext by up-casing it
    and separating repeated letters with X's
    """
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""

    if len(dirty) < 2:
        return dirty

    for i in range(len(dirty) - 1):
        clean += dirty[i]

        if dirty[i] == dirty[i + 1]:
            clean += "X"

    clean += dirty[-1]

    if len(clean) & 1:
        clean += "X"

    return clean


def generate_table(key: str) -> list[str]:
    # I and J are treated as the same letter so the table fits in 5x5
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []

    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)

    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)

    return table


def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]

    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]

    return plaintext
| 64 |
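# Editor's note: hedged round-trip check for the Playfair helpers above, using
# the classic Wikipedia key/plaintext pair.
ciphertext = encode("Hide the gold in the tree stump", "playfair example")
print(ciphertext)                              # e.g. BMODZBXDNABEKUDMUIXMMOUVIF
print(decode(ciphertext, "playfair example"))  # X-padded plaintext, e.g. HIDETHEGOLDINTHETREXESTUMP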
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
SCREAMING_SNAKE_CASE_ = logging.getLogger(__name__)
@dataclass
class Seq2SeqTrainingArguments(TrainingArguments):
    '''simple docstring'''

    label_smoothing: Optional[float] = field(
        default=0.0, metadata={"help": "The label smoothing epsilon to apply (if not zero)."}
    )
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to SortishSamler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    adafactor: bool = field(default=False, metadata={"help": "whether to use adafactor"})
    encoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Encoder layer dropout probability. Goes into model.config."}
    )
    decoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Decoder layer dropout probability. Goes into model.config."}
    )
    dropout: Optional[float] = field(default=None, metadata={"help": "Dropout probability. Goes into model.config."})
    attention_dropout: Optional[float] = field(
        default=None, metadata={"help": "Attention dropout probability. Goes into model.config."}
    )
    lr_scheduler: Optional[str] = field(
        default="linear", metadata={"help": f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}"}
    )
| 296 | 0 |
'''simple docstring'''
def text_justification(word: str, max_width: int) -> list:
    '''simple docstring'''
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line = []
    width = 0
    for word in words:
        if width + len(word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(word)
            width += len(word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [word], len(word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
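# Editor's note: small hedged example of the justifier above; every emitted row
# is exactly `max_width` characters wide.
for row in text_justification("This is an example of text justification.", 16):
    print(repr(row))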
| 358 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_: int =logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_: Dict ={
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
SCREAMING_SNAKE_CASE_: List[str] =[
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer , key , value , full_name , weight_type ):
'''simple docstring'''
for attribute in key.split("." ):
UpperCAmelCase_ = getattr(snake_case_ , snake_case_ )
if weight_type is not None:
UpperCAmelCase_ = getattr(snake_case_ , snake_case_ ).shape
else:
UpperCAmelCase_ = hf_pointer.shape
assert hf_shape == value.shape, (
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
UpperCAmelCase_ = value
elif weight_type == "weight_g":
UpperCAmelCase_ = value
elif weight_type == "weight_v":
UpperCAmelCase_ = value
elif weight_type == "bias":
UpperCAmelCase_ = value
else:
UpperCAmelCase_ = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def recursively_load_weights_wavaveca(fairseq_model , hf_model ):
'''simple docstring'''
UpperCAmelCase_ = []
UpperCAmelCase_ = fairseq_model.state_dict()
UpperCAmelCase_ = hf_model.feature_extractor
# if encoder has different dim to decoder -> use proj_weight
UpperCAmelCase_ = None
for name, value in fairseq_dict.items():
UpperCAmelCase_ = False
if "conv_layers" in name:
load_conv_layer(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , hf_model.config.feat_extract_norm == "group" , )
UpperCAmelCase_ = True
elif name.split("." )[0] == "proj":
UpperCAmelCase_ = fairseq_model.proj
UpperCAmelCase_ = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
UpperCAmelCase_ = True
if "*" in mapped_key:
UpperCAmelCase_ = name.split(snake_case_ )[0].split("." )[-2]
UpperCAmelCase_ = mapped_key.replace("*" , snake_case_ )
if "weight_g" in name:
UpperCAmelCase_ = "weight_g"
elif "weight_v" in name:
UpperCAmelCase_ = "weight_v"
elif "bias" in name:
UpperCAmelCase_ = "bias"
elif "weight" in name:
UpperCAmelCase_ = "weight"
else:
UpperCAmelCase_ = None
set_recursively(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
continue
if not is_used:
unused_weights.append(snake_case_ )
logger.warning(f"""Unused weights: {unused_weights}""" )
return proj_weight
def load_conv_layer(full_name , value , feature_extractor , unused_weights , use_group_norm ):
'''simple docstring'''
UpperCAmelCase_ = full_name.split("conv_layers." )[-1]
UpperCAmelCase_ = name.split("." )
UpperCAmelCase_ = int(items[0] )
UpperCAmelCase_ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
UpperCAmelCase_ = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
UpperCAmelCase_ = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
UpperCAmelCase_ = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
UpperCAmelCase_ = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(snake_case_ )
def make_linear_from_emb(emb ):
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ = emb.weight.shape
UpperCAmelCase_ = nn.Linear(snake_case_ , snake_case_ , bias=snake_case_ )
UpperCAmelCase_ = emb.weight.data
return lin_layer
def create_vocab_dict(dict_path ):
'''simple docstring'''
with open(snake_case_ , "r" , encoding="utf-8" ) as f:
UpperCAmelCase_ = f.readlines()
UpperCAmelCase_ = [line.split(" " )[0] for line in lines]
UpperCAmelCase_ = len(snake_case_ )
UpperCAmelCase_ = {
"<s>": 0,
"<pad>": 1,
"</s>": 2,
"<unk>": 3,
}
vocab_dict.update(dict(zip(snake_case_ , range(4 , num_words + 4 ) ) ) )
return vocab_dict
@torch.no_grad()
def convert_wavaveca_checkpoint(checkpoint_path , pytorch_dump_folder_path , dict_path , encoder_config_path , decoder_config_path , vocab_size , num_decoder_layers , ):
'''simple docstring'''
UpperCAmelCase_ = WavaVecaConfig.from_pretrained(snake_case_ )
UpperCAmelCase_ = SpeechaTextaConfig.from_pretrained(
snake_case_ , vocab_size=snake_case_ , decoder_layers=snake_case_ , do_stable_layer_norm=snake_case_ )
UpperCAmelCase_ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=snake_case_ , return_attention_mask=snake_case_ , )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
UpperCAmelCase_ = model[0].eval()
# set weights for wav2vec2 encoder
UpperCAmelCase_ = WavaVecaModel(snake_case_ )
UpperCAmelCase_ = recursively_load_weights_wavaveca(model.encoder , snake_case_ )
UpperCAmelCase_ = SpeechaTextaForCausalLM(snake_case_ )
UpperCAmelCase_ , UpperCAmelCase_ = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=snake_case_ )
# set output linear layer
unexpected_keys.remove("embed_out" )
UpperCAmelCase_ = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(f"""The following keys are missing when loading the decoder weights: {missing_keys}""" )
logger.warning(f"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" )
UpperCAmelCase_ = SpeechEncoderDecoderModel(encoder=snake_case_ , decoder=snake_case_ )
UpperCAmelCase_ = False
# add projection layer
UpperCAmelCase_ = nn.Parameter(projection_layer.weight )
UpperCAmelCase_ = nn.Parameter(projection_layer.bias )
UpperCAmelCase_ = create_vocab_dict(snake_case_ )
with open(os.path.join(snake_case_ , "vocab.json" ) , "w" ) as fp:
json.dump(snake_case_ , snake_case_ )
UpperCAmelCase_ = SpeechaTextaTokenizer(os.path.join(snake_case_ , "vocab.json" ) )
tokenizer.save_pretrained(snake_case_ )
UpperCAmelCase_ = hf_wavavec.config.to_dict()
UpperCAmelCase_ = tokenizer.pad_token_id
UpperCAmelCase_ = tokenizer.bos_token_id
UpperCAmelCase_ = tokenizer.eos_token_id
UpperCAmelCase_ = "speech_to_text_2"
UpperCAmelCase_ = "wav2vec2"
UpperCAmelCase_ = SpeechEncoderDecoderConfig.from_dict(snake_case_ )
hf_wavavec.save_pretrained(snake_case_ )
feature_extractor.save_pretrained(snake_case_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_: Optional[int] =argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-large-lv60',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/s2t-small-mustc-en-fr-st',
type=str,
help='Path to hf decoder s2t checkpoint config',
)
parser.add_argument('--vocab_size', default=1_02_24, type=int, help='Vocab size of decoder')
parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers')
SCREAMING_SNAKE_CASE_: Dict =parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 106 | 0 |
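# Editor's note: hedged sketch of invoking the conversion script above (the
# file name is illustrative) and loading the exported checkpoint:
#
#   python convert_wav2vec2_speech2text2.py \
#       --checkpoint_path ./checkpoint_best.pt \
#       --dict_path ./dict.ltr.txt \
#       --pytorch_dump_folder_path ./converted
#
# from transformers import SpeechEncoderDecoderModel
# model = SpeechEncoderDecoderModel.from_pretrained("./converted")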
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
class MultiControlNetModel(ModelMixin):
    '''simple docstring'''

    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        controlnet_cond: List[torch.tensor],
        conditioning_scale: List[float],
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guess_mode: bool = False,
        return_dict: bool = True,
    ) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample=sample,
                timestep=timestep,
                encoder_hidden_states=encoder_hidden_states,
                controlnet_cond=image,
                conditioning_scale=scale,
                class_labels=class_labels,
                timestep_cond=timestep_cond,
                attention_mask=attention_mask,
                cross_attention_kwargs=cross_attention_kwargs,
                guess_mode=guess_mode,
                return_dict=return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        is_main_process: bool = True,
        save_function: Callable = None,
        safe_serialization: bool = False,
        variant: Optional[str] = None,
    ):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )

            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)

            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")

        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}."
            )

        return cls(controlnets)
| 345 |
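# Editor's note: hedged sketch of composing two ControlNets with the wrapper
# above; the checkpoint names are illustrative.
from diffusers import ControlNetModel

canny = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
pose = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose")
multi = MultiControlNetModel([canny, pose])
# forward() zips the conditioning images and scales with the wrapped nets and
# sums their residuals.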
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class FlaxCrossAttnDownBlock2D(nn.Module):
    """simple docstring"""

    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
UpperCAmelCase_ : Optional[int] = []
UpperCAmelCase_ : Optional[int] = []
for i in range(self.num_layers ):
UpperCAmelCase_ : List[Any] = self.in_channels if i == 0 else self.out_channels
UpperCAmelCase_ : List[Any] = FlaxResnetBlockaD(
in_channels=lowerCamelCase_ ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = FlaxTransformeraDModel(
in_channels=self.out_channels ,n_heads=self.num_attention_heads ,d_head=self.out_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,only_cross_attention=self.only_cross_attention ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,)
attentions.append(lowerCamelCase_ )
UpperCAmelCase_ : int = resnets
UpperCAmelCase_ : Tuple = attentions
if self.add_downsample:
UpperCAmelCase_ : List[Any] = FlaxDownsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self: Optional[Any] ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: int=True ) -> int:
UpperCAmelCase_ : List[Any] = ()
for resnet, attn in zip(self.resnets ,self.attentions ):
UpperCAmelCase_ : str = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = attn(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
output_states += (hidden_states,)
if self.add_downsample:
UpperCAmelCase_ : List[Any] = self.downsamplers_a(lowerCamelCase_ )
output_states += (hidden_states,)
return hidden_states, output_states
class FlaxDownBlock2D(nn.Module):
    """simple docstring"""

    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
UpperCAmelCase_ : List[str] = []
for i in range(self.num_layers ):
UpperCAmelCase_ : int = self.in_channels if i == 0 else self.out_channels
UpperCAmelCase_ : Dict = FlaxResnetBlockaD(
in_channels=lowerCamelCase_ ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = resnets
if self.add_downsample:
UpperCAmelCase_ : List[str] = FlaxDownsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self: Any ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Any ,lowerCamelCase_: List[Any]=True ) -> Any:
UpperCAmelCase_ : Union[str, Any] = ()
for resnet in self.resnets:
UpperCAmelCase_ : Tuple = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
output_states += (hidden_states,)
if self.add_downsample:
UpperCAmelCase_ : List[str] = self.downsamplers_a(lowerCamelCase_ )
output_states += (hidden_states,)
return hidden_states, output_states
class FlaxCrossAttnUpBlock2D(nn.Module):
    """simple docstring"""

    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
UpperCAmelCase_ : Dict = []
UpperCAmelCase_ : List[str] = []
for i in range(self.num_layers ):
UpperCAmelCase_ : int = self.in_channels if (i == self.num_layers - 1) else self.out_channels
UpperCAmelCase_ : int = self.prev_output_channel if i == 0 else self.out_channels
UpperCAmelCase_ : Optional[Any] = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(lowerCamelCase_ )
UpperCAmelCase_ : int = FlaxTransformeraDModel(
in_channels=self.out_channels ,n_heads=self.num_attention_heads ,d_head=self.out_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,only_cross_attention=self.only_cross_attention ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,)
attentions.append(lowerCamelCase_ )
UpperCAmelCase_ : List[str] = resnets
UpperCAmelCase_ : Dict = attentions
if self.add_upsample:
UpperCAmelCase_ : Optional[Any] = FlaxUpsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self: Optional[int] ,lowerCamelCase_: List[Any] ,lowerCamelCase_: int ,lowerCamelCase_: Any ,lowerCamelCase_: str ,lowerCamelCase_: List[str]=True ) -> List[str]:
for resnet, attn in zip(self.resnets ,self.attentions ):
# pop res hidden states
UpperCAmelCase_ : List[str] = res_hidden_states_tuple[-1]
UpperCAmelCase_ : Union[str, Any] = res_hidden_states_tuple[:-1]
UpperCAmelCase_ : Optional[Any] = jnp.concatenate((hidden_states, res_hidden_states) ,axis=-1 )
UpperCAmelCase_ : Tuple = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = attn(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
if self.add_upsample:
UpperCAmelCase_ : Dict = self.upsamplers_a(lowerCamelCase_ )
return hidden_states
class FlaxUpBlock2D(nn.Module):
    """simple docstring"""

    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
UpperCAmelCase_ : Any = []
for i in range(self.num_layers ):
UpperCAmelCase_ : str = self.in_channels if (i == self.num_layers - 1) else self.out_channels
UpperCAmelCase_ : Optional[int] = self.prev_output_channel if i == 0 else self.out_channels
UpperCAmelCase_ : Any = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(lowerCamelCase_ )
UpperCAmelCase_ : str = resnets
if self.add_upsample:
UpperCAmelCase_ : Union[str, Any] = FlaxUpsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self: Dict ,lowerCamelCase_: Dict ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Any=True ) -> List[str]:
for resnet in self.resnets:
# pop res hidden states
UpperCAmelCase_ : Dict = res_hidden_states_tuple[-1]
UpperCAmelCase_ : str = res_hidden_states_tuple[:-1]
UpperCAmelCase_ : List[Any] = jnp.concatenate((hidden_states, res_hidden_states) ,axis=-1 )
UpperCAmelCase_ : List[str] = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
if self.add_upsample:
UpperCAmelCase_ : Optional[Any] = self.upsamplers_a(lowerCamelCase_ )
return hidden_states
class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    """simple docstring"""

    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
# there is always at least one resnet
UpperCAmelCase_ : List[Any] = [
FlaxResnetBlockaD(
in_channels=self.in_channels ,out_channels=self.in_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
]
UpperCAmelCase_ : Any = []
for _ in range(self.num_layers ):
UpperCAmelCase_ : Optional[Any] = FlaxTransformeraDModel(
in_channels=self.in_channels ,n_heads=self.num_attention_heads ,d_head=self.in_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,)
attentions.append(lowerCamelCase_ )
UpperCAmelCase_ : Any = FlaxResnetBlockaD(
in_channels=self.in_channels ,out_channels=self.in_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(lowerCamelCase_ )
UpperCAmelCase_ : Dict = resnets
UpperCAmelCase_ : Any = attentions
def __call__( self: str ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: Union[str, Any]=True ) -> List[Any]:
UpperCAmelCase_ : List[Any] = self.resnets[0](lowerCamelCase_ ,lowerCamelCase_ )
for attn, resnet in zip(self.attentions ,self.resnets[1:] ):
UpperCAmelCase_ : Optional[Any] = attn(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = resnet(lowerCamelCase_ ,lowerCamelCase_ ,deterministic=lowerCamelCase_ )
return hidden_states
| 345 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
snake_case : List[str] = {
'''configuration_biogpt''': ['''BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BioGptConfig'''],
'''tokenization_biogpt''': ['''BioGptTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : str = [
'''BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BioGptForCausalLM''',
'''BioGptForTokenClassification''',
'''BioGptForSequenceClassification''',
'''BioGptModel''',
'''BioGptPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
snake_case : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 356 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
snake_case : Any = {'''configuration_van''': ['''VAN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''VanConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Union[str, Any] = [
'''VAN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''VanForImageClassification''',
'''VanModel''',
'''VanPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
snake_case : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 281 | 0 |
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def _lowerCAmelCase ( _UpperCamelCase : Callable , _UpperCamelCase : float , _UpperCamelCase : float , _UpperCamelCase : float , _UpperCamelCase : float ) -> np.array:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =int(np.ceil((x_end - xa) / step_size ) )
_SCREAMING_SNAKE_CASE =np.zeros((n + 1,) )
_SCREAMING_SNAKE_CASE =ya
_SCREAMING_SNAKE_CASE =xa
for k in range(_UpperCamelCase ):
_SCREAMING_SNAKE_CASE =y[k] + step_size * ode_func(_UpperCamelCase , y[k] )
_SCREAMING_SNAKE_CASE =y[k] + (
(step_size / 2) * (ode_func(_UpperCamelCase , y[k] ) + ode_func(x + step_size , _UpperCamelCase ))
)
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
| 47 |
'''simple docstring'''
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
lowerCamelCase : int = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class A__ ( unittest.TestCase ):
def __init__( self : List[str] , _a : List[Any] , _a : List[str]=7 , _a : List[str]=3 , _a : Tuple=18 , _a : Tuple=30 , _a : str=400 , _a : Tuple=None , _a : Union[str, Any]=True , _a : List[str]=True , _a : Optional[int]=None , ) -> List[str]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =size if size is not None else {'height': 20, 'width': 20}
_SCREAMING_SNAKE_CASE =parent
_SCREAMING_SNAKE_CASE =batch_size
_SCREAMING_SNAKE_CASE =num_channels
_SCREAMING_SNAKE_CASE =image_size
_SCREAMING_SNAKE_CASE =min_resolution
_SCREAMING_SNAKE_CASE =max_resolution
_SCREAMING_SNAKE_CASE =size
_SCREAMING_SNAKE_CASE =do_normalize
_SCREAMING_SNAKE_CASE =do_convert_rgb
_SCREAMING_SNAKE_CASE =[512, 1024, 2048, 4096]
_SCREAMING_SNAKE_CASE =patch_size if patch_size is not None else {'height': 16, 'width': 16}
def A ( self : Any ) -> List[str]:
'''simple docstring'''
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def A ( self : int ) -> str:
'''simple docstring'''
_SCREAMING_SNAKE_CASE ='https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg'
_SCREAMING_SNAKE_CASE =Image.open(requests.get(_a , stream=_a ).raw ).convert('RGB' )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , )
@require_torch
@require_vision
class A__ ( A__ , unittest.TestCase ):
A__ = PixaStructImageProcessor if is_vision_available() else None
def A ( self : Dict ) -> Tuple:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =PixaStructImageProcessingTester(self )
@property
def A ( self : Optional[Any] ) -> int:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def A ( self : Any ) -> Tuple:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , 'do_normalize' ) )
self.assertTrue(hasattr(_a , 'do_convert_rgb' ) )
def A ( self : Any ) -> List[str]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.image_processor_tester.prepare_dummy_image()
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
_SCREAMING_SNAKE_CASE =2048
_SCREAMING_SNAKE_CASE =image_processor(_a , return_tensors='pt' , max_patches=_a )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.06_06 ) , atol=1e-3 , rtol=1e-3 ) )
def A ( self : Any ) -> Any:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_SCREAMING_SNAKE_CASE =prepare_image_inputs(self.image_processor_tester , equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
_SCREAMING_SNAKE_CASE =(
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_SCREAMING_SNAKE_CASE =image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=_a ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_SCREAMING_SNAKE_CASE =image_processor(
_a , return_tensors='pt' , max_patches=_a ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def A ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_SCREAMING_SNAKE_CASE =prepare_image_inputs(self.image_processor_tester , equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
_SCREAMING_SNAKE_CASE =(
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
_SCREAMING_SNAKE_CASE =True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(_a ):
_SCREAMING_SNAKE_CASE =image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=_a ).flattened_patches
_SCREAMING_SNAKE_CASE ='Hello'
_SCREAMING_SNAKE_CASE =image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=_a , header_text=_a ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_SCREAMING_SNAKE_CASE =image_processor(
_a , return_tensors='pt' , max_patches=_a , header_text=_a ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def A ( self : List[Any] ) -> Any:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_SCREAMING_SNAKE_CASE =prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , numpify=_a )
for image in image_inputs:
self.assertIsInstance(_a , np.ndarray )
_SCREAMING_SNAKE_CASE =(
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_SCREAMING_SNAKE_CASE =image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=_a ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_SCREAMING_SNAKE_CASE =image_processor(
_a , return_tensors='pt' , max_patches=_a ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def A ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_SCREAMING_SNAKE_CASE =prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , torchify=_a )
for image in image_inputs:
self.assertIsInstance(_a , torch.Tensor )
# Test not batched input
_SCREAMING_SNAKE_CASE =(
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_SCREAMING_SNAKE_CASE =image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=_a ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_SCREAMING_SNAKE_CASE =image_processor(
_a , return_tensors='pt' , max_patches=_a ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , )
@require_torch
@require_vision
class A__ ( A__ , unittest.TestCase ):
A__ = PixaStructImageProcessor if is_vision_available() else None
def A ( self : str ) -> Dict:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =PixaStructImageProcessingTester(self , num_channels=4 )
_SCREAMING_SNAKE_CASE =3
@property
def A ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def A ( self : List[str] ) -> Tuple:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , 'do_normalize' ) )
self.assertTrue(hasattr(_a , 'do_convert_rgb' ) )
def A ( self : Dict ) -> int:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_SCREAMING_SNAKE_CASE =prepare_image_inputs(self.image_processor_tester , equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
_SCREAMING_SNAKE_CASE =(
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_SCREAMING_SNAKE_CASE =image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=_a ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_SCREAMING_SNAKE_CASE =image_processor(
_a , return_tensors='pt' , max_patches=_a ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
| 47 | 1 |
import math
def __A ( _lowercase ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(_lowercase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __A ( _lowercase = 1_00_01 ):
'''simple docstring'''
try:
_A = int(_lowercase )
except (TypeError, ValueError):
raise TypeError('''Parameter nth must be int or castable to int.''' ) from None
if nth <= 0:
raise ValueError('''Parameter nth must be greater than or equal to one.''' )
_A = []
_A = 2
while len(_lowercase ) < nth:
if is_prime(_lowercase ):
primes.append(_lowercase )
num += 1
else:
num += 1
return primes[len(_lowercase ) - 1]
if __name__ == "__main__":
print(f'{solution() = }')
| 75 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class SCREAMING_SNAKE_CASE ( snake_case ):
"""simple docstring"""
A_ = "facebook/bart-large-mnli"
A_ = (
"This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
"should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
"It returns the most likely label in the list of provided `labels` for the input text."
)
A_ = "text_classifier"
A_ = AutoTokenizer
A_ = AutoModelForSequenceClassification
A_ = ["text", ["text"]]
A_ = ["text"]
def __A ( self: int ) -> str:
super().setup()
_A = self.model.config
_A = -1
for idx, label in config.idalabel.items():
if label.lower().startswith('''entail''' ):
_A = int(__A )
if self.entailment_id == -1:
raise ValueError('''Could not determine the entailment ID from the model config, please pass it at init.''' )
def __A ( self: Union[str, Any] , __A: Union[str, Any] , __A: List[str] ) -> int:
_A = labels
return self.pre_processor(
[text] * len(__A ) , [f"""This example is {label}""" for label in labels] , return_tensors='''pt''' , padding='''max_length''' , )
def __A ( self: str , __A: List[Any] ) -> Union[str, Any]:
_A = outputs.logits
_A = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
| 75 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
def lowercase__( __SCREAMING_SNAKE_CASE : str ):
lowercase_ : str = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
lowercase_ : int = 1_92
lowercase_ : List[str] = 7_68
lowercase_ : Optional[Any] = 12
lowercase_ : Tuple = 3
lowercase_ : Union[str, Any] = [8_00, 13_33]
lowercase_ : List[str] = False
elif yolos_name == "yolos_s_dWr":
lowercase_ : Optional[int] = 3_30
lowercase_ : Tuple = 14
lowercase_ : Union[str, Any] = 6
lowercase_ : str = 13_20
elif "yolos_s" in yolos_name:
lowercase_ : Tuple = 3_84
lowercase_ : Optional[Any] = 15_36
lowercase_ : List[Any] = 12
lowercase_ : List[Any] = 6
elif "yolos_b" in yolos_name:
lowercase_ : Dict = [8_00, 13_44]
lowercase_ : Optional[Any] = 91
lowercase_ : Union[str, Any] = 'huggingface/label-files'
lowercase_ : Union[str, Any] = 'coco-detection-id2label.json'
lowercase_ : str = json.load(open(hf_hub_download(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
lowercase_ : Tuple = {int(__SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
lowercase_ : str = idalabel
lowercase_ : int = {v: k for k, v in idalabel.items()}
return config
def lowercase__( __SCREAMING_SNAKE_CASE : dict , __SCREAMING_SNAKE_CASE : YolosConfig , __SCREAMING_SNAKE_CASE : bool = False ):
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowercase_ : Optional[Any] = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
lowercase_ : List[str] = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
lowercase_ : Any = in_proj_weight[: config.hidden_size, :]
lowercase_ : Optional[Any] = in_proj_bias[: config.hidden_size]
lowercase_ : List[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase_ : Any = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowercase_ : int = in_proj_weight[-config.hidden_size :, :]
lowercase_ : Union[str, Any] = in_proj_bias[-config.hidden_size :]
def lowercase__( __SCREAMING_SNAKE_CASE : str ):
if "backbone" in name:
lowercase_ : str = name.replace('backbone' , 'vit' )
if "cls_token" in name:
lowercase_ : Any = name.replace('cls_token' , 'embeddings.cls_token' )
if "det_token" in name:
lowercase_ : Tuple = name.replace('det_token' , 'embeddings.detection_tokens' )
if "mid_pos_embed" in name:
lowercase_ : int = name.replace('mid_pos_embed' , 'encoder.mid_position_embeddings' )
if "pos_embed" in name:
lowercase_ : Optional[Any] = name.replace('pos_embed' , 'embeddings.position_embeddings' )
if "patch_embed.proj" in name:
lowercase_ : Union[str, Any] = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "blocks" in name:
lowercase_ : Any = name.replace('blocks' , 'encoder.layer' )
if "attn.proj" in name:
lowercase_ : Union[str, Any] = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
lowercase_ : str = name.replace('attn' , 'attention.self' )
if "norm1" in name:
lowercase_ : str = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
lowercase_ : str = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
lowercase_ : List[str] = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
lowercase_ : Tuple = name.replace('mlp.fc2' , 'output.dense' )
if "class_embed" in name:
lowercase_ : Optional[Any] = name.replace('class_embed' , 'class_labels_classifier' )
if "bbox_embed" in name:
lowercase_ : List[Any] = name.replace('bbox_embed' , 'bbox_predictor' )
if "vit.norm" in name:
lowercase_ : Tuple = name.replace('vit.norm' , 'vit.layernorm' )
return name
def lowercase__( __SCREAMING_SNAKE_CASE : dict , __SCREAMING_SNAKE_CASE : YolosForObjectDetection ):
for key in orig_state_dict.copy().keys():
lowercase_ : Dict = orig_state_dict.pop(__SCREAMING_SNAKE_CASE )
if "qkv" in key:
lowercase_ : Optional[int] = key.split('.' )
lowercase_ : Any = int(key_split[2] )
lowercase_ : int = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
lowercase_ : Optional[int] = val[:dim, :]
lowercase_ : Any = val[
dim : dim * 2, :
]
lowercase_ : Dict = val[-dim:, :]
else:
lowercase_ : int = val[:dim]
lowercase_ : List[Any] = val[dim : dim * 2]
lowercase_ : Dict = val[-dim:]
else:
lowercase_ : Any = val
return orig_state_dict
def lowercase__( ):
lowercase_ : List[str] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowercase_ : Dict = Image.open(requests.get(__SCREAMING_SNAKE_CASE , stream=__SCREAMING_SNAKE_CASE ).raw )
return im
@torch.no_grad()
def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : bool = False ):
lowercase_ : Tuple = get_yolos_config(__SCREAMING_SNAKE_CASE )
# load original state_dict
lowercase_ : int = torch.load(__SCREAMING_SNAKE_CASE , map_location='cpu' )['model']
# load 🤗 model
lowercase_ : Tuple = YolosForObjectDetection(__SCREAMING_SNAKE_CASE )
model.eval()
lowercase_ : int = convert_state_dict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
model.load_state_dict(__SCREAMING_SNAKE_CASE )
# Check outputs on an image, prepared by YolosImageProcessor
lowercase_ : Tuple = 8_00 if yolos_name != 'yolos_ti' else 5_12
lowercase_ : str = YolosImageProcessor(format='coco_detection' , size=__SCREAMING_SNAKE_CASE )
lowercase_ : int = image_processor(images=prepare_img() , return_tensors='pt' )
lowercase_ : Dict = model(**__SCREAMING_SNAKE_CASE )
lowercase_ , lowercase_ : Union[str, Any] = outputs.logits, outputs.pred_boxes
lowercase_ , lowercase_ : List[str] = None, None
if yolos_name == "yolos_ti":
lowercase_ : List[str] = torch.tensor(
[[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] )
lowercase_ : Optional[int] = torch.tensor(
[[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] )
elif yolos_name == "yolos_s_200_pre":
lowercase_ : Union[str, Any] = torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] )
lowercase_ : List[str] = torch.tensor(
[[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] )
elif yolos_name == "yolos_s_300_pre":
lowercase_ : List[str] = torch.tensor(
[[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] )
lowercase_ : Union[str, Any] = torch.tensor(
[[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] )
elif yolos_name == "yolos_s_dWr":
lowercase_ : List[str] = torch.tensor(
[[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] )
lowercase_ : Dict = torch.tensor(
[[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] )
elif yolos_name == "yolos_base":
lowercase_ : Union[str, Any] = torch.tensor(
[[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] )
lowercase_ : int = torch.tensor(
[[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] )
else:
raise ValueError(F'''Unknown yolos_name: {yolos_name}''' )
assert torch.allclose(logits[0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 )
Path(__SCREAMING_SNAKE_CASE ).mkdir(exist_ok=__SCREAMING_SNAKE_CASE )
print(F'''Saving model {yolos_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__SCREAMING_SNAKE_CASE )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__SCREAMING_SNAKE_CASE )
if push_to_hub:
lowercase_ : Optional[Any] = {
'yolos_ti': 'yolos-tiny',
'yolos_s_200_pre': 'yolos-small',
'yolos_s_300_pre': 'yolos-small-300',
'yolos_s_dWr': 'yolos-small-dwr',
'yolos_base': 'yolos-base',
}
print('Pushing to the hub...' )
lowercase_ : Any = model_mapping[yolos_name]
image_processor.push_to_hub(__SCREAMING_SNAKE_CASE , organization='hustvl' )
model.push_to_hub(__SCREAMING_SNAKE_CASE , organization='hustvl' )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--yolos_name",
default="yolos_s_200_pre",
type=str,
help=(
"Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
" 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
),
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
__SCREAMING_SNAKE_CASE =parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 213 | """simple docstring"""
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
__SCREAMING_SNAKE_CASE ="python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("importlib_metadata")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Union[str, Any]=None ):
require_version(deps[pkg] , __SCREAMING_SNAKE_CASE )
| 213 | 1 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
lowercase_ : Optional[Any] =CLIPTokenizer
lowercase_ : int =CLIPTokenizerFast
lowercase_ : str =True
lowercase_ : Dict ={}
lowercase_ : Any =False
def A__ ( self):
super().setUp()
# fmt: off
lowercase = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
# fmt: on
lowercase = dict(zip(A__ ,range(len(A__))))
lowercase = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''']
lowercase = {'''unk_token''': '''<unk>'''}
lowercase = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''vocab_file'''])
lowercase = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''merges_file'''])
with open(self.vocab_file ,'''w''' ,encoding='''utf-8''') as fp:
fp.write(json.dumps(A__) + '''\n''')
with open(self.merges_file ,'''w''' ,encoding='''utf-8''') as fp:
fp.write('''\n'''.join(A__))
def A__ ( self ,**A__):
kwargs.update(self.special_tokens_map)
return CLIPTokenizer.from_pretrained(self.tmpdirname ,**A__)
def A__ ( self ,**A__):
kwargs.update(self.special_tokens_map)
return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**A__)
def A__ ( self ,A__):
lowercase = '''lower newer'''
lowercase = '''lower newer'''
return input_text, output_text
def A__ ( self):
lowercase = CLIPTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map)
lowercase = '''lower newer'''
lowercase = ['''lo''', '''w''', '''er</w>''', '''n''', '''e''', '''w''', '''er</w>''']
lowercase = tokenizer.tokenize(A__)
self.assertListEqual(A__ ,A__)
lowercase = tokens + [tokenizer.unk_token]
lowercase = [1_0, 2, 1_6, 9, 3, 2, 1_6, 2_0]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A__) ,A__)
@require_ftfy
def A__ ( self):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
lowercase = self.tokenizer_class.from_pretrained(A__ ,**A__)
lowercase = self.rust_tokenizer_class.from_pretrained(A__ ,**A__)
lowercase = '''A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'''
lowercase = tokenizer_s.tokenize(A__)
lowercase = tokenizer_r.tokenize(A__)
self.assertListEqual(A__ ,A__)
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
lowercase = '''xa\u0303y''' + ''' ''' + '''x\xe3y'''
lowercase = tokenizer_s.tokenize(A__)
lowercase = tokenizer_r.tokenize(A__)
self.assertListEqual(A__ ,A__)
# Test that the tokenization is identical on unicode of space type
lowercase = [
'''\u0009''', # (horizontal tab, '\t')
'''\u000B''', # (vertical tab)
'''\u000C''', # (form feed)
'''\u0020''', # (space, ' ')
'''\u200E''', # (left-to-right mark):w
'''\u200F''', # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
lowercase = tokenizer_s.tokenize(A__)
lowercase = tokenizer_r.tokenize(A__)
self.assertListEqual(A__ ,A__)
# Test that the tokenization is identical on unicode of line break type
lowercase = [
'''\u000A''', # (line feed, '\n')
'''\r\n''', # (carriage return and line feed, '\r\n')
'''\u000D''', # (carriage return, '\r')
'''\r''', # (carriage return, '\r')
'''\u000D''', # (carriage return, '\r')
'''\u2028''', # (line separator)
'''\u2029''', # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
lowercase = tokenizer_s.tokenize(A__)
lowercase = tokenizer_r.tokenize(A__)
self.assertListEqual(A__ ,A__)
def A__ ( self):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
lowercase = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
lowercase = f'{text_of_1_token} {text_of_1_token}'
lowercase = self.rust_tokenizer_class.from_pretrained(
A__ ,use_fast=A__ ,)
lowercase = tokenizer_r(A__ ,return_offsets_mapping=A__ ,add_special_tokens=A__)
self.assertEqual(encoding.offset_mapping[0] ,(0, len(A__)))
self.assertEqual(
encoding.offset_mapping[1] ,(len(A__) + 1, len(A__) + 1 + len(A__)) ,)
lowercase = f' {text}'
lowercase = self.rust_tokenizer_class.from_pretrained(
A__ ,use_fast=A__ ,)
lowercase = tokenizer_r(A__ ,return_offsets_mapping=A__ ,add_special_tokens=A__)
self.assertEqual(encoding.offset_mapping[0] ,(1, 1 + len(A__)))
self.assertEqual(
encoding.offset_mapping[1] ,(1 + len(A__) + 1, 1 + len(A__) + 1 + len(A__)) ,)
def A__ ( self):
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error in raised when the user try to load a previous version of the tokenizer.
with self.assertRaises(A__) as context:
self.rust_tokenizer_class.from_pretrained('''robot-test/old-clip-tokenizer''')
self.assertTrue(
context.exception.args[0].startswith(
'''The `backend_tokenizer` provided does not match the expected format.'''))
@require_ftfy
def A__ ( self):
super().test_tokenization_python_rust_equals()
def A__ ( self):
# CLIP always lower cases letters
pass
| 97 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowercase__ :Union[str, Any] = 16
lowercase__ :Optional[Any] = 32
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ = 16 ):
'''simple docstring'''
lowercase = AutoTokenizer.from_pretrained('''bert-base-cased''' )
lowercase = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(lowerCAmelCase__ ):
# max_length=None => use the model max length (it's actually the default)
lowercase = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowercase = datasets.map(
lowerCAmelCase__ , batched=lowerCAmelCase__ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(lowerCAmelCase__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowercase = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowercase = 16
elif accelerator.mixed_precision != "no":
lowercase = 8
else:
lowercase = None
return tokenizer.pad(
lowerCAmelCase__ , padding='''longest''' , max_length=lowerCAmelCase__ , pad_to_multiple_of=lowerCAmelCase__ , return_tensors='''pt''' , )
# Instantiate dataloaders.
lowercase = DataLoader(
tokenized_datasets['''train'''] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ )
lowercase = DataLoader(
tokenized_datasets['''validation'''] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
lowercase__ :List[str] = mocked_dataloaders # noqa: F811
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , lowerCAmelCase__ ) == "1":
lowercase = 2
# New Code #
lowercase = int(args.gradient_accumulation_steps )
# Initialize accelerator
lowercase = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=lowerCAmelCase__ )
if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
raise NotImplementedError(
'''Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`''' )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase = config['''lr''']
lowercase = int(config['''num_epochs'''] )
lowercase = int(config['''seed'''] )
lowercase = int(config['''batch_size'''] )
lowercase = evaluate.load('''glue''' , '''mrpc''' )
set_seed(lowerCAmelCase__ )
lowercase , lowercase = get_dataloaders(lowerCAmelCase__ , lowerCAmelCase__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=lowerCAmelCase__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowercase = model.to(accelerator.device )
# Instantiate optimizer
lowercase = AdamW(params=model.parameters() , lr=lowerCAmelCase__ )
# Instantiate scheduler
lowercase = get_linear_schedule_with_warmup(
optimizer=lowerCAmelCase__ , num_warmup_steps=100 , num_training_steps=(len(lowerCAmelCase__ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase , lowercase , lowercase , lowercase , lowercase = accelerator.prepare(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Now we train the model
for epoch in range(lowerCAmelCase__ ):
model.train()
for step, batch in enumerate(lowerCAmelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(lowerCAmelCase__ ):
lowercase = model(**lowerCAmelCase__ )
lowercase = output.loss
accelerator.backward(lowerCAmelCase__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowerCAmelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowercase = model(**lowerCAmelCase__ )
lowercase = outputs.logits.argmax(dim=-1 )
lowercase , lowercase = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=lowerCAmelCase__ , references=lowerCAmelCase__ , )
lowercase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'epoch {epoch}:' , lowerCAmelCase__ )
def UpperCamelCase ( ):
'''simple docstring'''
lowercase = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=lowerCAmelCase__ , default=lowerCAmelCase__ , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''' , )
# New Code #
parser.add_argument(
'''--gradient_accumulation_steps''' , type=lowerCAmelCase__ , default=1 , help='''The number of minibatches to be ran before gradients are accumulated.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
lowercase = parser.parse_args()
lowercase = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(lowerCAmelCase__ , lowerCAmelCase__ )
if __name__ == "__main__":
main()
| 97 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.