code
stringlengths 81
54k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
|---|---|---|---|---|
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
def __init__( self : Tuple , lowerCAmelCase : pyspark.sql.DataFrame , lowerCAmelCase : Optional[NamedSplit] = None , lowerCAmelCase : Optional[Features] = None , lowerCAmelCase : bool = True , lowerCAmelCase : str = None , lowerCAmelCase : bool = False , lowerCAmelCase : str = None , lowerCAmelCase : bool = True , lowerCAmelCase : str = "arrow" , **lowerCAmelCase : Optional[Any] , ) -> Any:
"""simple docstring"""
super().__init__(
split=lowerCAmelCase , features=lowerCAmelCase , cache_dir=lowerCAmelCase , keep_in_memory=lowerCAmelCase , streaming=lowerCAmelCase , **lowerCAmelCase , )
lowercase__ = load_from_cache_file
lowercase__ = file_format
lowercase__ = Spark(
df=lowerCAmelCase , features=lowerCAmelCase , cache_dir=lowerCAmelCase , working_dir=lowerCAmelCase , **lowerCAmelCase , )
def UpperCAmelCase ( self : Dict) -> Dict:
"""simple docstring"""
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split)
lowercase__ = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=lowerCAmelCase , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split)
| 642
|
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
a__ : int = logging.get_logger(__name__)
a__ : Tuple = {"vocab_file": "vocab.txt"}
a__ : int = {
"vocab_file": {
"facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
"facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
},
}
a__ : Dict = {
"facebook/esm2_t6_8M_UR50D": 10_24,
"facebook/esm2_t12_35M_UR50D": 10_24,
}
def _lowerCAmelCase ( A__ ):
with open(A__ , 'r' ) as f:
lowercase__ = f.read().splitlines()
return [l.strip() for l in lines]
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : Union[str, Any] = VOCAB_FILES_NAMES
A : str = PRETRAINED_VOCAB_FILES_MAP
A : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : List[Any] = ["input_ids", "attention_mask"]
def __init__( self : Union[str, Any] , lowerCAmelCase : Any , lowerCAmelCase : Optional[int]="<unk>" , lowerCAmelCase : Dict="<cls>" , lowerCAmelCase : List[str]="<pad>" , lowerCAmelCase : Union[str, Any]="<mask>" , lowerCAmelCase : Optional[Any]="<eos>" , **lowerCAmelCase : Any , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**lowerCAmelCase)
lowercase__ = load_vocab_file(lowerCAmelCase)
lowercase__ = dict(enumerate(self.all_tokens))
lowercase__ = {tok: ind for ind, tok in enumerate(self.all_tokens)}
lowercase__ = unk_token
lowercase__ = cls_token
lowercase__ = pad_token
lowercase__ = mask_token
lowercase__ = eos_token
lowercase__ = self.all_tokens
self._create_trie(self.unique_no_split_tokens)
def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : int) -> str:
"""simple docstring"""
return self._id_to_token.get(lowerCAmelCase , self.unk_token)
def UpperCAmelCase ( self : Dict , lowerCAmelCase : str) -> int:
"""simple docstring"""
return self._token_to_id.get(lowerCAmelCase , self._token_to_id.get(self.unk_token))
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : str , **lowerCAmelCase : Union[str, Any]) -> Dict:
"""simple docstring"""
return text.split()
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : Any=False) -> Union[str, Any]:
"""simple docstring"""
return len(self._id_to_token)
def UpperCAmelCase ( self : Tuple) -> int:
"""simple docstring"""
return {token: i for i, token in enumerate(self.all_tokens)}
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : str) -> int:
"""simple docstring"""
return self._token_to_id.get(lowerCAmelCase , self._token_to_id.get(self.unk_token))
def UpperCAmelCase ( self : Dict , lowerCAmelCase : int) -> str:
"""simple docstring"""
return self._id_to_token.get(lowerCAmelCase , self.unk_token)
def UpperCAmelCase ( self : Any , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
lowercase__ = [self.cls_token_id]
lowercase__ = [self.eos_token_id] # No sep token in ESM vocabulary
if token_ids_a is None:
if self.eos_token_id is None:
return cls + token_ids_a
else:
return cls + token_ids_a + sep
elif self.eos_token_id is None:
raise ValueError('Cannot tokenize multiple sequences when EOS token is not set!')
return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : List , lowerCAmelCase : Optional[List] = None , lowerCAmelCase : bool = False) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.')
return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
lowercase__ = [1] + ([0] * len(lowerCAmelCase)) + [1]
if token_ids_a is not None:
mask += [0] * len(lowerCAmelCase) + [1]
return mask
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int]) -> Dict:
"""simple docstring"""
lowercase__ = os.path.join(lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + 'vocab.txt')
with open(lowerCAmelCase , 'w') as f:
f.write('\n'.join(self.all_tokens))
return (vocab_file,)
@property
def UpperCAmelCase ( self : Optional[int]) -> int:
"""simple docstring"""
return self.get_vocab_size(with_added_tokens=lowerCAmelCase)
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : Union[List[str], List[AddedToken]] , lowerCAmelCase : bool = False) -> int:
"""simple docstring"""
return super()._add_tokens(lowerCAmelCase , special_tokens=lowerCAmelCase)
| 642
| 1
|
from sklearn.metrics import matthews_corrcoef
import datasets
a__ : Optional[Any] = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n"
a__ : List[Any] = "\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results['matthews_correlation'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results['matthews_correlation'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results['matthews_correlation'], 2))\n -0.25\n"
a__ : List[Any] = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase__( datasets.Metric ):
'''simple docstring'''
def UpperCAmelCase ( self : List[str]) -> Optional[Any]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int32'),
'references': datasets.Value('int32'),
}) , reference_urls=[
'https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html'
] , )
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : str , lowerCAmelCase : List[Any] , lowerCAmelCase : str=None) -> Optional[int]:
"""simple docstring"""
return {
"matthews_correlation": float(matthews_corrcoef(lowerCAmelCase , lowerCAmelCase , sample_weight=lowerCAmelCase)),
}
| 642
|
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
a__ : int = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
a__ : Optional[Any] = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
a__ : Tuple = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase__( datasets.Metric ):
'''simple docstring'''
def UpperCAmelCase ( self : List[Any]) -> MetricInfo:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token') , id='sequence'),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token') , id='sequence') , id='references'),
}) , )
def UpperCAmelCase ( self : int , lowerCAmelCase : List[List[List[str]]] , lowerCAmelCase : List[List[str]] , lowerCAmelCase : int = 1 , lowerCAmelCase : int = 4 , ) -> Dict[str, float]:
"""simple docstring"""
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=lowerCAmelCase , hypotheses=lowerCAmelCase , min_len=lowerCAmelCase , max_len=lowerCAmelCase)
}
| 642
| 1
|
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
a__ : Optional[int] = logging.get_logger(__name__)
a__ : Any = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
"constant": get_constant_schedule,
"constant_w_warmup": get_constant_schedule_with_warmup,
}
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
def __init__( self : Tuple , lowerCAmelCase : Dict=None , lowerCAmelCase : List[str]=None , *lowerCAmelCase : Tuple , **lowerCAmelCase : Optional[Any]) -> str:
"""simple docstring"""
super().__init__(*lowerCAmelCase , **lowerCAmelCase)
if config is None:
assert isinstance(self.model , lowerCAmelCase), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
f''' {self.model.__class__}'''
)
lowercase__ = self.model.config
else:
lowercase__ = config
lowercase__ = data_args
lowercase__ = self.config.tgt_vocab_size if isinstance(self.config , lowerCAmelCase) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
f'''The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'''
' padding..')
if self.args.label_smoothing == 0:
lowercase__ = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
lowercase__ = label_smoothed_nll_loss
def UpperCAmelCase ( self : Any , lowerCAmelCase : int) -> Any:
"""simple docstring"""
if self.optimizer is None:
lowercase__ = ['bias', 'LayerNorm.weight']
lowercase__ = [
{
'params': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
'weight_decay': self.args.weight_decay,
},
{
'params': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
'weight_decay': 0.0,
},
]
lowercase__ = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
lowercase__ = Adafactor
lowercase__ = {'scale_parameter': False, 'relative_step': False}
else:
lowercase__ = AdamW
lowercase__ = {
'betas': (self.args.adam_betaa, self.args.adam_betaa),
'eps': self.args.adam_epsilon,
}
lowercase__ = self.args.learning_rate
if self.sharded_ddp:
lowercase__ = OSS(
params=lowerCAmelCase , optim=lowerCAmelCase , **lowerCAmelCase , )
else:
lowercase__ = optimizer_cls(lowerCAmelCase , **lowerCAmelCase)
if self.lr_scheduler is None:
lowercase__ = self._get_lr_scheduler(lowerCAmelCase)
else: # ignoring --lr_scheduler
logger.warning('scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.')
def UpperCAmelCase ( self : Dict , lowerCAmelCase : List[Any]) -> Dict:
"""simple docstring"""
lowercase__ = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
lowercase__ = schedule_func(self.optimizer)
elif self.args.lr_scheduler == "constant_w_warmup":
lowercase__ = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps)
else:
lowercase__ = schedule_func(
self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=lowerCAmelCase)
return scheduler
def UpperCAmelCase ( self : List[Any]) -> Optional[torch.utils.data.Sampler]:
"""simple docstring"""
if isinstance(self.train_dataset , torch.utils.data.IterableDataset):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset)
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset)
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset)
)
def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Any , lowerCAmelCase : Optional[int]) -> Tuple:
"""simple docstring"""
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
lowercase__ = model(**lowerCAmelCase , use_cache=lowerCAmelCase)[0]
lowercase__ = self.loss_fn(logits.view(-1 , logits.shape[-1]) , labels.view(-1))
else:
# compute usual loss via models
lowercase__, lowercase__ = model(**lowerCAmelCase , labels=lowerCAmelCase , use_cache=lowerCAmelCase)[:2]
else:
# compute label smoothed loss
lowercase__ = model(**lowerCAmelCase , use_cache=lowerCAmelCase)[0]
lowercase__ = torch.nn.functional.log_softmax(lowerCAmelCase , dim=-1)
lowercase__, lowercase__ = self.loss_fn(lowerCAmelCase , lowerCAmelCase , self.args.label_smoothing , ignore_index=self.config.pad_token_id)
return loss, logits
def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : List[str] , lowerCAmelCase : Dict) -> Optional[int]:
"""simple docstring"""
lowercase__ = inputs.pop('labels')
lowercase__, lowercase__ = self._compute_loss(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)
return loss
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : nn.Module , lowerCAmelCase : Dict[str, Union[torch.Tensor, Any]] , lowerCAmelCase : bool , lowerCAmelCase : Optional[List[str]] = None , ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
"""simple docstring"""
lowercase__ = self._prepare_inputs(lowerCAmelCase)
lowercase__ = {
'max_length': self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
'num_beams': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
lowercase__ = self.model.generate(
inputs['input_ids'] , attention_mask=inputs['attention_mask'] , **lowerCAmelCase , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
lowercase__ = self._pad_tensors_to_max_len(lowerCAmelCase , gen_kwargs['max_length'])
lowercase__ = inputs.pop('labels')
with torch.no_grad():
# compute loss on predict data
lowercase__, lowercase__ = self._compute_loss(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)
lowercase__ = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
lowercase__ = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
lowercase__ = self._pad_tensors_to_max_len(lowerCAmelCase , gen_kwargs['max_length'])
return (loss, logits, labels)
def UpperCAmelCase ( self : Dict , lowerCAmelCase : Dict , lowerCAmelCase : Tuple) -> Tuple:
"""simple docstring"""
lowercase__ = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
'Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be'
f''' padded to `max_length`={max_length}''')
lowercase__ = pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device)
lowercase__ = tensor
return padded_tensor
| 642
|
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class UpperCAmelCase__:
'''simple docstring'''
def __init__( self : Tuple , lowerCAmelCase : str , lowerCAmelCase : Dict=13 , lowerCAmelCase : Dict=7 , lowerCAmelCase : int=True , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : str=True , lowerCAmelCase : int=True , lowerCAmelCase : List[Any]=99 , lowerCAmelCase : List[Any]=[1, 1, 2] , lowerCAmelCase : Optional[Any]=1 , lowerCAmelCase : int=32 , lowerCAmelCase : Union[str, Any]=4 , lowerCAmelCase : Tuple=8 , lowerCAmelCase : int=37 , lowerCAmelCase : Any="gelu_new" , lowerCAmelCase : str=0.1 , lowerCAmelCase : List[str]=0.1 , lowerCAmelCase : Dict=0.0 , lowerCAmelCase : str=5_12 , lowerCAmelCase : str=3 , lowerCAmelCase : List[Any]=0.02 , lowerCAmelCase : Union[str, Any]=3 , lowerCAmelCase : Any=4 , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Optional[int]=False , ) -> List[Any]:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = seq_length
lowercase__ = is_training
lowercase__ = use_input_mask
lowercase__ = use_token_type_ids
lowercase__ = use_labels
lowercase__ = vocab_size
lowercase__ = block_sizes
lowercase__ = num_decoder_layers
lowercase__ = d_model
lowercase__ = n_head
lowercase__ = d_head
lowercase__ = d_inner
lowercase__ = hidden_act
lowercase__ = hidden_dropout
lowercase__ = attention_dropout
lowercase__ = activation_dropout
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = 2
lowercase__ = num_labels
lowercase__ = num_choices
lowercase__ = scope
lowercase__ = initializer_std
# Used in the tests to check the size of the first attention layer
lowercase__ = n_head
# Used in the tests to check the size of the first hidden state
lowercase__ = self.d_model
# Used in the tests to check the number of output hidden states/attentions
lowercase__ = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
lowercase__ = self.num_hidden_layers + 2
def UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
lowercase__ = None
if self.use_input_mask:
lowercase__ = random_attention_mask([self.batch_size, self.seq_length])
lowercase__ = None
if self.use_token_type_ids:
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
lowercase__ = None
lowercase__ = None
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
lowercase__ = ids_tensor([self.batch_size] , self.num_choices)
lowercase__ = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def UpperCAmelCase ( self : Dict , lowerCAmelCase : List[Any] , lowerCAmelCase : Dict , lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Any , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Union[str, Any] , ) -> int:
"""simple docstring"""
lowercase__ = TFFunnelModel(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
lowercase__ = [input_ids, input_mask]
lowercase__ = model(lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model))
lowercase__ = False
lowercase__ = TFFunnelModel(config=lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model))
lowercase__ = False
lowercase__ = TFFunnelModel(config=lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model))
def UpperCAmelCase ( self : List[str] , lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int] , lowerCAmelCase : Any , lowerCAmelCase : List[str] , lowerCAmelCase : int , lowerCAmelCase : Optional[Any] , ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = TFFunnelBaseModel(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
lowercase__ = [input_ids, input_mask]
lowercase__ = model(lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model))
lowercase__ = False
lowercase__ = TFFunnelBaseModel(config=lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model))
lowercase__ = False
lowercase__ = TFFunnelBaseModel(config=lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model))
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Dict , lowerCAmelCase : List[str] , lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : Tuple , ) -> str:
"""simple docstring"""
lowercase__ = TFFunnelForPreTraining(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length))
def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : Any , lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[str] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Tuple , ) -> Optional[int]:
"""simple docstring"""
lowercase__ = TFFunnelForMaskedLM(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : int , lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Dict , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[Any] , ) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.num_labels
lowercase__ = TFFunnelForSequenceClassification(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : str , lowerCAmelCase : Optional[int] , lowerCAmelCase : int , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[str] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[Any] , ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.num_choices
lowercase__ = TFFunnelForMultipleChoice(config=lowerCAmelCase)
lowercase__ = tf.tile(tf.expand_dims(lowerCAmelCase , 1) , (1, self.num_choices, 1))
lowercase__ = tf.tile(tf.expand_dims(lowerCAmelCase , 1) , (1, self.num_choices, 1))
lowercase__ = tf.tile(tf.expand_dims(lowerCAmelCase , 1) , (1, self.num_choices, 1))
lowercase__ = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def UpperCAmelCase ( self : List[str] , lowerCAmelCase : str , lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int , lowerCAmelCase : Optional[int] , lowerCAmelCase : Any , ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.num_labels
lowercase__ = TFFunnelForTokenClassification(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def UpperCAmelCase ( self : int , lowerCAmelCase : List[str] , lowerCAmelCase : int , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : Tuple , lowerCAmelCase : Tuple , ) -> Optional[int]:
"""simple docstring"""
lowercase__ = TFFunnelForQuestionAnswering(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
(
(
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
),
) = config_and_inputs
lowercase__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase__( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
A : int = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
A : Dict = (
{
"feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
"fill-mask": TFFunnelForMaskedLM,
"question-answering": TFFunnelForQuestionAnswering,
"text-classification": TFFunnelForSequenceClassification,
"token-classification": TFFunnelForTokenClassification,
"zero-shot": TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
A : Optional[int] = False
A : Optional[int] = False
def UpperCAmelCase ( self : Tuple) -> str:
"""simple docstring"""
lowercase__ = TFFunnelModelTester(self)
lowercase__ = ConfigTester(self , config_class=lowerCAmelCase)
def UpperCAmelCase ( self : int) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase ( self : Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCAmelCase)
def UpperCAmelCase ( self : int) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase)
def UpperCAmelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase)
def UpperCAmelCase ( self : Dict) -> Dict:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase)
@require_tf
class UpperCAmelCase__( lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
A : Tuple = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
A : List[str] = False
A : int = False
def UpperCAmelCase ( self : Any) -> List[Any]:
"""simple docstring"""
lowercase__ = TFFunnelModelTester(self , base=lowerCAmelCase)
lowercase__ = ConfigTester(self , config_class=lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase ( self : Tuple) -> int:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*lowerCAmelCase)
def UpperCAmelCase ( self : int) -> str:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase)
def UpperCAmelCase ( self : List[str]) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowerCAmelCase)
| 642
| 1
|
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class UpperCAmelCase__( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCAmelCase ( self : Optional[Any]) -> str:
"""simple docstring"""
lowercase__ = FlaxMTaForConditionalGeneration.from_pretrained('google/mt5-small')
lowercase__ = AutoTokenizer.from_pretrained('google/mt5-small')
lowercase__ = tokenizer('Hello there' , return_tensors='np').input_ids
lowercase__ = tokenizer('Hi I am' , return_tensors='np').input_ids
lowercase__ = shift_tokens_right(lowerCAmelCase , model.config.pad_token_id , model.config.decoder_start_token_id)
lowercase__ = model(lowerCAmelCase , decoder_input_ids=lowerCAmelCase).logits
lowercase__ = optax.softmax_cross_entropy(lowerCAmelCase , onehot(lowerCAmelCase , logits.shape[-1])).mean()
lowercase__ = -(labels.shape[-1] * loss.item())
lowercase__ = -84.91_27
self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1E-4)
| 642
|
def _lowerCAmelCase ( A__ , A__ , A__ ):
if principal <= 0:
raise Exception('Principal borrowed must be > 0' )
if rate_per_annum < 0:
raise Exception('Rate of interest must be >= 0' )
if years_to_repay <= 0 or not isinstance(A__ , A__ ):
raise Exception('Years to repay must be an integer > 0' )
# Yearly rate is divided by 12 to get monthly rate
lowercase__ = rate_per_annum / 12
# Years to repay is multiplied by 12 to get number of payments as payment is monthly
lowercase__ = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 642
| 1
|
def _lowerCAmelCase ( A__ = 1_000 ):
return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) )
if __name__ == "__main__":
print(solution())
| 642
|
from __future__ import annotations
def _lowerCAmelCase ( A__ , A__ ):
if b == 0:
return (1, 0)
((lowercase__), (lowercase__)) = extended_euclid(A__ , a % b )
lowercase__ = a // b
return (y, x - k * y)
def _lowerCAmelCase ( A__ , A__ , A__ , A__ ):
((lowercase__), (lowercase__)) = extended_euclid(A__ , A__ )
lowercase__ = na * na
lowercase__ = ra * x * na + ra * y * na
return (n % m + m) % m
def _lowerCAmelCase ( A__ , A__ ):
((lowercase__), (lowercase__)) = extended_euclid(A__ , A__ )
if b < 0:
lowercase__ = (b % n + n) % n
return b
def _lowerCAmelCase ( A__ , A__ , A__ , A__ ):
lowercase__, lowercase__ = invert_modulo(A__ , A__ ), invert_modulo(A__ , A__ )
lowercase__ = na * na
lowercase__ = ra * x * na + ra * y * na
return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name="chinese_remainder_theorem", verbose=True)
testmod(name="chinese_remainder_theorem2", verbose=True)
testmod(name="invert_modulo", verbose=True)
testmod(name="extended_euclid", verbose=True)
| 642
| 1
|
import requests
def _lowerCAmelCase ( A__ , A__ ):
lowercase__ = {'Content-Type': 'application/json'}
lowercase__ = requests.post(A__ , json={'text': message_body} , headers=A__ )
if response.status_code != 200:
lowercase__ = (
'Request to slack returned an error '
F'''{response.status_code}, the response is:\n{response.text}'''
)
raise ValueError(A__ )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
| 642
|
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
a__ : Union[str, Any] = logging.get_logger(__name__)
a__ : Optional[Any] = {
"google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : Union[str, Any] = "umt5"
A : List[str] = ["past_key_values"]
def __init__( self : List[Any] , lowerCAmelCase : Optional[int]=25_01_12 , lowerCAmelCase : str=5_12 , lowerCAmelCase : List[Any]=64 , lowerCAmelCase : Optional[int]=10_24 , lowerCAmelCase : Union[str, Any]=8 , lowerCAmelCase : Tuple=None , lowerCAmelCase : Optional[Any]=6 , lowerCAmelCase : int=32 , lowerCAmelCase : int=1_28 , lowerCAmelCase : List[str]=0.1 , lowerCAmelCase : List[str]=1E-6 , lowerCAmelCase : Optional[int]=1.0 , lowerCAmelCase : Optional[Any]="gated-gelu" , lowerCAmelCase : List[Any]=True , lowerCAmelCase : List[str]=True , lowerCAmelCase : List[Any]="T5Tokenizer" , lowerCAmelCase : str=True , lowerCAmelCase : Optional[int]=0 , lowerCAmelCase : Tuple=1 , lowerCAmelCase : Any=0 , **lowerCAmelCase : int , ) -> str:
"""simple docstring"""
super().__init__(
is_encoder_decoder=lowerCAmelCase , tokenizer_class=lowerCAmelCase , tie_word_embeddings=lowerCAmelCase , pad_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , decoder_start_token_id=lowerCAmelCase , **lowerCAmelCase , )
lowercase__ = vocab_size
lowercase__ = d_model
lowercase__ = d_kv
lowercase__ = d_ff
lowercase__ = num_layers
lowercase__ = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
lowercase__ = num_heads
lowercase__ = relative_attention_num_buckets
lowercase__ = relative_attention_max_distance
lowercase__ = dropout_rate
lowercase__ = layer_norm_epsilon
lowercase__ = initializer_factor
lowercase__ = feed_forward_proj
lowercase__ = use_cache
lowercase__ = self.feed_forward_proj.split('-')
lowercase__ = act_info[-1]
lowercase__ = act_info[0] == 'gated'
if len(lowerCAmelCase) > 1 and act_info[0] != "gated" or len(lowerCAmelCase) > 2:
raise ValueError(
f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
'\'gated-gelu\' or \'relu\'')
if feed_forward_proj == "gated-gelu":
lowercase__ = 'gelu_new'
@property
def UpperCAmelCase ( self : Union[str, Any]) -> Dict:
"""simple docstring"""
return self.d_model
@property
def UpperCAmelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
return self.num_heads
@property
def UpperCAmelCase ( self : Optional[int]) -> Optional[Any]:
"""simple docstring"""
return self.num_layers
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def UpperCAmelCase ( self : Optional[int]) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
lowercase__ = {
'input_ids': {0: 'batch', 1: 'encoder_sequence'},
'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
}
if self.use_past:
lowercase__ = 'past_encoder_sequence + sequence'
lowercase__ = {0: 'batch'}
lowercase__ = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
lowercase__ = {0: 'batch', 1: 'decoder_sequence'}
lowercase__ = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase , direction='inputs')
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def UpperCAmelCase ( self : int) -> int:
"""simple docstring"""
return 13
@property
def UpperCAmelCase ( self : Optional[Any]) -> float:
"""simple docstring"""
return 5E-4
| 642
| 1
|
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
a__ : Union[str, Any] = logging.get_logger(__name__)
a__ : Optional[Any] = {
"google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : Union[str, Any] = "umt5"
A : List[str] = ["past_key_values"]
def __init__( self : List[Any] , lowerCAmelCase : Optional[int]=25_01_12 , lowerCAmelCase : str=5_12 , lowerCAmelCase : List[Any]=64 , lowerCAmelCase : Optional[int]=10_24 , lowerCAmelCase : Union[str, Any]=8 , lowerCAmelCase : Tuple=None , lowerCAmelCase : Optional[Any]=6 , lowerCAmelCase : int=32 , lowerCAmelCase : int=1_28 , lowerCAmelCase : List[str]=0.1 , lowerCAmelCase : List[str]=1E-6 , lowerCAmelCase : Optional[int]=1.0 , lowerCAmelCase : Optional[Any]="gated-gelu" , lowerCAmelCase : List[Any]=True , lowerCAmelCase : List[str]=True , lowerCAmelCase : List[Any]="T5Tokenizer" , lowerCAmelCase : str=True , lowerCAmelCase : Optional[int]=0 , lowerCAmelCase : Tuple=1 , lowerCAmelCase : Any=0 , **lowerCAmelCase : int , ) -> str:
"""simple docstring"""
super().__init__(
is_encoder_decoder=lowerCAmelCase , tokenizer_class=lowerCAmelCase , tie_word_embeddings=lowerCAmelCase , pad_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , decoder_start_token_id=lowerCAmelCase , **lowerCAmelCase , )
lowercase__ = vocab_size
lowercase__ = d_model
lowercase__ = d_kv
lowercase__ = d_ff
lowercase__ = num_layers
lowercase__ = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
lowercase__ = num_heads
lowercase__ = relative_attention_num_buckets
lowercase__ = relative_attention_max_distance
lowercase__ = dropout_rate
lowercase__ = layer_norm_epsilon
lowercase__ = initializer_factor
lowercase__ = feed_forward_proj
lowercase__ = use_cache
lowercase__ = self.feed_forward_proj.split('-')
lowercase__ = act_info[-1]
lowercase__ = act_info[0] == 'gated'
if len(lowerCAmelCase) > 1 and act_info[0] != "gated" or len(lowerCAmelCase) > 2:
raise ValueError(
f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
'\'gated-gelu\' or \'relu\'')
if feed_forward_proj == "gated-gelu":
lowercase__ = 'gelu_new'
@property
def UpperCAmelCase ( self : Union[str, Any]) -> Dict:
"""simple docstring"""
return self.d_model
@property
def UpperCAmelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
return self.num_heads
@property
def UpperCAmelCase ( self : Optional[int]) -> Optional[Any]:
"""simple docstring"""
return self.num_layers
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def UpperCAmelCase ( self : Optional[int]) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
lowercase__ = {
'input_ids': {0: 'batch', 1: 'encoder_sequence'},
'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
}
if self.use_past:
lowercase__ = 'past_encoder_sequence + sequence'
lowercase__ = {0: 'batch'}
lowercase__ = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
lowercase__ = {0: 'batch', 1: 'decoder_sequence'}
lowercase__ = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase , direction='inputs')
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def UpperCAmelCase ( self : int) -> int:
"""simple docstring"""
return 13
@property
def UpperCAmelCase ( self : Optional[Any]) -> float:
"""simple docstring"""
return 5E-4
| 642
|
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
a__ : Any = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__( lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
A : str = XGLMTokenizer
A : List[Any] = XGLMTokenizerFast
A : int = True
A : Optional[Any] = True
def UpperCAmelCase ( self : Optional[int]) -> Optional[Any]:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
lowercase__ = XGLMTokenizer(lowerCAmelCase , keep_accents=lowerCAmelCase)
tokenizer.save_pretrained(self.tmpdirname)
def UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
lowercase__ = '<pad>'
lowercase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase) , lowerCAmelCase)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase) , lowerCAmelCase)
def UpperCAmelCase ( self : str) -> List[str]:
"""simple docstring"""
lowercase__ = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '<s>')
self.assertEqual(vocab_keys[1] , '<pad>')
self.assertEqual(len(lowerCAmelCase) , 10_08)
def UpperCAmelCase ( self : List[str]) -> str:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 10_08)
def UpperCAmelCase ( self : Optional[Any]) -> List[str]:
"""simple docstring"""
lowercase__ = XGLMTokenizer(lowerCAmelCase , keep_accents=lowerCAmelCase)
lowercase__ = tokenizer.tokenize('This is a test')
self.assertListEqual(lowerCAmelCase , ['▁This', '▁is', '▁a', '▁t', 'est'])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
lowercase__ = tokenizer.tokenize('I was born in 92000, and this is falsé.')
self.assertListEqual(
lowerCAmelCase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
lowercase__ = tokenizer.convert_tokens_to_ids(lowerCAmelCase)
self.assertListEqual(
lowerCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
lowercase__ = tokenizer.convert_ids_to_tokens(lowerCAmelCase)
self.assertListEqual(
lowerCAmelCase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
def UpperCAmelCase ( self : int) -> Dict:
"""simple docstring"""
return XGLMTokenizer.from_pretrained('facebook/xglm-564M')
def UpperCAmelCase ( self : Optional[int]) -> Dict:
"""simple docstring"""
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(lowerCAmelCase , f.name)
lowercase__ = XGLMTokenizer(f.name , keep_accents=lowerCAmelCase)
lowercase__ = pickle.dumps(lowerCAmelCase)
pickle.loads(lowerCAmelCase)
def UpperCAmelCase ( self : Optional[Any]) -> str:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_rust_tokenizer()
lowercase__ = 'I was born in 92000, and this is falsé.'
lowercase__ = tokenizer.tokenize(lowerCAmelCase)
lowercase__ = rust_tokenizer.tokenize(lowerCAmelCase)
self.assertListEqual(lowerCAmelCase , lowerCAmelCase)
lowercase__ = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase)
lowercase__ = rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase)
self.assertListEqual(lowerCAmelCase , lowerCAmelCase)
lowercase__ = self.get_rust_tokenizer()
lowercase__ = tokenizer.encode(lowerCAmelCase)
lowercase__ = rust_tokenizer.encode(lowerCAmelCase)
self.assertListEqual(lowerCAmelCase , lowerCAmelCase)
@slow
def UpperCAmelCase ( self : List[str]) -> List[str]:
"""simple docstring"""
lowercase__ = 'Hello World!'
lowercase__ = [2, 3_12_27, 44_47, 35]
self.assertListEqual(lowerCAmelCase , self.big_tokenizer.encode(lowerCAmelCase))
@slow
def UpperCAmelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'
)
# fmt: off
lowercase__ = [2, 10_18, 67, 11, 19_88, 26_17, 56_31, 2_78, 11, 34_07, 48, 7_16_30, 2_80_85, 4, 32_34, 1_57, 13, 6, 5, 6, 4, 35_26, 7_68, 15, 6_59, 57, 2_98, 39_83, 8_64, 1_29, 21, 6, 5, 1_36_75, 3_77, 6_52, 75_80, 1_03_41, 1_55, 28_17, 4_22, 16_66, 7, 16_74, 53, 1_13, 20_22_77, 1_78_92, 33, 60, 87, 4, 32_34, 1_57, 61, 26_67, 5_23_76, 19, 88, 23, 7_35]
# fmt: on
self.assertListEqual(lowerCAmelCase , self.big_tokenizer.encode(lowerCAmelCase))
@slow
def UpperCAmelCase ( self : str) -> Dict:
"""simple docstring"""
lowercase__ = {
'input_ids': [[2, 10_88_25, 11_63, 15, 8_80_10, 4_73, 1_58_98, 1_57, 1_36_72, 18_57, 3_12, 8, 23_80_21, 11_63, 53, 1_36_72, 18_57, 3_12, 8, 5_32_83, 18_23_96, 8, 1_85_66, 16, 3_67_33, 41_01, 8, 2_30, 24_40_17, 12_25_53, 7, 15, 13_25_97, 4, 2_93, 1_25_11, 76_10, 4, 34_14, 13_25_97, 9, 4, 3_23_61, 3_62, 4, 7_34, 2_85_12, 3_25_69, 18, 4, 3_23_61, 2_60_96, 1_49_82, 73, 1_87_15, 2_14_33, 23_52_61, 15, 4_92, 1_24_27, 16, 53, 1_87_15, 2_14_33, 6_54_54, 15, 2_36_59, 5_63, 16, 2_78, 5_97, 28_43, 5_95, 79_31, 18_23_96, 6_41_86, 22, 8_86, 5_95, 13_29_81, 53, 2_55_40, 34_49, 4_39_82, 3_99_01, 59_51, 8_78, 3_30, 4, 2_76_94, 8_02_69, 3_12, 53, 65_17, 1_17_80, 6_11, 2_04_08, 5], [2, 6, 13_25_97, 67, 4_28_97, 33, 5_92, 8, 16_37_29, 2_55_40, 3_61, 13_69_97, 10_95_14, 17_32_30, 7, 5_01, 60, 10_29_13, 1_96, 56_31, 2_35, 6_32_43, 4_73, 6, 23_17_57, 74, 52_77, 79_05, 53, 30_95, 3_73_17, 22, 4_54, 18_38_74, 5], [2, 2_68, 3_12_98, 4_65_30, 6, 13_29_35, 4_38_31, 7, 5_97, 32, 24, 36_88, 98_65, 5]],
'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase , model_name='facebook/xglm-564M' , padding=lowerCAmelCase , )
| 642
| 1
|
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class UpperCAmelCase__( lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
A : List[str] = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : Tuple=0) -> Any:
"""simple docstring"""
lowercase__ = np.random.RandomState(lowerCAmelCase)
lowercase__ = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def UpperCAmelCase ( self : Tuple) -> str:
"""simple docstring"""
lowercase__ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider')
pipe.set_progress_bar_config(disable=lowerCAmelCase)
lowercase__ = self.get_dummy_inputs()
lowercase__ = pipe(**lowerCAmelCase).images
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
lowercase__ = np.array([0.6_50_72, 0.5_84_92, 0.4_82_19, 0.5_55_21, 0.5_31_80, 0.5_59_39, 0.5_06_97, 0.3_98_00, 0.4_64_55])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def UpperCAmelCase ( self : Dict) -> int:
"""simple docstring"""
lowercase__ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider')
lowercase__ = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=lowerCAmelCase)
pipe.set_progress_bar_config(disable=lowerCAmelCase)
lowercase__ = self.get_dummy_inputs()
lowercase__ = pipe(**lowerCAmelCase).images
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
lowercase__ = np.array([0.6_58_63, 0.5_94_25, 0.4_93_26, 0.5_63_13, 0.5_38_75, 0.5_66_27, 0.5_10_65, 0.3_97_77, 0.4_63_30])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def UpperCAmelCase ( self : int) -> int:
"""simple docstring"""
lowercase__ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider')
lowercase__ = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCAmelCase)
lowercase__ = self.get_dummy_inputs()
lowercase__ = pipe(**lowerCAmelCase).images
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
lowercase__ = np.array([0.5_37_55, 0.6_07_86, 0.4_74_02, 0.4_94_88, 0.5_18_69, 0.4_98_19, 0.4_79_85, 0.3_89_57, 0.4_42_79])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def UpperCAmelCase ( self : List[Any]) -> List[Any]:
"""simple docstring"""
lowercase__ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider')
lowercase__ = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCAmelCase)
lowercase__ = self.get_dummy_inputs()
lowercase__ = pipe(**lowerCAmelCase).images
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
lowercase__ = np.array([0.5_37_55, 0.6_07_86, 0.4_74_02, 0.4_94_88, 0.5_18_69, 0.4_98_19, 0.4_79_85, 0.3_89_57, 0.4_42_79])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
lowercase__ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider')
lowercase__ = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCAmelCase)
lowercase__ = self.get_dummy_inputs()
lowercase__ = pipe(**lowerCAmelCase).images
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
lowercase__ = np.array([0.5_38_17, 0.6_08_12, 0.4_73_84, 0.4_95_30, 0.5_18_94, 0.4_98_14, 0.4_79_84, 0.3_89_58, 0.4_42_71])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def UpperCAmelCase ( self : List[str]) -> int:
"""simple docstring"""
lowercase__ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider')
lowercase__ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCAmelCase)
lowercase__ = self.get_dummy_inputs()
lowercase__ = pipe(**lowerCAmelCase).images
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
lowercase__ = np.array([0.5_38_95, 0.6_08_08, 0.4_79_33, 0.4_96_08, 0.5_18_86, 0.4_99_50, 0.4_80_53, 0.3_89_57, 0.4_42_00])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def UpperCAmelCase ( self : List[Any]) -> List[Any]:
"""simple docstring"""
lowercase__ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider')
pipe.set_progress_bar_config(disable=lowerCAmelCase)
lowercase__ = self.get_dummy_inputs()
lowercase__ = 3 * [inputs['prompt']]
# forward
lowercase__ = pipe(**lowerCAmelCase)
lowercase__ = output.images[0, -3:, -3:, -1]
lowercase__ = self.get_dummy_inputs()
lowercase__ = 3 * [inputs.pop('prompt')]
lowercase__ = pipe.tokenizer(
lowerCAmelCase , padding='max_length' , max_length=pipe.tokenizer.model_max_length , truncation=lowerCAmelCase , return_tensors='np' , )
lowercase__ = text_inputs['input_ids']
lowercase__ = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa))[0]
lowercase__ = prompt_embeds
# forward
lowercase__ = pipe(**lowerCAmelCase)
lowercase__ = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten()).max() < 1E-4
def UpperCAmelCase ( self : Optional[int]) -> Dict:
"""simple docstring"""
lowercase__ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider')
pipe.set_progress_bar_config(disable=lowerCAmelCase)
lowercase__ = self.get_dummy_inputs()
lowercase__ = 3 * ['this is a negative prompt']
lowercase__ = negative_prompt
lowercase__ = 3 * [inputs['prompt']]
# forward
lowercase__ = pipe(**lowerCAmelCase)
lowercase__ = output.images[0, -3:, -3:, -1]
lowercase__ = self.get_dummy_inputs()
lowercase__ = 3 * [inputs.pop('prompt')]
lowercase__ = []
for p in [prompt, negative_prompt]:
lowercase__ = pipe.tokenizer(
lowerCAmelCase , padding='max_length' , max_length=pipe.tokenizer.model_max_length , truncation=lowerCAmelCase , return_tensors='np' , )
lowercase__ = text_inputs['input_ids']
embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa))[0])
lowercase__, lowercase__ = embeds
# forward
lowercase__ = pipe(**lowerCAmelCase)
lowercase__ = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten()).max() < 1E-4
@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCAmelCase__( unittest.TestCase ):
'''simple docstring'''
@property
def UpperCAmelCase ( self : int) -> Dict:
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def UpperCAmelCase ( self : Union[str, Any]) -> List[str]:
"""simple docstring"""
lowercase__ = ort.SessionOptions()
lowercase__ = False
return options
def UpperCAmelCase ( self : Tuple) -> Optional[Any]:
"""simple docstring"""
lowercase__ = OnnxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='onnx' , safety_checker=lowerCAmelCase , feature_extractor=lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase)
lowercase__ = 'A painting of a squirrel eating a burger'
np.random.seed(0)
lowercase__ = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=10 , output_type='np')
lowercase__ = output.images
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowercase__ = np.array([0.04_52, 0.03_90, 0.00_87, 0.03_50, 0.06_17, 0.03_64, 0.05_44, 0.05_23, 0.07_20])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def UpperCAmelCase ( self : Optional[int]) -> Dict:
"""simple docstring"""
lowercase__ = DDIMScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5' , subfolder='scheduler' , revision='onnx')
lowercase__ = OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , revision='onnx' , scheduler=lowerCAmelCase , safety_checker=lowerCAmelCase , feature_extractor=lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase)
lowercase__ = 'open neural network exchange'
lowercase__ = np.random.RandomState(0)
lowercase__ = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=lowerCAmelCase , output_type='np')
lowercase__ = output.images
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowercase__ = np.array([0.28_67, 0.19_74, 0.14_81, 0.72_94, 0.72_51, 0.66_67, 0.41_94, 0.56_42, 0.64_86])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def UpperCAmelCase ( self : Any) -> Optional[int]:
"""simple docstring"""
lowercase__ = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5' , subfolder='scheduler' , revision='onnx')
lowercase__ = OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , revision='onnx' , scheduler=lowerCAmelCase , safety_checker=lowerCAmelCase , feature_extractor=lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase)
lowercase__ = 'open neural network exchange'
lowercase__ = np.random.RandomState(0)
lowercase__ = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=lowerCAmelCase , output_type='np')
lowercase__ = output.images
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowercase__ = np.array([0.23_06, 0.19_59, 0.15_93, 0.65_49, 0.63_94, 0.54_08, 0.50_65, 0.60_10, 0.61_61])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def UpperCAmelCase ( self : Tuple) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = 0
def test_callback_fn(lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : np.ndarray) -> None:
lowercase__ = True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 64, 64)
lowercase__ = latents[0, -3:, -3:, -1]
lowercase__ = np.array(
[-0.67_72, -0.38_35, -1.24_56, 0.19_05, -1.09_74, 0.69_67, -1.93_53, 0.01_78, 1.01_67])
assert np.abs(latents_slice.flatten() - expected_slice).max() < 1E-3
elif step == 5:
assert latents.shape == (1, 4, 64, 64)
lowercase__ = latents[0, -3:, -3:, -1]
lowercase__ = np.array(
[-0.33_51, 0.22_41, -0.18_37, -0.23_25, -0.65_77, 0.33_93, -0.02_41, 0.58_99, 1.38_75])
assert np.abs(latents_slice.flatten() - expected_slice).max() < 1E-3
lowercase__ = False
lowercase__ = OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , revision='onnx' , safety_checker=lowerCAmelCase , feature_extractor=lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowerCAmelCase)
lowercase__ = 'Andromeda galaxy in a bottle'
lowercase__ = np.random.RandomState(0)
pipe(
prompt=lowerCAmelCase , num_inference_steps=5 , guidance_scale=7.5 , generator=lowerCAmelCase , callback=lowerCAmelCase , callback_steps=1 , )
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def UpperCAmelCase ( self : str) -> Any:
"""simple docstring"""
lowercase__ = OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , revision='onnx' , safety_checker=lowerCAmelCase , feature_extractor=lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
assert isinstance(lowerCAmelCase , lowerCAmelCase)
assert pipe.safety_checker is None
lowercase__ = pipe('example prompt' , num_inference_steps=2).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCAmelCase)
lowercase__ = OnnxStableDiffusionPipeline.from_pretrained(lowerCAmelCase)
# sanity check that the pipeline still works
assert pipe.safety_checker is None
lowercase__ = pipe('example prompt' , num_inference_steps=2).images[0]
assert image is not None
| 642
|
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class UpperCAmelCase__:
'''simple docstring'''
def __init__( self : Optional[Any] , lowerCAmelCase : str) -> Optional[int]:
"""simple docstring"""
lowercase__ = data
lowercase__ = [0X6_7_4_5_2_3_0_1, 0XE_F_C_D_A_B_8_9, 0X9_8_B_A_D_C_F_E, 0X1_0_3_2_5_4_7_6, 0XC_3_D_2_E_1_F_0]
@staticmethod
def UpperCAmelCase ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[int]) -> str:
"""simple docstring"""
return ((n << b) | (n >> (32 - b))) & 0XF_F_F_F_F_F_F_F
def UpperCAmelCase ( self : Dict) -> Dict:
"""simple docstring"""
lowercase__ = B'\x80' + B'\x00' * (63 - (len(self.data) + 8) % 64)
lowercase__ = self.data + padding + struct.pack('>Q' , 8 * len(self.data))
return padded_data
def UpperCAmelCase ( self : int) -> Tuple:
"""simple docstring"""
return [
self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data) , 64)
]
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : int) -> List[Any]:
"""simple docstring"""
lowercase__ = list(struct.unpack('>16L' , lowerCAmelCase)) + [0] * 64
for i in range(16 , 80):
lowercase__ = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1)
return w
def UpperCAmelCase ( self : str) -> Dict:
"""simple docstring"""
lowercase__ = self.padding()
lowercase__ = self.split_blocks()
for block in self.blocks:
lowercase__ = self.expand_block(lowerCAmelCase)
lowercase__, lowercase__, lowercase__, lowercase__, lowercase__ = self.h
for i in range(0 , 80):
if 0 <= i < 20:
lowercase__ = (b & c) | ((~b) & d)
lowercase__ = 0X5_A_8_2_7_9_9_9
elif 20 <= i < 40:
lowercase__ = b ^ c ^ d
lowercase__ = 0X6_E_D_9_E_B_A_1
elif 40 <= i < 60:
lowercase__ = (b & c) | (b & d) | (c & d)
lowercase__ = 0X8_F_1_B_B_C_D_C
elif 60 <= i < 80:
lowercase__ = b ^ c ^ d
lowercase__ = 0XC_A_6_2_C_1_D_6
lowercase__, lowercase__, lowercase__, lowercase__, lowercase__ = (
self.rotate(lowerCAmelCase , 5) + f + e + k + expanded_block[i] & 0XF_F_F_F_F_F_F_F,
a,
self.rotate(lowerCAmelCase , 30),
c,
d,
)
lowercase__ = (
self.h[0] + a & 0XF_F_F_F_F_F_F_F,
self.h[1] + b & 0XF_F_F_F_F_F_F_F,
self.h[2] + c & 0XF_F_F_F_F_F_F_F,
self.h[3] + d & 0XF_F_F_F_F_F_F_F,
self.h[4] + e & 0XF_F_F_F_F_F_F_F,
)
return ("{:08x}" * 5).format(*self.h)
def _lowerCAmelCase ( ):
lowercase__ = B'Test String'
assert SHAaHash(A__ ).final_hash() == hashlib.shaa(A__ ).hexdigest() # noqa: S324
def _lowerCAmelCase ( ):
lowercase__ = argparse.ArgumentParser(description='Process some strings or files' )
parser.add_argument(
'--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
parser.add_argument('--file' , dest='input_file' , help='Hash contents of a file' )
lowercase__ = parser.parse_args()
lowercase__ = args.input_string
# In any case hash input should be a bytestring
if args.input_file:
with open(args.input_file , 'rb' ) as f:
lowercase__ = f.read()
else:
lowercase__ = bytes(A__ , 'utf-8' )
print(SHAaHash(A__ ).final_hash() )
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
| 642
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a__ : List[Any] = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Any = [
"SEW_PRETRAINED_MODEL_ARCHIVE_LIST",
"SEWForCTC",
"SEWForSequenceClassification",
"SEWModel",
"SEWPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
a__ : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 642
|
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
a__ : List[Any] = logging.get_logger(__name__)
a__ : Optional[int] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
a__ : List[Any] = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
"tokenizer_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json",
},
}
a__ : int = {
"facebook/bart-base": 10_24,
"facebook/bart-large": 10_24,
"facebook/bart-large-mnli": 10_24,
"facebook/bart-large-cnn": 10_24,
"facebook/bart-large-xsum": 10_24,
"yjernite/bart_eli5": 10_24,
}
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : Optional[Any] = VOCAB_FILES_NAMES
A : Dict = PRETRAINED_VOCAB_FILES_MAP
A : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : int = ["input_ids", "attention_mask"]
A : Any = BartTokenizer
def __init__( self : List[Any] , lowerCAmelCase : Any=None , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : str="replace" , lowerCAmelCase : str="<s>" , lowerCAmelCase : int="</s>" , lowerCAmelCase : Optional[int]="</s>" , lowerCAmelCase : Union[str, Any]="<s>" , lowerCAmelCase : str="<unk>" , lowerCAmelCase : int="<pad>" , lowerCAmelCase : int="<mask>" , lowerCAmelCase : Dict=False , lowerCAmelCase : List[Any]=True , **lowerCAmelCase : Optional[Any] , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(
lowerCAmelCase , lowerCAmelCase , tokenizer_file=lowerCAmelCase , errors=lowerCAmelCase , bos_token=lowerCAmelCase , eos_token=lowerCAmelCase , sep_token=lowerCAmelCase , cls_token=lowerCAmelCase , unk_token=lowerCAmelCase , pad_token=lowerCAmelCase , mask_token=lowerCAmelCase , add_prefix_space=lowerCAmelCase , trim_offsets=lowerCAmelCase , **lowerCAmelCase , )
lowercase__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get('add_prefix_space' , lowerCAmelCase) != add_prefix_space:
lowercase__ = getattr(lowerCAmelCase , pre_tok_state.pop('type'))
lowercase__ = add_prefix_space
lowercase__ = pre_tok_class(**lowerCAmelCase)
lowercase__ = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
lowercase__ = 'post_processor'
lowercase__ = getattr(self.backend_tokenizer , lowerCAmelCase , lowerCAmelCase)
if tokenizer_component_instance:
lowercase__ = json.loads(tokenizer_component_instance.__getstate__())
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
lowercase__ = tuple(state['sep'])
if "cls" in state:
lowercase__ = tuple(state['cls'])
lowercase__ = False
if state.get('add_prefix_space' , lowerCAmelCase) != add_prefix_space:
lowercase__ = add_prefix_space
lowercase__ = True
if state.get('trim_offsets' , lowerCAmelCase) != trim_offsets:
lowercase__ = trim_offsets
lowercase__ = True
if changes_to_apply:
lowercase__ = getattr(lowerCAmelCase , state.pop('type'))
lowercase__ = component_class(**lowerCAmelCase)
setattr(self.backend_tokenizer , lowerCAmelCase , lowerCAmelCase)
@property
def UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.')
return None
return str(self._mask_token)
@mask_token.setter
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : int) -> Optional[int]:
"""simple docstring"""
lowercase__ = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase) if isinstance(lowerCAmelCase , lowerCAmelCase) else value
lowercase__ = value
def UpperCAmelCase ( self : List[str] , *lowerCAmelCase : Tuple , **lowerCAmelCase : Optional[int]) -> BatchEncoding:
"""simple docstring"""
lowercase__ = kwargs.get('is_split_into_words' , lowerCAmelCase)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'to use it with pretokenized inputs.')
return super()._batch_encode_plus(*lowerCAmelCase , **lowerCAmelCase)
def UpperCAmelCase ( self : str , *lowerCAmelCase : Tuple , **lowerCAmelCase : str) -> BatchEncoding:
"""simple docstring"""
lowercase__ = kwargs.get('is_split_into_words' , lowerCAmelCase)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'to use it with pretokenized inputs.')
return super()._encode_plus(*lowerCAmelCase , **lowerCAmelCase)
def UpperCAmelCase ( self : Dict , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None) -> Tuple[str]:
"""simple docstring"""
lowercase__ = self._tokenizer.model.save(lowerCAmelCase , name=lowerCAmelCase)
return tuple(lowerCAmelCase)
def UpperCAmelCase ( self : Any , lowerCAmelCase : str , lowerCAmelCase : Optional[int]=None) -> Tuple:
"""simple docstring"""
lowercase__ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
lowercase__ = [self.sep_token_id]
lowercase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
| 642
| 1
|
def _lowerCAmelCase ( A__ , A__ ):
while second != 0:
lowercase__ = first & second
first ^= second
lowercase__ = c << 1
return first
if __name__ == "__main__":
import doctest
doctest.testmod()
a__ : int = int(input("Enter the first number: ").strip())
a__ : List[str] = int(input("Enter the second number: ").strip())
print(F'''{add(first, second) = }''')
| 642
|
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : str = (DDIMParallelScheduler,)
A : Any = (("eta", 0.0), ("num_inference_steps", 50))
def UpperCAmelCase ( self : Union[str, Any] , **lowerCAmelCase : Optional[int]) -> Dict:
"""simple docstring"""
lowercase__ = {
'num_train_timesteps': 10_00,
'beta_start': 0.00_01,
'beta_end': 0.02,
'beta_schedule': 'linear',
'clip_sample': True,
}
config.update(**lowerCAmelCase)
return config
def UpperCAmelCase ( self : int , **lowerCAmelCase : str) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config(**lowerCAmelCase)
lowercase__ = scheduler_class(**lowerCAmelCase)
lowercase__, lowercase__ = 10, 0.0
lowercase__ = self.dummy_model()
lowercase__ = self.dummy_sample_deter
scheduler.set_timesteps(lowerCAmelCase)
for t in scheduler.timesteps:
lowercase__ = model(lowerCAmelCase , lowerCAmelCase)
lowercase__ = scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase).prev_sample
return sample
def UpperCAmelCase ( self : Tuple) -> int:
"""simple docstring"""
for timesteps in [1_00, 5_00, 10_00]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase)
def UpperCAmelCase ( self : Tuple) -> Any:
"""simple docstring"""
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=lowerCAmelCase)
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config(steps_offset=1)
lowercase__ = scheduler_class(**lowerCAmelCase)
scheduler.set_timesteps(5)
assert torch.equal(scheduler.timesteps , torch.LongTensor([8_01, 6_01, 4_01, 2_01, 1]))
def UpperCAmelCase ( self : str) -> Tuple:
"""simple docstring"""
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2]):
self.check_over_configs(beta_start=lowerCAmelCase , beta_end=lowerCAmelCase)
def UpperCAmelCase ( self : Optional[int]) -> str:
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowerCAmelCase)
def UpperCAmelCase ( self : List[str]) -> List[str]:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCAmelCase)
def UpperCAmelCase ( self : List[Any]) -> str:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowerCAmelCase)
def UpperCAmelCase ( self : Optional[int]) -> int:
"""simple docstring"""
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=lowerCAmelCase)
def UpperCAmelCase ( self : Any) -> List[str]:
"""simple docstring"""
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=lowerCAmelCase)
def UpperCAmelCase ( self : List[str]) -> Optional[int]:
"""simple docstring"""
self.check_over_configs(thresholding=lowerCAmelCase)
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=lowerCAmelCase , prediction_type=lowerCAmelCase , sample_max_value=lowerCAmelCase , )
def UpperCAmelCase ( self : int) -> Optional[Any]:
"""simple docstring"""
for t in [1, 10, 49]:
self.check_over_forward(time_step=lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> int:
"""simple docstring"""
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 5_00]):
self.check_over_forward(time_step=lowerCAmelCase , num_inference_steps=lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> List[Any]:
"""simple docstring"""
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0]):
self.check_over_forward(time_step=lowerCAmelCase , eta=lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> List[Any]:
"""simple docstring"""
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**lowerCAmelCase)
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0) - 0.0)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_20 , 4_00) - 0.1_47_71)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_80 , 9_60) - 0.3_24_60)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0) - 0.0)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 , 4_86) - 0.0_09_79)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 , 9_98) - 0.02)) < 1E-5
def UpperCAmelCase ( self : Dict) -> Tuple:
"""simple docstring"""
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**lowerCAmelCase)
lowercase__, lowercase__ = 10, 0.0
scheduler.set_timesteps(lowerCAmelCase)
lowercase__ = self.dummy_model()
lowercase__ = self.dummy_sample_deter
lowercase__ = self.dummy_sample_deter + 0.1
lowercase__ = self.dummy_sample_deter - 0.1
lowercase__ = samplea.shape[0]
lowercase__ = torch.stack([samplea, samplea, samplea] , dim=0)
lowercase__ = torch.arange(lowerCAmelCase)[0:3, None].repeat(1 , lowerCAmelCase)
lowercase__ = model(samples.flatten(0 , 1) , timesteps.flatten(0 , 1))
lowercase__ = scheduler.batch_step_no_noise(lowerCAmelCase , timesteps.flatten(0 , 1) , samples.flatten(0 , 1) , lowerCAmelCase)
lowercase__ = torch.sum(torch.abs(lowerCAmelCase))
lowercase__ = torch.mean(torch.abs(lowerCAmelCase))
assert abs(result_sum.item() - 11_47.79_04) < 1E-2
assert abs(result_mean.item() - 0.49_82) < 1E-3
def UpperCAmelCase ( self : Any) -> int:
"""simple docstring"""
lowercase__ = self.full_loop()
lowercase__ = torch.sum(torch.abs(lowerCAmelCase))
lowercase__ = torch.mean(torch.abs(lowerCAmelCase))
assert abs(result_sum.item() - 1_72.00_67) < 1E-2
assert abs(result_mean.item() - 0.22_39_67) < 1E-3
def UpperCAmelCase ( self : int) -> List[Any]:
"""simple docstring"""
lowercase__ = self.full_loop(prediction_type='v_prediction')
lowercase__ = torch.sum(torch.abs(lowerCAmelCase))
lowercase__ = torch.mean(torch.abs(lowerCAmelCase))
assert abs(result_sum.item() - 52.53_02) < 1E-2
assert abs(result_mean.item() - 0.06_84) < 1E-3
def UpperCAmelCase ( self : str) -> Dict:
"""simple docstring"""
lowercase__ = self.full_loop(set_alpha_to_one=lowerCAmelCase , beta_start=0.01)
lowercase__ = torch.sum(torch.abs(lowerCAmelCase))
lowercase__ = torch.mean(torch.abs(lowerCAmelCase))
assert abs(result_sum.item() - 1_49.82_95) < 1E-2
assert abs(result_mean.item() - 0.19_51) < 1E-3
def UpperCAmelCase ( self : str) -> List[Any]:
"""simple docstring"""
lowercase__ = self.full_loop(set_alpha_to_one=lowerCAmelCase , beta_start=0.01)
lowercase__ = torch.sum(torch.abs(lowerCAmelCase))
lowercase__ = torch.mean(torch.abs(lowerCAmelCase))
assert abs(result_sum.item() - 1_49.07_84) < 1E-2
assert abs(result_mean.item() - 0.19_41) < 1E-3
| 642
| 1
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class UpperCAmelCase__:
'''simple docstring'''
def __init__( self : Dict , lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple=13 , lowerCAmelCase : List[str]=7 , lowerCAmelCase : int=True , lowerCAmelCase : str=True , lowerCAmelCase : Union[str, Any]=True , lowerCAmelCase : str=99 , lowerCAmelCase : Tuple=32 , lowerCAmelCase : Optional[int]=5 , lowerCAmelCase : Optional[Any]=4 , lowerCAmelCase : Tuple=37 , lowerCAmelCase : Tuple="gelu" , lowerCAmelCase : str=0.1 , lowerCAmelCase : str=0.1 , lowerCAmelCase : Any=5_12 , lowerCAmelCase : Union[str, Any]=16 , lowerCAmelCase : str=2 , lowerCAmelCase : List[str]=0.02 , lowerCAmelCase : Union[str, Any]=3 , lowerCAmelCase : List[str]=4 , lowerCAmelCase : List[str]=None , ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = seq_length
lowercase__ = is_training
lowercase__ = use_token_type_ids
lowercase__ = use_labels
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = num_labels
lowercase__ = num_choices
lowercase__ = scope
lowercase__ = self.vocab_size - 1
def UpperCAmelCase ( self : Dict) -> int:
"""simple docstring"""
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
lowercase__ = None
if self.use_token_type_ids:
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
lowercase__ = None
lowercase__ = None
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
lowercase__ = ids_tensor([self.batch_size] , self.num_choices)
lowercase__ = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
lowercase__ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2)
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def UpperCAmelCase ( self : Any , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Any , lowerCAmelCase : Dict , lowerCAmelCase : Tuple , *lowerCAmelCase : Tuple) -> List[str]:
"""simple docstring"""
lowercase__ = OpenAIGPTModel(config=lowerCAmelCase)
model.to(lowerCAmelCase)
model.eval()
lowercase__ = model(lowerCAmelCase , token_type_ids=lowerCAmelCase , head_mask=lowerCAmelCase)
lowercase__ = model(lowerCAmelCase , token_type_ids=lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def UpperCAmelCase ( self : Any , lowerCAmelCase : Dict , lowerCAmelCase : List[Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Optional[Any] , *lowerCAmelCase : Union[str, Any]) -> List[str]:
"""simple docstring"""
lowercase__ = OpenAIGPTLMHeadModel(lowerCAmelCase)
model.to(lowerCAmelCase)
model.eval()
lowercase__ = model(lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : Any , lowerCAmelCase : List[Any] , lowerCAmelCase : str , lowerCAmelCase : Tuple , *lowerCAmelCase : Any) -> int:
"""simple docstring"""
lowercase__ = OpenAIGPTDoubleHeadsModel(lowerCAmelCase)
model.to(lowerCAmelCase)
model.eval()
lowercase__ = model(lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def UpperCAmelCase ( self : str , lowerCAmelCase : Dict , lowerCAmelCase : List[Any] , lowerCAmelCase : int , lowerCAmelCase : Optional[Any] , *lowerCAmelCase : Optional[int]) -> Any:
"""simple docstring"""
lowercase__ = self.num_labels
lowercase__ = OpenAIGPTForSequenceClassification(lowerCAmelCase)
model.to(lowerCAmelCase)
model.eval()
lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
lowercase__ = model(lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def UpperCAmelCase ( self : int) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
(
(
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
),
) = config_and_inputs
lowercase__ = {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'head_mask': head_mask,
}
return config, inputs_dict
@require_torch
class UpperCAmelCase__( lowerCamelCase , lowerCamelCase , lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
A : Any = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
A : Optional[Any] = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
A : Union[str, Any] = (
{
"feature-extraction": OpenAIGPTModel,
"text-classification": OpenAIGPTForSequenceClassification,
"text-generation": OpenAIGPTLMHeadModel,
"zero-shot": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def UpperCAmelCase ( self : List[str] , lowerCAmelCase : Dict , lowerCAmelCase : List[str] , lowerCAmelCase : List[str] , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int]) -> List[Any]:
"""simple docstring"""
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Tuple , lowerCAmelCase : str=False) -> Dict:
"""simple docstring"""
lowercase__ = super()._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase)
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
lowercase__ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=lowerCAmelCase , )
lowercase__ = inputs_dict['labels']
lowercase__ = inputs_dict['labels']
lowercase__ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=lowerCAmelCase , )
lowercase__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase)
return inputs_dict
def UpperCAmelCase ( self : Optional[int]) -> str:
"""simple docstring"""
lowercase__ = OpenAIGPTModelTester(self)
lowercase__ = ConfigTester(self , config_class=lowerCAmelCase , n_embd=37)
def UpperCAmelCase ( self : Optional[Any]) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase ( self : Union[str, Any]) -> List[str]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*lowerCAmelCase)
def UpperCAmelCase ( self : str) -> List[str]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*lowerCAmelCase)
def UpperCAmelCase ( self : Tuple) -> List[Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*lowerCAmelCase)
def UpperCAmelCase ( self : Optional[Any]) -> Any:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*lowerCAmelCase)
@slow
def UpperCAmelCase ( self : Any) -> List[Any]:
"""simple docstring"""
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = OpenAIGPTModel.from_pretrained(lowerCAmelCase)
self.assertIsNotNone(lowerCAmelCase)
@require_torch
class UpperCAmelCase__( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCAmelCase ( self : Any) -> str:
"""simple docstring"""
lowercase__ = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt')
model.to(lowerCAmelCase)
lowercase__ = torch.tensor([[4_81, 47_35, 5_44]] , dtype=torch.long , device=lowerCAmelCase) # the president is
lowercase__ = [
4_81,
47_35,
5_44,
2_46,
9_63,
8_70,
7_62,
2_39,
2_44,
4_04_77,
2_44,
2_49,
7_19,
8_81,
4_87,
5_44,
2_40,
2_44,
6_03,
4_81,
] # the president is a very good man. " \n " i\'m sure he is, " said the
lowercase__ = model.generate(lowerCAmelCase , do_sample=lowerCAmelCase)
self.assertListEqual(output_ids[0].tolist() , lowerCAmelCase)
| 642
|
import cva
import numpy as np
class UpperCAmelCase__:
'''simple docstring'''
def __init__( self : Union[str, Any] , lowerCAmelCase : float , lowerCAmelCase : int) -> Dict:
"""simple docstring"""
if k in (0.04, 0.06):
lowercase__ = k
lowercase__ = window_size
else:
raise ValueError('invalid k value')
def __str__( self : Tuple) -> str:
"""simple docstring"""
return str(self.k)
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : str) -> tuple[cva.Mat, list[list[int]]]:
"""simple docstring"""
lowercase__ = cva.imread(lowerCAmelCase , 0)
lowercase__, lowercase__ = img.shape
lowercase__ = []
lowercase__ = img.copy()
lowercase__ = cva.cvtColor(lowerCAmelCase , cva.COLOR_GRAY2RGB)
lowercase__, lowercase__ = np.gradient(lowerCAmelCase)
lowercase__ = dx**2
lowercase__ = dy**2
lowercase__ = dx * dy
lowercase__ = 0.04
lowercase__ = self.window_size // 2
for y in range(lowerCAmelCase , h - offset):
for x in range(lowerCAmelCase , w - offset):
lowercase__ = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowercase__ = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowercase__ = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowercase__ = (wxx * wyy) - (wxy**2)
lowercase__ = wxx + wyy
lowercase__ = det - k * (trace**2)
# Can change the value
if r > 0.5:
corner_list.append([x, y, r])
color_img.itemset((y, x, 0) , 0)
color_img.itemset((y, x, 1) , 0)
color_img.itemset((y, x, 2) , 2_55)
return color_img, corner_list
if __name__ == "__main__":
a__ : Dict = HarrisCorner(0.0_4, 3)
a__ , a__ : Dict = edge_detect.detect("path_to_image")
cva.imwrite("detect.png", color_img)
| 642
| 1
|
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
a__ : Dict = datasets.logging.get_logger(__name__)
a__ : Tuple = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n"
a__ : Any = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n"
a__ : Any = "\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting 'keep_singletons=False', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n 'mentions': mentions\n 'muc': MUC metric [Vilain et al, 1995]\n 'bcub': B-cubed [Bagga and Baldwin, 1998]\n 'ceafe': CEAFe [Luo et al., 2005]\n 'lea': LEA [Moosavi and Strube, 2016]\n 'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric('coval')\n >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',\n ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',\n ... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',\n ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',\n ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',\n ... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n"
def _lowerCAmelCase ( A__ , A__ , A__=False , A__=False , A__=True , A__=False , A__="dummy_doc" ):
lowercase__ = {doc: key_lines}
lowercase__ = {doc: sys_lines}
lowercase__ = {}
lowercase__ = 0
lowercase__ = 0
lowercase__ = 0
lowercase__ = 0
lowercase__ = 0
lowercase__ = 0
lowercase__, lowercase__ = reader.get_doc_mentions(A__ , key_doc_lines[doc] , A__ )
key_singletons_num += singletons_num
if NP_only or min_span:
lowercase__ = reader.set_annotated_parse_trees(A__ , key_doc_lines[doc] , A__ , A__ )
lowercase__, lowercase__ = reader.get_doc_mentions(A__ , sys_doc_lines[doc] , A__ )
sys_singletons_num += singletons_num
if NP_only or min_span:
lowercase__ = reader.set_annotated_parse_trees(A__ , key_doc_lines[doc] , A__ , A__ )
if remove_nested:
lowercase__, lowercase__ = reader.remove_nested_coref_mentions(A__ , A__ )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
lowercase__, lowercase__ = reader.remove_nested_coref_mentions(A__ , A__ )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
lowercase__ = reader.get_mention_assignments(A__ , A__ )
lowercase__ = reader.get_mention_assignments(A__ , A__ )
lowercase__ = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
'Number of removed nested coreferring mentions in the key '
F'''annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}''' )
logger.info(
'Number of resulting singleton clusters in the key '
F'''annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}''' )
if not keep_singletons:
logger.info(
F'''{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '''
'files, respectively' )
return doc_coref_infos
def _lowerCAmelCase ( A__ , A__ , A__ , A__ , A__ , A__ , A__ ):
lowercase__ = get_coref_infos(A__ , A__ , A__ , A__ , A__ , A__ )
lowercase__ = {}
lowercase__ = 0
lowercase__ = 0
for name, metric in metrics:
lowercase__, lowercase__, lowercase__ = evaluator.evaluate_documents(A__ , A__ , beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({F'''{name}/recall''': recall, F'''{name}/precision''': precision, F'''{name}/f1''': fa} )
logger.info(
name.ljust(10 ) , F'''Recall: {recall * 100:.2f}''' , F''' Precision: {precision * 100:.2f}''' , F''' F1: {fa * 100:.2f}''' , )
if conll_subparts_num == 3:
lowercase__ = (conll / 3) * 100
logger.info(F'''CoNLL score: {conll:.2f}''' )
output_scores.update({'conll_score': conll} )
return output_scores
def _lowerCAmelCase ( A__ ):
lowercase__ = False
for line in key_lines:
if not line.startswith('#' ):
if len(line.split() ) > 6:
lowercase__ = line.split()[5]
if not parse_col == "-":
lowercase__ = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase__( datasets.Metric ):
'''simple docstring'''
def UpperCAmelCase ( self : Any) -> List[str]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string')),
'references': datasets.Sequence(datasets.Value('string')),
}) , codebase_urls=['https://github.com/ns-moosavi/coval'] , reference_urls=[
'https://github.com/ns-moosavi/coval',
'https://www.aclweb.org/anthology/P16-1060',
'http://www.conll.cemantix.org/2012/data.html',
] , )
def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : Any , lowerCAmelCase : int , lowerCAmelCase : Dict=True , lowerCAmelCase : Optional[Any]=False , lowerCAmelCase : Optional[int]=False , lowerCAmelCase : int=False) -> Optional[int]:
"""simple docstring"""
lowercase__ = [
('mentions', evaluator.mentions),
('muc', evaluator.muc),
('bcub', evaluator.b_cubed),
('ceafe', evaluator.ceafe),
('lea', evaluator.lea),
]
if min_span:
lowercase__ = util.check_gold_parse_annotation(lowerCAmelCase)
if not has_gold_parse:
raise NotImplementedError('References should have gold parse annotation to use \'min_span\'.')
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
lowercase__ = evaluate(
key_lines=lowerCAmelCase , sys_lines=lowerCAmelCase , metrics=lowerCAmelCase , NP_only=lowerCAmelCase , remove_nested=lowerCAmelCase , keep_singletons=lowerCAmelCase , min_span=lowerCAmelCase , )
return score
| 642
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ : Dict = logging.get_logger(__name__)
a__ : List[Any] = {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : int = "speech_to_text"
A : Optional[Any] = ["past_key_values"]
A : Optional[int] = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : Optional[int] , lowerCAmelCase : Tuple=1_00_00 , lowerCAmelCase : int=12 , lowerCAmelCase : int=20_48 , lowerCAmelCase : Union[str, Any]=4 , lowerCAmelCase : str=6 , lowerCAmelCase : Dict=20_48 , lowerCAmelCase : Dict=4 , lowerCAmelCase : Optional[int]=0.0 , lowerCAmelCase : Union[str, Any]=0.0 , lowerCAmelCase : int=True , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : Dict="relu" , lowerCAmelCase : Tuple=2_56 , lowerCAmelCase : Dict=0.1 , lowerCAmelCase : Optional[Any]=0.0 , lowerCAmelCase : List[Any]=0.0 , lowerCAmelCase : Any=0.02 , lowerCAmelCase : List[Any]=2 , lowerCAmelCase : Tuple=True , lowerCAmelCase : Tuple=1 , lowerCAmelCase : List[str]=0 , lowerCAmelCase : Union[str, Any]=2 , lowerCAmelCase : Any=60_00 , lowerCAmelCase : Optional[int]=10_24 , lowerCAmelCase : Optional[Any]=2 , lowerCAmelCase : Optional[Any]=(5, 5) , lowerCAmelCase : Union[str, Any]=10_24 , lowerCAmelCase : List[Any]=80 , lowerCAmelCase : List[str]=1 , **lowerCAmelCase : List[str] , ) -> Dict:
"""simple docstring"""
lowercase__ = vocab_size
lowercase__ = d_model
lowercase__ = encoder_ffn_dim
lowercase__ = encoder_layers
lowercase__ = encoder_attention_heads
lowercase__ = decoder_ffn_dim
lowercase__ = decoder_layers
lowercase__ = decoder_attention_heads
lowercase__ = dropout
lowercase__ = attention_dropout
lowercase__ = activation_dropout
lowercase__ = activation_function
lowercase__ = init_std
lowercase__ = encoder_layerdrop
lowercase__ = decoder_layerdrop
lowercase__ = use_cache
lowercase__ = encoder_layers
lowercase__ = scale_embedding # scale factor will be sqrt(d_model) if True
lowercase__ = max_source_positions
lowercase__ = max_target_positions
lowercase__ = num_conv_layers
lowercase__ = list(lowerCAmelCase)
lowercase__ = conv_channels
lowercase__ = input_feat_per_channel
lowercase__ = input_channels
if len(self.conv_kernel_sizes) != self.num_conv_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` '
f'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, '''
f'''`config.num_conv_layers = {self.num_conv_layers}`.''')
super().__init__(
pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , is_encoder_decoder=lowerCAmelCase , decoder_start_token_id=lowerCAmelCase , **lowerCAmelCase , )
| 642
| 1
|
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
a__ : str = logging.get_logger(__name__)
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : Optional[int]) -> Optional[int]:
"""simple docstring"""
if isinstance(lowerCAmelCase , lowerCAmelCase):
lowercase__ = [label.strip() for label in labels.split(',') if label.strip()]
return labels
def __call__( self : Optional[int] , lowerCAmelCase : List[Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[Any]) -> List[Any]:
"""simple docstring"""
if len(lowerCAmelCase) == 0 or len(lowerCAmelCase) == 0:
raise ValueError('You must include at least one label and at least one sequence.')
if hypothesis_template.format(labels[0]) == hypothesis_template:
raise ValueError(
(
'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
'Make sure the passed template includes formatting syntax such as {{}} where the label should go.'
).format(lowerCAmelCase))
if isinstance(lowerCAmelCase , lowerCAmelCase):
lowercase__ = [sequences]
lowercase__ = []
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(lowerCAmelCase)] for label in labels])
return sequence_pairs, sequences
@add_end_docstrings(lowerCamelCase )
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
def __init__( self : Optional[Any] , lowerCAmelCase : Dict=ZeroShotClassificationArgumentHandler() , *lowerCAmelCase : Tuple , **lowerCAmelCase : Union[str, Any]) -> Dict:
"""simple docstring"""
lowercase__ = args_parser
super().__init__(*lowerCAmelCase , **lowerCAmelCase)
if self.entailment_id == -1:
logger.warning(
'Failed to determine \'entailment\' label id from the label2id mapping in the model config. Setting to '
'-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.')
@property
def UpperCAmelCase ( self : Union[str, Any]) -> List[str]:
"""simple docstring"""
for label, ind in self.model.config.labelaid.items():
if label.lower().startswith('entail'):
return ind
return -1
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : int , lowerCAmelCase : Optional[int]=True , lowerCAmelCase : int=True , lowerCAmelCase : Dict=TruncationStrategy.ONLY_FIRST , **lowerCAmelCase : Any) -> Tuple:
"""simple docstring"""
lowercase__ = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
'Tokenizer was not supporting padding necessary for zero-shot, attempting to use '
' `pad_token=eos_token`')
lowercase__ = self.tokenizer.eos_token
try:
lowercase__ = self.tokenizer(
lowerCAmelCase , add_special_tokens=lowerCAmelCase , return_tensors=lowerCAmelCase , padding=lowerCAmelCase , truncation=lowerCAmelCase , )
except Exception as e:
if "too short" in str(lowerCAmelCase):
# tokenizers might yell that we want to truncate
# to a value that is not even reached by the input.
# In that case we don't want to truncate.
# It seems there's not a really better way to catch that
# exception.
lowercase__ = self.tokenizer(
lowerCAmelCase , add_special_tokens=lowerCAmelCase , return_tensors=lowerCAmelCase , padding=lowerCAmelCase , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def UpperCAmelCase ( self : str , **lowerCAmelCase : List[str]) -> int:
"""simple docstring"""
if kwargs.get('multi_class' , lowerCAmelCase) is not None:
lowercase__ = kwargs['multi_class']
logger.warning(
'The `multi_class` argument has been deprecated and renamed to `multi_label`. '
'`multi_class` will be removed in a future version of Transformers.')
lowercase__ = {}
if "candidate_labels" in kwargs:
lowercase__ = self._args_parser._parse_labels(kwargs['candidate_labels'])
if "hypothesis_template" in kwargs:
lowercase__ = kwargs['hypothesis_template']
lowercase__ = {}
if "multi_label" in kwargs:
lowercase__ = kwargs['multi_label']
return preprocess_params, {}, postprocess_params
def __call__( self : Optional[int] , lowerCAmelCase : Union[str, List[str]] , *lowerCAmelCase : List[str] , **lowerCAmelCase : Tuple , ) -> List[Any]:
"""simple docstring"""
if len(lowerCAmelCase) == 0:
pass
elif len(lowerCAmelCase) == 1 and "candidate_labels" not in kwargs:
lowercase__ = args[0]
else:
raise ValueError(f'''Unable to understand extra arguments {args}''')
return super().__call__(lowerCAmelCase , **lowerCAmelCase)
def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : List[Any]=None , lowerCAmelCase : str="This example is {}.") -> str:
"""simple docstring"""
lowercase__, lowercase__ = self._args_parser(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)
for i, (candidate_label, sequence_pair) in enumerate(zip(lowerCAmelCase , lowerCAmelCase)):
lowercase__ = self._parse_and_tokenize([sequence_pair])
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(lowerCAmelCase) - 1,
**model_input,
}
def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : Union[str, Any]) -> List[Any]:
"""simple docstring"""
lowercase__ = inputs['candidate_label']
lowercase__ = inputs['sequence']
lowercase__ = {k: inputs[k] for k in self.tokenizer.model_input_names}
lowercase__ = self.model(**lowerCAmelCase)
lowercase__ = {
'candidate_label': candidate_label,
'sequence': sequence,
'is_last': inputs['is_last'],
**outputs,
}
return model_outputs
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : List[str] , lowerCAmelCase : int=False) -> List[str]:
"""simple docstring"""
lowercase__ = [outputs['candidate_label'] for outputs in model_outputs]
lowercase__ = [outputs['sequence'] for outputs in model_outputs]
lowercase__ = np.concatenate([output['logits'].numpy() for output in model_outputs])
lowercase__ = logits.shape[0]
lowercase__ = len(lowerCAmelCase)
lowercase__ = N // n
lowercase__ = logits.reshape((num_sequences, n, -1))
if multi_label or len(lowerCAmelCase) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
lowercase__ = self.entailment_id
lowercase__ = -1 if entailment_id == 0 else 0
lowercase__ = reshaped_outputs[..., [contradiction_id, entailment_id]]
lowercase__ = np.exp(lowerCAmelCase) / np.exp(lowerCAmelCase).sum(-1 , keepdims=lowerCAmelCase)
lowercase__ = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
lowercase__ = reshaped_outputs[..., self.entailment_id]
lowercase__ = np.exp(lowerCAmelCase) / np.exp(lowerCAmelCase).sum(-1 , keepdims=lowerCAmelCase)
lowercase__ = list(reversed(scores[0].argsort()))
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
| 642
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
a__ : Any = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Optional[int] = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Union[str, Any] = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Any = [
"REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ReformerAttention",
"ReformerForMaskedLM",
"ReformerForQuestionAnswering",
"ReformerForSequenceClassification",
"ReformerLayer",
"ReformerModel",
"ReformerModelWithLMHead",
"ReformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
a__ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 642
| 1
|
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
'kwargs, expected' , [
({'num_shards': 0, 'max_num_jobs': 1}, []),
({'num_shards': 10, 'max_num_jobs': 1}, [range(10 )]),
({'num_shards': 10, 'max_num_jobs': 10}, [range(A__ , i + 1 ) for i in range(10 )]),
({'num_shards': 1, 'max_num_jobs': 10}, [range(1 )]),
({'num_shards': 10, 'max_num_jobs': 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({'num_shards': 3, 'max_num_jobs': 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def _lowerCAmelCase ( A__ , A__ ):
lowercase__ = _distribute_shards(**A__ )
assert out == expected
@pytest.mark.parametrize(
'gen_kwargs, max_num_jobs, expected' , [
({'foo': 0}, 10, [{'foo': 0}]),
({'shards': [0, 1, 2, 3]}, 1, [{'shards': [0, 1, 2, 3]}]),
({'shards': [0, 1, 2, 3]}, 4, [{'shards': [0]}, {'shards': [1]}, {'shards': [2]}, {'shards': [3]}]),
({'shards': [0, 1]}, 4, [{'shards': [0]}, {'shards': [1]}]),
({'shards': [0, 1, 2, 3]}, 2, [{'shards': [0, 1]}, {'shards': [2, 3]}]),
] , )
def _lowerCAmelCase ( A__ , A__ , A__ ):
lowercase__ = _split_gen_kwargs(A__ , A__ )
assert out == expected
@pytest.mark.parametrize(
'gen_kwargs, expected' , [
({'foo': 0}, 1),
({'shards': [0]}, 1),
({'shards': [0, 1, 2, 3]}, 4),
({'shards': [0, 1, 2, 3], 'foo': 0}, 4),
({'shards': [0, 1, 2, 3], 'other': (0, 1)}, 4),
({'shards': [0, 1, 2, 3], 'shards2': [0, 1]}, RuntimeError),
] , )
def _lowerCAmelCase ( A__ , A__ ):
if expected is RuntimeError:
with pytest.raises(A__ ):
_number_of_shards_in_gen_kwargs(A__ )
else:
lowercase__ = _number_of_shards_in_gen_kwargs(A__ )
assert out == expected
| 642
|
# Imports
import numpy as np
class UpperCAmelCase__:
'''simple docstring'''
def __init__( self : Any , lowerCAmelCase : Dict=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : List[str]=None , lowerCAmelCase : List[str]=None) -> Dict:
"""simple docstring"""
self.set_matricies(red=lowerCAmelCase , green=lowerCAmelCase , blue=lowerCAmelCase , red_edge=lowerCAmelCase , nir=lowerCAmelCase)
def UpperCAmelCase ( self : Dict , lowerCAmelCase : Dict=None , lowerCAmelCase : Union[str, Any]=None , lowerCAmelCase : Tuple=None , lowerCAmelCase : str=None , lowerCAmelCase : str=None) -> int:
"""simple docstring"""
if red is not None:
lowercase__ = red
if green is not None:
lowercase__ = green
if blue is not None:
lowercase__ = blue
if red_edge is not None:
lowercase__ = red_edge
if nir is not None:
lowercase__ = nir
return True
def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : Union[str, Any]="" , lowerCAmelCase : Tuple=None , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Dict=None) -> Union[str, Any]:
"""simple docstring"""
self.set_matricies(red=lowerCAmelCase , green=lowerCAmelCase , blue=lowerCAmelCase , red_edge=lowerCAmelCase , nir=lowerCAmelCase)
lowercase__ = {
'ARVI2': self.arvaa,
'CCCI': self.ccci,
'CVI': self.cvi,
'GLI': self.gli,
'NDVI': self.ndvi,
'BNDVI': self.bndvi,
'redEdgeNDVI': self.red_edge_ndvi,
'GNDVI': self.gndvi,
'GBNDVI': self.gbndvi,
'GRNDVI': self.grndvi,
'RBNDVI': self.rbndvi,
'PNDVI': self.pndvi,
'ATSAVI': self.atsavi,
'BWDRVI': self.bwdrvi,
'CIgreen': self.ci_green,
'CIrededge': self.ci_rededge,
'CI': self.ci,
'CTVI': self.ctvi,
'GDVI': self.gdvi,
'EVI': self.evi,
'GEMI': self.gemi,
'GOSAVI': self.gosavi,
'GSAVI': self.gsavi,
'Hue': self.hue,
'IVI': self.ivi,
'IPVI': self.ipvi,
'I': self.i,
'RVI': self.rvi,
'MRVI': self.mrvi,
'MSAVI': self.m_savi,
'NormG': self.norm_g,
'NormNIR': self.norm_nir,
'NormR': self.norm_r,
'NGRDI': self.ngrdi,
'RI': self.ri,
'S': self.s,
'IF': self._if,
'DVI': self.dvi,
'TVI': self.tvi,
'NDRE': self.ndre,
}
try:
return funcs[index]()
except KeyError:
print('Index not in the list!')
return False
def UpperCAmelCase ( self : Optional[int]) -> List[str]:
"""simple docstring"""
return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
def UpperCAmelCase ( self : int) -> Any:
"""simple docstring"""
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def UpperCAmelCase ( self : str) -> Optional[int]:
"""simple docstring"""
return self.nir * (self.red / (self.green**2))
def UpperCAmelCase ( self : List[str]) -> Optional[int]:
"""simple docstring"""
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def UpperCAmelCase ( self : Tuple) -> Any:
"""simple docstring"""
return (self.nir - self.red) / (self.nir + self.red)
def UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
return (self.nir - self.blue) / (self.nir + self.blue)
def UpperCAmelCase ( self : List[Any]) -> Optional[int]:
"""simple docstring"""
return (self.redEdge - self.red) / (self.redEdge + self.red)
def UpperCAmelCase ( self : Optional[Any]) -> Optional[int]:
"""simple docstring"""
return (self.nir - self.green) / (self.nir + self.green)
def UpperCAmelCase ( self : Dict) -> int:
"""simple docstring"""
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def UpperCAmelCase ( self : Union[str, Any]) -> Optional[int]:
"""simple docstring"""
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def UpperCAmelCase ( self : Optional[Any]) -> Dict:
"""simple docstring"""
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def UpperCAmelCase ( self : Any) -> Union[str, Any]:
"""simple docstring"""
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : List[Any]=0.08 , lowerCAmelCase : Optional[int]=1.22 , lowerCAmelCase : int=0.03) -> List[Any]:
"""simple docstring"""
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def UpperCAmelCase ( self : Tuple) -> Any:
"""simple docstring"""
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def UpperCAmelCase ( self : int) -> Tuple:
"""simple docstring"""
return (self.nir / self.green) - 1
def UpperCAmelCase ( self : Any) -> str:
"""simple docstring"""
return (self.nir / self.redEdge) - 1
def UpperCAmelCase ( self : Any) -> List[str]:
"""simple docstring"""
return (self.red - self.blue) / self.red
def UpperCAmelCase ( self : Any) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))
def UpperCAmelCase ( self : List[Any]) -> str:
"""simple docstring"""
return self.nir - self.green
def UpperCAmelCase ( self : Tuple) -> List[Any]:
"""simple docstring"""
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def UpperCAmelCase ( self : Any) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.25 * n) - (self.red - 0.1_25) / (1 - self.red)
def UpperCAmelCase ( self : int , lowerCAmelCase : int=0.16) -> Dict:
"""simple docstring"""
return (self.nir - self.green) / (self.nir + self.green + y)
def UpperCAmelCase ( self : str , lowerCAmelCase : Optional[int]=0.5) -> Union[str, Any]:
"""simple docstring"""
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def UpperCAmelCase ( self : str) -> int:
"""simple docstring"""
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue))
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : int=None , lowerCAmelCase : List[str]=None) -> Tuple:
"""simple docstring"""
return (self.nir - b) / (a * self.red)
def UpperCAmelCase ( self : int) -> Dict:
"""simple docstring"""
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def UpperCAmelCase ( self : Optional[int]) -> Optional[Any]:
"""simple docstring"""
return (self.red + self.green + self.blue) / 30.5
def UpperCAmelCase ( self : int) -> str:
"""simple docstring"""
return self.nir / self.red
def UpperCAmelCase ( self : Optional[int]) -> Optional[Any]:
"""simple docstring"""
return (self.rvi() - 1) / (self.rvi() + 1)
def UpperCAmelCase ( self : Optional[int]) -> Optional[int]:
"""simple docstring"""
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def UpperCAmelCase ( self : Tuple) -> Any:
"""simple docstring"""
return self.green / (self.nir + self.red + self.green)
def UpperCAmelCase ( self : Any) -> Optional[Any]:
"""simple docstring"""
return self.nir / (self.nir + self.red + self.green)
def UpperCAmelCase ( self : List[Any]) -> Dict:
"""simple docstring"""
return self.red / (self.nir + self.red + self.green)
def UpperCAmelCase ( self : Optional[Any]) -> Any:
"""simple docstring"""
return (self.green - self.red) / (self.green + self.red)
def UpperCAmelCase ( self : Dict) -> Tuple:
"""simple docstring"""
return (self.red - self.green) / (self.red + self.green)
def UpperCAmelCase ( self : str) -> int:
"""simple docstring"""
lowercase__ = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
lowercase__ = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
return (max_value - min_value) / max_value
def UpperCAmelCase ( self : Optional[int]) -> Tuple:
"""simple docstring"""
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def UpperCAmelCase ( self : int) -> Optional[Any]:
"""simple docstring"""
return self.nir / self.red
def UpperCAmelCase ( self : Dict) -> Dict:
"""simple docstring"""
return (self.ndvi() + 0.5) ** (1 / 2)
def UpperCAmelCase ( self : str) -> List[Any]:
"""simple docstring"""
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
| 642
| 1
|
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
a__ : int = get_logger()
a__ : Optional[dict] = None
class UpperCAmelCase__( TensorFormatter[Mapping, "jax.Array", Mapping] ):
'''simple docstring'''
def __init__( self : Any , lowerCAmelCase : Tuple=None , lowerCAmelCase : Optional[int]=None , **lowerCAmelCase : Optional[Any]) -> List[str]:
"""simple docstring"""
super().__init__(features=lowerCAmelCase)
import jax
from jaxlib.xla_client import Device
if isinstance(lowerCAmelCase , lowerCAmelCase):
raise ValueError(
f'''Expected {device} to be a `str` not {type(lowerCAmelCase)}, as `jaxlib.xla_extension.Device` '''
'is not serializable neither with `pickle` nor with `dill`. Instead you can surround '
'the device with `str()` to get its string identifier that will be internally mapped '
'to the actual `jaxlib.xla_extension.Device`.')
lowercase__ = device if isinstance(lowerCAmelCase , lowerCAmelCase) else str(jax.devices()[0])
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
lowercase__ = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys()):
logger.warning(
f'''Device with string identifier {self.device} not listed among the available '''
f'''devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default '''
f'''device: {str(jax.devices()[0])}.''')
lowercase__ = str(jax.devices()[0])
lowercase__ = jnp_array_kwargs
@staticmethod
def UpperCAmelCase ( ) -> Dict[str, "jaxlib.xla_extension.Device"]:
"""simple docstring"""
import jax
return {str(lowerCAmelCase): device for device in jax.devices()}
def UpperCAmelCase ( self : Dict , lowerCAmelCase : List[str]) -> Dict:
"""simple docstring"""
import jax
import jax.numpy as jnp
if isinstance(lowerCAmelCase , lowerCAmelCase) and column:
if all(
isinstance(lowerCAmelCase , jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column):
return jnp.stack(lowerCAmelCase , axis=0)
return column
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : Optional[Any]) -> int:
"""simple docstring"""
import jax
import jax.numpy as jnp
if isinstance(lowerCAmelCase , (str, bytes, type(lowerCAmelCase))):
return value
elif isinstance(lowerCAmelCase , (np.character, np.ndarray)) and np.issubdtype(value.dtype , np.character):
return value.tolist()
lowercase__ = {}
if isinstance(lowerCAmelCase , (np.number, np.ndarray)) and np.issubdtype(value.dtype , np.integer):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_xaa:
lowercase__ = {'dtype': jnp.intaa}
else:
lowercase__ = {'dtype': jnp.intaa}
elif isinstance(lowerCAmelCase , (np.number, np.ndarray)) and np.issubdtype(value.dtype , np.floating):
lowercase__ = {'dtype': jnp.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(lowerCAmelCase , PIL.Image.Image):
lowercase__ = np.asarray(lowerCAmelCase)
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
lowercase__ = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device]):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(lowerCAmelCase , **{**default_dtype, **self.jnp_array_kwargs})
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : Union[str, Any]) -> List[str]:
"""simple docstring"""
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(lowerCAmelCase , torch.Tensor):
return self._tensorize(data_struct.detach().cpu().numpy()[()])
if hasattr(lowerCAmelCase , '__array__') and not isinstance(lowerCAmelCase , jax.Array):
lowercase__ = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(lowerCAmelCase , np.ndarray):
if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(lowerCAmelCase) for substruct in data_struct])
elif isinstance(lowerCAmelCase , (list, tuple)):
return self._consolidate([self.recursive_tensorize(lowerCAmelCase) for substruct in data_struct])
return self._tensorize(lowerCAmelCase)
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : dict) -> int:
"""simple docstring"""
return map_nested(self._recursive_tensorize , lowerCAmelCase , map_list=lowerCAmelCase)
def UpperCAmelCase ( self : Any , lowerCAmelCase : pa.Table) -> Mapping:
"""simple docstring"""
lowercase__ = self.numpy_arrow_extractor().extract_row(lowerCAmelCase)
lowercase__ = self.python_features_decoder.decode_row(lowerCAmelCase)
return self.recursive_tensorize(lowerCAmelCase)
def UpperCAmelCase ( self : Any , lowerCAmelCase : pa.Table) -> "jax.Array":
"""simple docstring"""
lowercase__ = self.numpy_arrow_extractor().extract_column(lowerCAmelCase)
lowercase__ = self.python_features_decoder.decode_column(lowerCAmelCase , pa_table.column_names[0])
lowercase__ = self.recursive_tensorize(lowerCAmelCase)
lowercase__ = self._consolidate(lowerCAmelCase)
return column
def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : pa.Table) -> Mapping:
"""simple docstring"""
lowercase__ = self.numpy_arrow_extractor().extract_batch(lowerCAmelCase)
lowercase__ = self.python_features_decoder.decode_batch(lowerCAmelCase)
lowercase__ = self.recursive_tensorize(lowerCAmelCase)
for column_name in batch:
lowercase__ = self._consolidate(batch[column_name])
return batch
| 642
|
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class UpperCAmelCase__( unittest.TestCase , lowerCamelCase ):
'''simple docstring'''
def UpperCAmelCase ( self : List[str]) -> Any:
"""simple docstring"""
lowercase__ = load_tool('text-classification')
self.tool.setup()
lowercase__ = load_tool('text-classification' , remote=lowerCAmelCase)
def UpperCAmelCase ( self : Any) -> Tuple:
"""simple docstring"""
lowercase__ = self.tool('That\'s quite cool' , ['positive', 'negative'])
self.assertEqual(lowerCAmelCase , 'positive')
def UpperCAmelCase ( self : int) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.remote_tool('That\'s quite cool' , ['positive', 'negative'])
self.assertEqual(lowerCAmelCase , 'positive')
def UpperCAmelCase ( self : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.tool(text='That\'s quite cool' , labels=['positive', 'negative'])
self.assertEqual(lowerCAmelCase , 'positive')
def UpperCAmelCase ( self : Any) -> Any:
"""simple docstring"""
lowercase__ = self.remote_tool(text='That\'s quite cool' , labels=['positive', 'negative'])
self.assertEqual(lowerCAmelCase , 'positive')
| 642
| 1
|
from math import ceil
def _lowerCAmelCase ( A__ , A__ ):
lowercase__ = list(range(0 , A__ ) )
lowercase__ = [item for sublist in list(device_map.values() ) for item in sublist]
# Duplicate check
lowercase__ = []
for i in device_map_blocks:
if device_map_blocks.count(A__ ) > 1 and i not in duplicate_blocks:
duplicate_blocks.append(A__ )
# Missing blocks
lowercase__ = [i for i in blocks if i not in device_map_blocks]
lowercase__ = [i for i in device_map_blocks if i not in blocks]
if len(A__ ) != 0:
raise ValueError(
'Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device.'
' These attention blocks were specified more than once: ' + str(A__ ) )
if len(A__ ) != 0:
raise ValueError(
'There are attention blocks for this model that are not specified in the device_map. Add these attention '
'blocks to a device on the device_map: ' + str(A__ ) )
if len(A__ ) != 0:
raise ValueError(
'The device_map contains more attention blocks than this model has. Remove these from the device_map:'
+ str(A__ ) )
def _lowerCAmelCase ( A__ , A__ ):
lowercase__ = list(range(A__ ) )
lowercase__ = int(ceil(n_layers / len(A__ ) ) )
lowercase__ = [layers[i : i + n_blocks] for i in range(0 , A__ , A__ )]
return dict(zip(A__ , A__ ) )
| 642
|
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : List[Any] = None
A : Optional[int] = None
@property
def UpperCAmelCase ( self : str) -> Union[str, Any]:
"""simple docstring"""
return self.feat_extract_tester.prepare_feat_extract_dict()
def UpperCAmelCase ( self : int) -> Any:
"""simple docstring"""
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
self.assertTrue(hasattr(lowerCAmelCase , 'feature_size'))
self.assertTrue(hasattr(lowerCAmelCase , 'sampling_rate'))
self.assertTrue(hasattr(lowerCAmelCase , 'padding_value'))
def UpperCAmelCase ( self : Union[str, Any]) -> Dict:
"""simple docstring"""
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
self.assertTrue(all(len(lowerCAmelCase) == len(lowerCAmelCase) for x, y in zip(lowerCAmelCase , processed_features[input_name])))
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCAmelCase)
lowercase__ = BatchFeature({input_name: speech_inputs} , tensor_type='np')
lowercase__ = processed_features[input_name]
if len(batch_features_input.shape) < 3:
lowercase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
@require_torch
def UpperCAmelCase ( self : Dict) -> int:
"""simple docstring"""
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCAmelCase)
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs} , tensor_type='pt')
lowercase__ = processed_features[input_name]
if len(batch_features_input.shape) < 3:
lowercase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
@require_tf
def UpperCAmelCase ( self : Optional[Any]) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCAmelCase)
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs} , tensor_type='tf')
lowercase__ = processed_features[input_name]
if len(batch_features_input.shape) < 3:
lowercase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
def UpperCAmelCase ( self : str , lowerCAmelCase : str=False) -> Union[str, Any]:
"""simple docstring"""
def _inputs_have_equal_length(lowerCAmelCase : int):
lowercase__ = len(input[0])
for input_slice in input[1:]:
if len(lowerCAmelCase) != length:
return False
return True
def _inputs_are_equal(lowerCAmelCase : Optional[Any] , lowerCAmelCase : Tuple):
if len(lowerCAmelCase) != len(lowerCAmelCase):
return False
for input_slice_a, input_slice_a in zip(lowerCAmelCase , lowerCAmelCase):
if not np.allclose(np.asarray(lowerCAmelCase) , np.asarray(lowerCAmelCase) , atol=1E-3):
return False
return True
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(numpify=lowerCAmelCase)
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
lowercase__ = self.feat_extract_tester.seq_length_diff
lowercase__ = self.feat_extract_tester.max_seq_length + pad_diff
lowercase__ = self.feat_extract_tester.min_seq_length
lowercase__ = self.feat_extract_tester.batch_size
lowercase__ = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
lowercase__ = feat_extract.pad(lowerCAmelCase , padding=lowerCAmelCase)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest')
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[-1]))
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='np')
lowercase__ = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(lowerCAmelCase):
feat_extract.pad(lowerCAmelCase , padding='max_length')[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=lowerCAmelCase , return_tensors='np')
lowercase__ = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(_inputs_are_equal(lowerCAmelCase , lowerCAmelCase))
self.assertTrue(len(input_a[0]) == pad_min_length)
self.assertTrue(len(input_a[1]) == pad_min_length + pad_diff)
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0])))
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length))
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size)
# test padding for `pad_to_multiple_of` for List[int] + numpy
lowercase__ = feat_extract.pad(lowerCAmelCase , pad_to_multiple_of=10)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , pad_to_multiple_of=10)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , pad_to_multiple_of=10 , max_length=lowerCAmelCase)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , pad_to_multiple_of=10 , max_length=lowerCAmelCase , return_tensors='np' , )
lowercase__ = input_a[input_name]
self.assertTrue(all(len(lowerCAmelCase) % 10 == 0 for x in input_a))
self.assertTrue(_inputs_are_equal(lowerCAmelCase , lowerCAmelCase))
lowercase__ = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(lowerCAmelCase) == expected_mult_pad_length for x in input_a))
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length))
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size)
# Check padding value is correct
lowercase__ = (np.ones(self.feat_extract_tester.feature_size) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0])[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length))
< 1E-3)
self.assertTrue(
abs(
np.asarray(input_a[1])[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff))
< 1E-3)
self.assertTrue(
abs(
np.asarray(input_a[2])[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff))
< 1E-3)
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1E-3)
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length))
< 1E-3)
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : Dict=False) -> str:
"""simple docstring"""
def _inputs_have_equal_length(lowerCAmelCase : int):
lowercase__ = len(input[0])
for input_slice in input[1:]:
if len(lowerCAmelCase) != length:
return False
return True
def _inputs_are_equal(lowerCAmelCase : str , lowerCAmelCase : Optional[Any]):
if len(lowerCAmelCase) != len(lowerCAmelCase):
return False
for input_slice_a, input_slice_a in zip(lowerCAmelCase , lowerCAmelCase):
if not np.allclose(np.asarray(lowerCAmelCase) , np.asarray(lowerCAmelCase) , atol=1E-3):
return False
return True
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(numpify=lowerCAmelCase)
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
# truncate to smallest
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , truncation=lowerCAmelCase)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]))
lowercase__ = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase))
# truncate to smallest with np
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , return_tensors='np' , truncation=lowerCAmelCase , )
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , return_tensors='np')
lowercase__ = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(input_a.shape[1] == len(speech_inputs[0]))
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase))
# truncate to middle
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[1]) , truncation=lowerCAmelCase , return_tensors='np' , )
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[1]) , truncation=lowerCAmelCase)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[1]) , return_tensors='np')
lowercase__ = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1]))
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(_inputs_are_equal(lowerCAmelCase , lowerCAmelCase))
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(len(input_a[-1]) == len(speech_inputs[-1]))
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(lowerCAmelCase):
feat_extract.pad(lowerCAmelCase , truncation=lowerCAmelCase)[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(lowerCAmelCase):
feat_extract.pad(lowerCAmelCase , padding='longest' , truncation=lowerCAmelCase)[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(lowerCAmelCase):
feat_extract.pad(lowerCAmelCase , padding='longest' , truncation=lowerCAmelCase)[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(lowerCAmelCase):
feat_extract.pad(lowerCAmelCase , padding='max_length' , truncation=lowerCAmelCase)[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
lowercase__ = 12
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , pad_to_multiple_of=lowerCAmelCase , truncation=lowerCAmelCase , )
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , pad_to_multiple_of=lowerCAmelCase , )
lowercase__ = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
lowercase__ = len(speech_inputs[0])
if expected_length % pad_to_multiple_of != 0:
lowercase__ = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0]) == expected_length)
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase))
def UpperCAmelCase ( self : List[str]) -> List[str]:
"""simple docstring"""
self._check_padding(numpify=lowerCAmelCase)
def UpperCAmelCase ( self : Any) -> Optional[Any]:
"""simple docstring"""
self._check_padding(numpify=lowerCAmelCase)
def UpperCAmelCase ( self : List[Any]) -> int:
"""simple docstring"""
self._check_truncation(numpify=lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> Dict:
"""simple docstring"""
self._check_truncation(numpify=lowerCAmelCase)
@require_torch
def UpperCAmelCase ( self : Dict) -> List[str]:
"""simple docstring"""
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='np')[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='pt')[input_name]
self.assertTrue(abs(input_np.astype(np.floataa).sum() - input_pt.numpy().astype(np.floataa).sum()) < 1E-2)
@require_tf
def UpperCAmelCase ( self : str) -> str:
"""simple docstring"""
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='np')[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='tf')[input_name]
self.assertTrue(abs(input_np.astype(np.floataa).sum() - input_tf.numpy().astype(np.floataa).sum()) < 1E-2)
def UpperCAmelCase ( self : Optional[Any]) -> Tuple:
"""simple docstring"""
lowercase__ = self.feat_extract_dict
lowercase__ = True
lowercase__ = self.feature_extraction_class(**lowerCAmelCase)
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowercase__ = [len(lowerCAmelCase) for x in speech_inputs]
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='np')
self.assertIn('attention_mask' , lowerCAmelCase)
self.assertListEqual(list(processed.attention_mask.shape) , list(processed[input_name].shape[:2]))
self.assertListEqual(processed.attention_mask.sum(-1).tolist() , lowerCAmelCase)
def UpperCAmelCase ( self : str) -> Tuple:
"""simple docstring"""
lowercase__ = self.feat_extract_dict
lowercase__ = True
lowercase__ = self.feature_extraction_class(**lowerCAmelCase)
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowercase__ = [len(lowerCAmelCase) for x in speech_inputs]
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
lowercase__ = min(lowerCAmelCase)
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=lowerCAmelCase , truncation=lowerCAmelCase , return_tensors='np')
self.assertIn('attention_mask' , lowerCAmelCase)
self.assertListEqual(
list(processed_pad.attention_mask.shape) , [processed_pad[input_name].shape[0], max_length])
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1).tolist() , [max_length for x in speech_inputs])
| 642
| 1
|
def _lowerCAmelCase ( A__ ):
lowercase__ = hex_num.strip()
if not hex_num:
raise ValueError('No value was passed to the function' )
lowercase__ = hex_num[0] == '-'
if is_negative:
lowercase__ = hex_num[1:]
try:
lowercase__ = int(A__ , 16 )
except ValueError:
raise ValueError('Invalid value was passed to the function' )
lowercase__ = ''
while int_num > 0:
lowercase__ = str(int_num % 2 ) + bin_str
int_num >>= 1
return int(('-' + bin_str) if is_negative else bin_str )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 642
|
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def _lowerCAmelCase ( A__ ):
lowercase__ = prime_factors(A__ )
if is_square_free(A__ ):
return -1 if len(A__ ) % 2 else 1
return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 642
| 1
|
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
a__ : Optional[int] = logging.getLogger(__name__)
torch.set_grad_enabled(False)
a__ : Tuple = "cuda" if torch.cuda.is_available() else "cpu"
def _lowerCAmelCase ( A__ , A__=100 , A__=" " ):
lowercase__ = text.split(A__ )
return [character.join(text[i : i + n] ).strip() for i in range(0 , len(A__ ) , A__ )]
def _lowerCAmelCase ( A__ ):
lowercase__, lowercase__ = [], []
for title, text in zip(documents['title'] , documents['text'] ):
if text is not None:
for passage in split_text(A__ ):
titles.append(title if title is not None else '' )
texts.append(A__ )
return {"title": titles, "text": texts}
def _lowerCAmelCase ( A__ , A__ , A__ ):
lowercase__ = ctx_tokenizer(
documents['title'] , documents['text'] , truncation=A__ , padding='longest' , return_tensors='pt' )['input_ids']
lowercase__ = ctx_encoder(input_ids.to(device=A__ ) , return_dict=A__ ).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def _lowerCAmelCase ( A__ , A__ , A__ , ):
######################################
logger.info('Step 1 - Create the dataset' )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
lowercase__ = load_dataset(
'csv' , data_files=[rag_example_args.csv_path] , split='train' , delimiter='\t' , column_names=['title', 'text'] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
lowercase__ = dataset.map(A__ , batched=A__ , num_proc=processing_args.num_proc )
# And compute the embeddings
lowercase__ = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=A__ )
lowercase__ = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
lowercase__ = Features(
{'text': Value('string' ), 'title': Value('string' ), 'embeddings': Sequence(Value('float32' ) )} ) # optional, save as float32 instead of float64 to save space
lowercase__ = dataset.map(
partial(A__ , ctx_encoder=A__ , ctx_tokenizer=A__ ) , batched=A__ , batch_size=processing_args.batch_size , features=A__ , )
# And finally save your dataset
lowercase__ = os.path.join(rag_example_args.output_dir , 'my_knowledge_dataset' )
dataset.save_to_disk(A__ )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info('Step 2 - Index the dataset' )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
lowercase__ = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index('embeddings' , custom_index=A__ )
# And save the index
lowercase__ = os.path.join(rag_example_args.output_dir , 'my_knowledge_dataset_hnsw_index.faiss' )
dataset.get_index('embeddings' ).save(A__ )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class UpperCAmelCase__:
'''simple docstring'''
A : str = field(
default=str(Path(lowerCamelCase ).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv" ) , metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"} , )
A : Optional[str] = field(
default=lowerCamelCase , metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."} , )
A : str = field(
default="facebook/rag-sequence-nq" , metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"} , )
A : str = field(
default="facebook/dpr-ctx_encoder-multiset-base" , metadata={
"help": (
"The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
" 'facebook/dpr-ctx_encoder-multiset-base'"
)
} , )
A : Optional[str] = field(
default=str(Path(lowerCamelCase ).parent / "test_run" / "dummy-kb" ) , metadata={"help": "Path to a directory where the dataset passages and the index will be saved"} , )
@dataclass
class UpperCAmelCase__:
'''simple docstring'''
A : Optional[int] = field(
default=lowerCamelCase , metadata={
"help": "The number of processes to use to split the documents into passages. Default is single process."
} , )
A : int = field(
default=16 , metadata={
"help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
} , )
@dataclass
class UpperCAmelCase__:
'''simple docstring'''
A : int = field(
default=768 , metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."} , )
A : int = field(
default=128 , metadata={
"help": (
"The number of bi-directional links created for every new element during the HNSW index construction."
)
} , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
a__ : Optional[int] = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
a__ , a__ , a__ : Optional[int] = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
a__ : int = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
| 642
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
a__ : List[str] = logging.get_logger(__name__)
a__ : List[Any] = {
"microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}
class UpperCAmelCase__( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
A : List[str] = "focalnet"
def __init__( self : Dict , lowerCAmelCase : Union[str, Any]=2_24 , lowerCAmelCase : List[str]=4 , lowerCAmelCase : int=3 , lowerCAmelCase : Union[str, Any]=96 , lowerCAmelCase : List[Any]=False , lowerCAmelCase : int=[1_92, 3_84, 7_68, 7_68] , lowerCAmelCase : str=[2, 2, 6, 2] , lowerCAmelCase : Tuple=[2, 2, 2, 2] , lowerCAmelCase : Optional[Any]=[3, 3, 3, 3] , lowerCAmelCase : int="gelu" , lowerCAmelCase : Any=4.0 , lowerCAmelCase : List[str]=0.0 , lowerCAmelCase : Union[str, Any]=0.1 , lowerCAmelCase : Union[str, Any]=False , lowerCAmelCase : Tuple=1E-4 , lowerCAmelCase : List[Any]=False , lowerCAmelCase : Optional[int]=False , lowerCAmelCase : List[str]=False , lowerCAmelCase : str=0.02 , lowerCAmelCase : Optional[int]=1E-5 , lowerCAmelCase : List[Any]=32 , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Union[str, Any]=None , **lowerCAmelCase : str , ) -> List[str]:
"""simple docstring"""
super().__init__(**lowerCAmelCase)
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = embed_dim
lowercase__ = use_conv_embed
lowercase__ = hidden_sizes
lowercase__ = depths
lowercase__ = focal_levels
lowercase__ = focal_windows
lowercase__ = hidden_act
lowercase__ = mlp_ratio
lowercase__ = hidden_dropout_prob
lowercase__ = drop_path_rate
lowercase__ = use_layerscale
lowercase__ = layerscale_value
lowercase__ = use_post_layernorm
lowercase__ = use_post_layernorm_in_modulation
lowercase__ = normalize_modulator
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = encoder_stride
lowercase__ = ['stem'] + [f'''stage{idx}''' for idx in range(1 , len(self.depths) + 1)]
lowercase__, lowercase__ = get_aligned_output_features_output_indices(
out_features=lowerCAmelCase , out_indices=lowerCAmelCase , stage_names=self.stage_names)
| 642
| 1
|
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def _lowerCAmelCase ( A__="" ):
lowercase__ = tempfile.mkdtemp()
return os.path.join(A__ , str(uuid.uuida() ) + suffix )
@require_soundfile
@require_torch
class UpperCAmelCase__( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase ( self : Tuple) -> List[str]:
"""simple docstring"""
lowercase__ = torch.rand(12 , dtype=torch.floataa) - 0.5
lowercase__ = AgentAudio(lowerCAmelCase)
lowercase__ = str(agent_type.to_string())
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(lowerCAmelCase , agent_type.to_raw() , atol=1E-4))
del agent_type
# Ensure the path remains even after the object deletion
self.assertTrue(os.path.exists(lowerCAmelCase))
# Ensure that the file contains the same value as the original tensor
lowercase__, lowercase__ = sf.read(lowerCAmelCase)
self.assertTrue(torch.allclose(lowerCAmelCase , torch.tensor(lowerCAmelCase) , atol=1E-4))
def UpperCAmelCase ( self : Any) -> str:
"""simple docstring"""
lowercase__ = torch.rand(12 , dtype=torch.floataa) - 0.5
lowercase__ = get_new_path(suffix='.wav')
sf.write(lowerCAmelCase , lowerCAmelCase , 1_60_00)
lowercase__ = AgentAudio(lowerCAmelCase)
self.assertTrue(torch.allclose(lowerCAmelCase , agent_type.to_raw() , atol=1E-4))
self.assertEqual(agent_type.to_string() , lowerCAmelCase)
@require_vision
@require_torch
class UpperCAmelCase__( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase ( self : int) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = torch.randint(0 , 2_56 , (64, 64, 3))
lowercase__ = AgentImage(lowerCAmelCase)
lowercase__ = str(agent_type.to_string())
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(lowerCAmelCase , agent_type._tensor , atol=1E-4))
self.assertIsInstance(agent_type.to_raw() , Image.Image)
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowerCAmelCase))
def UpperCAmelCase ( self : Optional[Any]) -> Any:
"""simple docstring"""
lowercase__ = Path(get_tests_dir('fixtures/tests_samples/COCO')) / '000000039769.png'
lowercase__ = Image.open(lowerCAmelCase)
lowercase__ = AgentImage(lowerCAmelCase)
self.assertTrue(path.samefile(agent_type.to_string()))
self.assertTrue(image == agent_type.to_raw())
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowerCAmelCase))
def UpperCAmelCase ( self : Dict) -> List[Any]:
"""simple docstring"""
lowercase__ = Path(get_tests_dir('fixtures/tests_samples/COCO')) / '000000039769.png'
lowercase__ = Image.open(lowerCAmelCase)
lowercase__ = AgentImage(lowerCAmelCase)
self.assertFalse(path.samefile(agent_type.to_string()))
self.assertTrue(image == agent_type.to_raw())
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowerCAmelCase))
class UpperCAmelCase__( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase ( self : Dict) -> Optional[Any]:
"""simple docstring"""
lowercase__ = 'Hey!'
lowercase__ = AgentText(lowerCAmelCase)
self.assertEqual(lowerCAmelCase , agent_type.to_string())
self.assertEqual(lowerCAmelCase , agent_type.to_raw())
self.assertEqual(lowerCAmelCase , lowerCAmelCase)
| 642
|
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a__ : Optional[int] = logging.get_logger(__name__)
a__ : Dict = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
a__ : str = {
"vocab_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
},
"merges_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
},
"tokenizer_config_file": {
"facebook/blenderbot_small-90M": (
"https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
)
},
}
a__ : Any = {"facebook/blenderbot_small-90M": 5_12}
def _lowerCAmelCase ( A__ ):
lowercase__ = set()
lowercase__ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowercase__ = char
lowercase__ = set(A__ )
return pairs
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : List[str] = VOCAB_FILES_NAMES
A : str = PRETRAINED_VOCAB_FILES_MAP
A : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : Tuple = ["input_ids", "attention_mask"]
def __init__( self : Optional[int] , lowerCAmelCase : Dict , lowerCAmelCase : Dict , lowerCAmelCase : int="__start__" , lowerCAmelCase : Dict="__end__" , lowerCAmelCase : Any="__unk__" , lowerCAmelCase : str="__null__" , **lowerCAmelCase : Optional[Any] , ) -> List[str]:
"""simple docstring"""
super().__init__(unk_token=lowerCAmelCase , bos_token=lowerCAmelCase , eos_token=lowerCAmelCase , pad_token=lowerCAmelCase , **lowerCAmelCase)
with open(lowerCAmelCase , encoding='utf-8') as vocab_handle:
lowercase__ = json.load(lowerCAmelCase)
lowercase__ = {v: k for k, v in self.encoder.items()}
with open(lowerCAmelCase , encoding='utf-8') as merges_handle:
lowercase__ = merges_handle.read().split('\n')[1:-1]
lowercase__ = [tuple(merge.split()) for merge in merges]
lowercase__ = dict(zip(lowerCAmelCase , range(len(lowerCAmelCase))))
lowercase__ = {}
@property
def UpperCAmelCase ( self : int) -> int:
"""simple docstring"""
return len(self.encoder)
def UpperCAmelCase ( self : str) -> Dict:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder)
def UpperCAmelCase ( self : str , lowerCAmelCase : str) -> str:
"""simple docstring"""
if token in self.cache:
return self.cache[token]
lowercase__ = re.sub('([.,!?()])' , R' \1' , lowerCAmelCase)
lowercase__ = re.sub('(\')' , R' \1 ' , lowerCAmelCase)
lowercase__ = re.sub(R'\s{2,}' , ' ' , lowerCAmelCase)
if "\n" in token:
lowercase__ = token.replace('\n' , ' __newln__')
lowercase__ = token.split(' ')
lowercase__ = []
for token in tokens:
if not len(lowerCAmelCase):
continue
lowercase__ = token.lower()
lowercase__ = tuple(lowerCAmelCase)
lowercase__ = tuple(list(word[:-1]) + [word[-1] + '</w>'])
lowercase__ = get_pairs(lowerCAmelCase)
if not pairs:
words.append(lowerCAmelCase)
continue
while True:
lowercase__ = min(lowerCAmelCase , key=lambda lowerCAmelCase: self.bpe_ranks.get(lowerCAmelCase , float('inf')))
if bigram not in self.bpe_ranks:
break
lowercase__, lowercase__ = bigram
lowercase__ = []
lowercase__ = 0
while i < len(lowerCAmelCase):
try:
lowercase__ = word.index(lowerCAmelCase , lowerCAmelCase)
new_word.extend(word[i:j])
lowercase__ = j
except ValueError:
new_word.extend(word[i:])
break
if word[i] == first and i < len(lowerCAmelCase) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
lowercase__ = tuple(lowerCAmelCase)
lowercase__ = new_word
if len(lowerCAmelCase) == 1:
break
else:
lowercase__ = get_pairs(lowerCAmelCase)
lowercase__ = '@@ '.join(lowerCAmelCase)
lowercase__ = word[:-4]
lowercase__ = word
words.append(lowerCAmelCase)
return " ".join(lowerCAmelCase)
def UpperCAmelCase ( self : List[str] , lowerCAmelCase : str) -> List[str]:
"""simple docstring"""
lowercase__ = []
lowercase__ = re.findall(R'\S+\n?' , lowerCAmelCase)
for token in words:
split_tokens.extend(list(self.bpe(lowerCAmelCase).split(' ')))
return split_tokens
def UpperCAmelCase ( self : int , lowerCAmelCase : str) -> int:
"""simple docstring"""
lowercase__ = token.lower()
return self.encoder.get(lowerCAmelCase , self.encoder.get(self.unk_token))
def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : int) -> str:
"""simple docstring"""
return self.decoder.get(lowerCAmelCase , self.unk_token)
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : List[str]) -> str:
"""simple docstring"""
lowercase__ = ' '.join(lowerCAmelCase).replace('@@ ' , '').strip()
return out_string
def UpperCAmelCase ( self : str , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(lowerCAmelCase):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
return
lowercase__ = os.path.join(
lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
lowercase__ = os.path.join(
lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
with open(lowerCAmelCase , 'w' , encoding='utf-8') as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase , ensure_ascii=lowerCAmelCase) + '\n')
lowercase__ = 0
with open(lowerCAmelCase , 'w' , encoding='utf-8') as writer:
writer.write('#version: 0.2\n')
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCAmelCase: kv[1]):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
' Please check that the tokenizer is not corrupted!')
lowercase__ = token_index
writer.write(' '.join(lowerCAmelCase) + '\n')
index += 1
return vocab_file, merge_file
| 642
| 1
|
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def _lowerCAmelCase ( A__ ):
for param in module.parameters():
lowercase__ = False
def _lowerCAmelCase ( ):
lowercase__ = 'cuda' if torch.cuda.is_available() else 'cpu'
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
lowercase__ = 'mps'
if device == "mps":
print(
'WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'
' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'
' with generations.' )
return device
def _lowerCAmelCase ( A__ ):
lowercase__ = plt.imshow(A__ )
fig.axes.get_xaxis().set_visible(A__ )
fig.axes.get_yaxis().set_visible(A__ )
plt.show()
def _lowerCAmelCase ( ):
lowercase__ = datetime.now()
lowercase__ = current_time.strftime('%H:%M:%S' )
return timestamp
| 642
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a__ : Optional[int] = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Any = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Dict = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : List[str] = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : int = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
a__ : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 642
| 1
|
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCAmelCase__:
'''simple docstring'''
def __init__( self : List[Any] , lowerCAmelCase : Dict , lowerCAmelCase : Tuple=13 , lowerCAmelCase : List[Any]=30 , lowerCAmelCase : List[str]=2 , lowerCAmelCase : List[str]=3 , lowerCAmelCase : Union[str, Any]=True , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : int=32 , lowerCAmelCase : int=5 , lowerCAmelCase : Dict=4 , lowerCAmelCase : Dict=37 , lowerCAmelCase : str="gelu" , lowerCAmelCase : str=0.1 , lowerCAmelCase : Any=0.1 , lowerCAmelCase : Union[str, Any]=10 , lowerCAmelCase : Dict=0.02 , lowerCAmelCase : Any=3 , lowerCAmelCase : Union[str, Any]=0.6 , lowerCAmelCase : Optional[int]=None , ) -> Dict:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = is_training
lowercase__ = use_labels
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = mask_ratio
lowercase__ = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
lowercase__ = (image_size // patch_size) ** 2
lowercase__ = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
def UpperCAmelCase ( self : List[Any]) -> int:
"""simple docstring"""
lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
lowercase__ = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self : Any) -> List[str]:
"""simple docstring"""
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def UpperCAmelCase ( self : Dict , lowerCAmelCase : Optional[Any] , lowerCAmelCase : int , lowerCAmelCase : Any) -> Dict:
"""simple docstring"""
lowercase__ = ViTMAEModel(config=lowerCAmelCase)
model.to(lowerCAmelCase)
model.eval()
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def UpperCAmelCase ( self : Any , lowerCAmelCase : Optional[int] , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int]) -> Optional[Any]:
"""simple docstring"""
lowercase__ = ViTMAEForPreTraining(lowerCAmelCase)
model.to(lowerCAmelCase)
model.eval()
lowercase__ = model(lowerCAmelCase)
lowercase__ = (self.image_size // self.patch_size) ** 2
lowercase__ = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels))
# test greyscale images
lowercase__ = 1
lowercase__ = ViTMAEForPreTraining(lowerCAmelCase)
model.to(lowerCAmelCase)
model.eval()
lowercase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
lowercase__ = model(lowerCAmelCase)
lowercase__ = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels))
def UpperCAmelCase ( self : Optional[Any]) -> Any:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
lowercase__, lowercase__, lowercase__ = config_and_inputs
lowercase__ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase__( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
A : Optional[int] = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
A : Dict = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}
A : List[Any] = False
A : List[Any] = False
A : Optional[Any] = False
A : Dict = False
def UpperCAmelCase ( self : Optional[int]) -> str:
"""simple docstring"""
lowercase__ = ViTMAEModelTester(self)
lowercase__ = ConfigTester(self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase , hidden_size=37)
def UpperCAmelCase ( self : List[str]) -> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMAE does not use inputs_embeds')
def UpperCAmelCase ( self : int) -> Union[str, Any]:
"""simple docstring"""
pass
def UpperCAmelCase ( self : str) -> List[str]:
"""simple docstring"""
lowercase__, lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(lowerCAmelCase)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
lowercase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase , nn.Linear))
def UpperCAmelCase ( self : Optional[int]) -> Optional[Any]:
"""simple docstring"""
lowercase__, lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(lowerCAmelCase)
lowercase__ = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCAmelCase)
def UpperCAmelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase)
def UpperCAmelCase ( self : List[Any]) -> List[str]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCAmelCase)
def UpperCAmelCase ( self : int , lowerCAmelCase : Any , lowerCAmelCase : str , lowerCAmelCase : str) -> int:
"""simple docstring"""
np.random.seed(2)
lowercase__ = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
lowercase__ = torch.from_numpy(lowerCAmelCase)
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
lowercase__ = pt_noise
super().check_pt_tf_models(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)
def UpperCAmelCase ( self : str) -> Any:
"""simple docstring"""
lowercase__, lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(lowerCAmelCase)
model.to(lowerCAmelCase)
model.eval()
# make random mask reproducible
torch.manual_seed(2)
with torch.no_grad():
lowercase__ = model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase))
lowercase__ = outputs[0].cpu().numpy()
lowercase__ = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCAmelCase)
lowercase__ = model_class.from_pretrained(lowerCAmelCase)
model.to(lowerCAmelCase)
# make random mask reproducible
torch.manual_seed(2)
with torch.no_grad():
lowercase__ = model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase))
# Make sure we don't have nans
lowercase__ = after_outputs[0].cpu().numpy()
lowercase__ = 0
lowercase__ = np.amax(np.abs(out_a - out_a))
self.assertLessEqual(lowerCAmelCase , 1E-5)
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.')
def UpperCAmelCase ( self : Tuple) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.')
def UpperCAmelCase ( self : Tuple) -> int:
"""simple docstring"""
pass
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.')
def UpperCAmelCase ( self : List[str]) -> int:
"""simple docstring"""
pass
@unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load')
def UpperCAmelCase ( self : Any) -> List[str]:
"""simple docstring"""
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def UpperCAmelCase ( self : Optional[int]) -> Optional[int]:
"""simple docstring"""
pass
@slow
def UpperCAmelCase ( self : int) -> Optional[Any]:
"""simple docstring"""
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = ViTMAEModel.from_pretrained(lowerCAmelCase)
self.assertIsNotNone(lowerCAmelCase)
def _lowerCAmelCase ( ):
lowercase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCAmelCase__( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCAmelCase ( self : int) -> List[Any]:
"""simple docstring"""
return ViTImageProcessor.from_pretrained('facebook/vit-mae-base') if is_vision_available() else None
@slow
def UpperCAmelCase ( self : Any) -> List[str]:
"""simple docstring"""
np.random.seed(2)
lowercase__ = ViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base').to(lowerCAmelCase)
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(images=lowerCAmelCase , return_tensors='pt').to(lowerCAmelCase)
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
lowercase__ = ViTMAEConfig()
lowercase__ = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
lowercase__ = np.random.uniform(size=(1, num_patches))
# forward pass
with torch.no_grad():
lowercase__ = model(**lowerCAmelCase , noise=torch.from_numpy(lowerCAmelCase).to(device=lowerCAmelCase))
# verify the logits
lowercase__ = torch.Size((1, 1_96, 7_68))
self.assertEqual(outputs.logits.shape , lowerCAmelCase)
lowercase__ = torch.tensor(
[[-0.05_48, -1.70_23, -0.93_25], [0.37_21, -0.56_70, -0.22_33], [0.82_35, -1.38_78, -0.35_24]])
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(lowerCAmelCase) , atol=1E-4))
| 642
|
import heapq
import sys
import numpy as np
a__ : Dict = tuple[int, int]
class UpperCAmelCase__:
'''simple docstring'''
def __init__( self : List[str]) -> Any:
"""simple docstring"""
lowercase__ = []
lowercase__ = set()
def UpperCAmelCase ( self : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
if not self.empty():
return self.elements[0][0]
else:
return float('inf')
def UpperCAmelCase ( self : int) -> str:
"""simple docstring"""
return len(self.elements) == 0
def UpperCAmelCase ( self : Dict , lowerCAmelCase : List[Any] , lowerCAmelCase : List[str]) -> List[str]:
"""simple docstring"""
if item not in self.set:
heapq.heappush(self.elements , (priority, item))
self.set.add(lowerCAmelCase)
else:
# update
# print("update", item)
lowercase__ = []
((lowercase__), (lowercase__)) = heapq.heappop(self.elements)
while x != item:
temp.append((pri, x))
((lowercase__), (lowercase__)) = heapq.heappop(self.elements)
temp.append((priority, item))
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx))
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : int) -> Tuple:
"""simple docstring"""
if item in self.set:
self.set.remove(lowerCAmelCase)
lowercase__ = []
((lowercase__), (lowercase__)) = heapq.heappop(self.elements)
while x != item:
temp.append((pro, x))
((lowercase__), (lowercase__)) = heapq.heappop(self.elements)
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy))
def UpperCAmelCase ( self : Dict) -> List[Any]:
"""simple docstring"""
return self.elements[0][1]
def UpperCAmelCase ( self : List[str]) -> str:
"""simple docstring"""
((lowercase__), (lowercase__)) = heapq.heappop(self.elements)
self.set.remove(lowerCAmelCase)
return (priority, item)
def _lowerCAmelCase ( A__ , A__ ):
# euclidean distance
lowercase__ = np.array(A__ )
lowercase__ = np.array(A__ )
return np.linalg.norm(a - b )
def _lowerCAmelCase ( A__ , A__ ):
# integer division by time variable
return consistent_heuristic(A__ , A__ ) // t
def _lowerCAmelCase ( A__ , A__ ):
# manhattan distance
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def _lowerCAmelCase ( A__ , A__ , A__ , A__ ):
lowercase__ = g_function[start] + Wa * heuristics[i](A__ , A__ )
return ans
def _lowerCAmelCase ( A__ , A__ , A__ ):
lowercase__ = np.chararray((n, n) )
for i in range(A__ ):
for j in range(A__ ):
lowercase__ = '*'
for i in range(A__ ):
for j in range(A__ ):
if (j, (n - 1) - i) in blocks:
lowercase__ = '#'
lowercase__ = '-'
lowercase__ = back_pointer[goal]
while x != start:
((lowercase__), (lowercase__)) = x
# print(x)
lowercase__ = '-'
lowercase__ = back_pointer[x]
lowercase__ = '-'
for i in range(A__ ):
for j in range(A__ ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=' ' )
print('<-- End position' , end=' ' )
else:
print(grid[i][j] , end=' ' )
print()
print('^' )
print('Start position' )
print()
print('# is an obstacle' )
print('- is the path taken by algorithm' )
print('PATH TAKEN BY THE ALGORITHM IS:-' )
lowercase__ = back_pointer[goal]
while x != start:
print(A__ , end=' ' )
lowercase__ = back_pointer[x]
print(A__ )
sys.exit()
def _lowerCAmelCase ( A__ ):
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def _lowerCAmelCase ( A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , ):
for itera in range(A__ ):
open_list[itera].remove_element(A__ )
# print("s", s)
# print("j", j)
((lowercase__), (lowercase__)) = s
lowercase__ = (x - 1, y)
lowercase__ = (x + 1, y)
lowercase__ = (x, y + 1)
lowercase__ = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(A__ ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(A__ )
lowercase__ = -1
lowercase__ = float('inf' )
if valid(A__ ) and g_function[neighbours] > g_function[s] + 1:
lowercase__ = g_function[s] + 1
lowercase__ = s
if neighbours not in close_list_anchor:
open_list[0].put(A__ , key(A__ , 0 , A__ , A__ ) )
if neighbours not in close_list_inad:
for var in range(1 , A__ ):
if key(A__ , A__ , A__ , A__ ) <= Wa * key(
A__ , 0 , A__ , A__ ):
open_list[j].put(
A__ , key(A__ , A__ , A__ , A__ ) )
def _lowerCAmelCase ( ):
lowercase__ = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
a__ : str = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
a__ : Any = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
a__ : Any = make_common_ground()
a__ : Union[str, Any] = blocks_blk
# hyper parameters
a__ : List[Any] = 1
a__ : List[str] = 1
a__ : Optional[int] = 20
a__ : Optional[Any] = 3 # one consistent and two other inconsistent
# start and end destination
a__ : Tuple = (0, 0)
a__ : str = (n - 1, n - 1)
a__ : Optional[Any] = 1
def _lowerCAmelCase ( A__ , A__ , A__ ):
lowercase__ = {start: 0, goal: float('inf' )}
lowercase__ = {start: -1, goal: -1}
lowercase__ = []
lowercase__ = set()
for i in range(A__ ):
open_list.append(PriorityQueue() )
open_list[i].put(A__ , key(A__ , A__ , A__ , A__ ) )
lowercase__ = []
lowercase__ = []
while open_list[0].minkey() < float('inf' ):
for i in range(1 , A__ ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float('inf' ):
do_something(A__ , A__ , A__ )
else:
lowercase__, lowercase__ = open_list[i].top_show()
visited.add(A__ )
expand_state(
A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , )
close_list_inad.append(A__ )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float('inf' ):
do_something(A__ , A__ , A__ )
else:
lowercase__ = open_list[0].top_show()
visited.add(A__ )
expand_state(
A__ , 0 , A__ , A__ , A__ , A__ , A__ , A__ , )
close_list_anchor.append(A__ )
print('No path found to goal' )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(A__ ):
if (j, i) in blocks:
print('#' , end=' ' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('*' , end=' ' )
else:
print('-' , end=' ' )
else:
print('*' , end=' ' )
if (j, i) == (n - 1, n - 1):
print('<-- End position' , end=' ' )
print()
print('^' )
print('Start position' )
print()
print('# is an obstacle' )
print('- is the path taken by algorithm' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 642
| 1
|
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('dataset_size' , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize('input_in_memory_max_size' , ['default', 0, 100 * 2**20, 900 * 2**20] )
def _lowerCAmelCase ( A__ , A__ , A__ ):
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , 'IN_MEMORY_MAX_SIZE' , A__ )
lowercase__ = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
lowercase__ = dataset_size < in_memory_max_size
else:
lowercase__ = False
lowercase__ = is_small_dataset(A__ )
assert result == expected
| 642
|
import math
import sys
def _lowerCAmelCase ( A__ ):
lowercase__ = ''
try:
with open(A__ , 'rb' ) as binary_file:
lowercase__ = binary_file.read()
for dat in data:
lowercase__ = F'''{dat:08b}'''
result += curr_byte
return result
except OSError:
print('File not accessible' )
sys.exit()
def _lowerCAmelCase ( A__ ):
lowercase__ = {'0': '0', '1': '1'}
lowercase__, lowercase__ = '', ''
lowercase__ = len(A__ )
for i in range(len(A__ ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
lowercase__ = lexicon[curr_string]
result += last_match_id
lowercase__ = last_match_id + '0'
if math.loga(A__ ).is_integer():
lowercase__ = {}
for curr_key in list(A__ ):
lowercase__ = lexicon.pop(A__ )
lowercase__ = new_lex
lowercase__ = last_match_id + '1'
index += 1
lowercase__ = ''
return result
def _lowerCAmelCase ( A__ , A__ ):
lowercase__ = 8
try:
with open(A__ , 'wb' ) as opened_file:
lowercase__ = [
to_write[i : i + byte_length]
for i in range(0 , len(A__ ) , A__ )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('10000000' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(A__ , 2 ).to_bytes(1 , byteorder='big' ) )
except OSError:
print('File not accessible' )
sys.exit()
def _lowerCAmelCase ( A__ ):
lowercase__ = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
lowercase__ = data_bits[counter:]
lowercase__ = data_bits[counter + 1 :]
return data_bits
def _lowerCAmelCase ( A__ , A__ ):
lowercase__ = read_file_binary(A__ )
lowercase__ = remove_prefix(A__ )
lowercase__ = decompress_data(A__ )
write_file_binary(A__ , A__ )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 642
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
a__ : Dict = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : List[Any] = ["DeiTFeatureExtractor"]
a__ : Dict = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Union[str, Any] = [
"DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DeiTForImageClassification",
"DeiTForImageClassificationWithTeacher",
"DeiTForMaskedImageModeling",
"DeiTModel",
"DeiTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Dict = [
"TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDeiTForImageClassification",
"TFDeiTForImageClassificationWithTeacher",
"TFDeiTForMaskedImageModeling",
"TFDeiTModel",
"TFDeiTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
a__ : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 642
|
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
a__ : int = logging.get_logger(__name__)
a__ : Tuple = {"vocab_file": "vocab.txt"}
a__ : int = {
"vocab_file": {
"facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
"facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
},
}
a__ : Dict = {
"facebook/esm2_t6_8M_UR50D": 10_24,
"facebook/esm2_t12_35M_UR50D": 10_24,
}
def _lowerCAmelCase ( A__ ):
with open(A__ , 'r' ) as f:
lowercase__ = f.read().splitlines()
return [l.strip() for l in lines]
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : Union[str, Any] = VOCAB_FILES_NAMES
A : str = PRETRAINED_VOCAB_FILES_MAP
A : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : List[Any] = ["input_ids", "attention_mask"]
def __init__( self : Union[str, Any] , lowerCAmelCase : Any , lowerCAmelCase : Optional[int]="<unk>" , lowerCAmelCase : Dict="<cls>" , lowerCAmelCase : List[str]="<pad>" , lowerCAmelCase : Union[str, Any]="<mask>" , lowerCAmelCase : Optional[Any]="<eos>" , **lowerCAmelCase : Any , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**lowerCAmelCase)
lowercase__ = load_vocab_file(lowerCAmelCase)
lowercase__ = dict(enumerate(self.all_tokens))
lowercase__ = {tok: ind for ind, tok in enumerate(self.all_tokens)}
lowercase__ = unk_token
lowercase__ = cls_token
lowercase__ = pad_token
lowercase__ = mask_token
lowercase__ = eos_token
lowercase__ = self.all_tokens
self._create_trie(self.unique_no_split_tokens)
def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : int) -> str:
"""simple docstring"""
return self._id_to_token.get(lowerCAmelCase , self.unk_token)
def UpperCAmelCase ( self : Dict , lowerCAmelCase : str) -> int:
"""simple docstring"""
return self._token_to_id.get(lowerCAmelCase , self._token_to_id.get(self.unk_token))
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : str , **lowerCAmelCase : Union[str, Any]) -> Dict:
"""simple docstring"""
return text.split()
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : Any=False) -> Union[str, Any]:
"""simple docstring"""
return len(self._id_to_token)
def UpperCAmelCase ( self : Tuple) -> int:
"""simple docstring"""
return {token: i for i, token in enumerate(self.all_tokens)}
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : str) -> int:
"""simple docstring"""
return self._token_to_id.get(lowerCAmelCase , self._token_to_id.get(self.unk_token))
def UpperCAmelCase ( self : Dict , lowerCAmelCase : int) -> str:
"""simple docstring"""
return self._id_to_token.get(lowerCAmelCase , self.unk_token)
def UpperCAmelCase ( self : Any , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
lowercase__ = [self.cls_token_id]
lowercase__ = [self.eos_token_id] # No sep token in ESM vocabulary
if token_ids_a is None:
if self.eos_token_id is None:
return cls + token_ids_a
else:
return cls + token_ids_a + sep
elif self.eos_token_id is None:
raise ValueError('Cannot tokenize multiple sequences when EOS token is not set!')
return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : List , lowerCAmelCase : Optional[List] = None , lowerCAmelCase : bool = False) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.')
return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
lowercase__ = [1] + ([0] * len(lowerCAmelCase)) + [1]
if token_ids_a is not None:
mask += [0] * len(lowerCAmelCase) + [1]
return mask
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int]) -> Dict:
"""simple docstring"""
lowercase__ = os.path.join(lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + 'vocab.txt')
with open(lowerCAmelCase , 'w') as f:
f.write('\n'.join(self.all_tokens))
return (vocab_file,)
@property
def UpperCAmelCase ( self : Optional[int]) -> int:
"""simple docstring"""
return self.get_vocab_size(with_added_tokens=lowerCAmelCase)
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : Union[List[str], List[AddedToken]] , lowerCAmelCase : bool = False) -> int:
"""simple docstring"""
return super()._add_tokens(lowerCAmelCase , special_tokens=lowerCAmelCase)
| 642
| 1
|
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
a__ : List[str] = 16
a__ : Dict = 32
def _lowerCAmelCase ( A__ , A__ , A__ , A__ , A__ = 16 ):
lowercase__ = AutoTokenizer.from_pretrained('bert-base-cased' )
lowercase__ = DatasetDict(
{
'train': dataset['train'].select(A__ ),
'validation': dataset['train'].select(A__ ),
'test': dataset['validation'],
} )
def tokenize_function(A__ ):
# max_length=None => use the model max length (it's actually the default)
lowercase__ = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=A__ , max_length=A__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowercase__ = datasets.map(
A__ , batched=A__ , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase__ = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(A__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowercase__ = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowercase__ = 16
elif accelerator.mixed_precision != "no":
lowercase__ = 8
else:
lowercase__ = None
return tokenizer.pad(
A__ , padding='longest' , max_length=A__ , pad_to_multiple_of=A__ , return_tensors='pt' , )
# Instantiate dataloaders.
lowercase__ = DataLoader(
tokenized_datasets['train'] , shuffle=A__ , collate_fn=A__ , batch_size=A__ )
lowercase__ = DataLoader(
tokenized_datasets['validation'] , shuffle=A__ , collate_fn=A__ , batch_size=A__ )
lowercase__ = DataLoader(
tokenized_datasets['test'] , shuffle=A__ , collate_fn=A__ , batch_size=A__ )
return train_dataloader, eval_dataloader, test_dataloader
def _lowerCAmelCase ( A__ , A__ ):
# New Code #
lowercase__ = []
# Download the dataset
lowercase__ = load_dataset('glue' , 'mrpc' )
# Create our splits
lowercase__ = StratifiedKFold(n_splits=int(args.num_folds ) )
# Initialize accelerator
lowercase__ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase__ = config['lr']
lowercase__ = int(config['num_epochs'] )
lowercase__ = int(config['seed'] )
lowercase__ = int(config['batch_size'] )
lowercase__ = evaluate.load('glue' , 'mrpc' )
# If the batch size is too big we use gradient accumulation
lowercase__ = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
lowercase__ = batch_size // MAX_GPU_BATCH_SIZE
lowercase__ = MAX_GPU_BATCH_SIZE
set_seed(A__ )
# New Code #
# Create our folds:
lowercase__ = kfold.split(np.zeros(datasets['train'].num_rows ) , datasets['train']['label'] )
lowercase__ = []
# Iterate over them
for i, (train_idxs, valid_idxs) in enumerate(A__ ):
lowercase__, lowercase__, lowercase__ = get_fold_dataloaders(
A__ , A__ , A__ , A__ , )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase__ = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=A__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowercase__ = model.to(accelerator.device )
# Instantiate optimizer
lowercase__ = AdamW(params=model.parameters() , lr=A__ )
# Instantiate scheduler
lowercase__ = get_linear_schedule_with_warmup(
optimizer=A__ , num_warmup_steps=100 , num_training_steps=(len(A__ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase__, lowercase__, lowercase__, lowercase__, lowercase__ = accelerator.prepare(
A__ , A__ , A__ , A__ , A__ )
# Now we train the model
for epoch in range(A__ ):
model.train()
for step, batch in enumerate(A__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowercase__ = model(**A__ )
lowercase__ = outputs.loss
lowercase__ = loss / gradient_accumulation_steps
accelerator.backward(A__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(A__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowercase__ = model(**A__ )
lowercase__ = outputs.logits.argmax(dim=-1 )
lowercase__, lowercase__ = accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=A__ , references=A__ , )
lowercase__ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , A__ )
# New Code #
# We also run predictions on the test set at the very end
lowercase__ = []
for step, batch in enumerate(A__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowercase__ = model(**A__ )
lowercase__ = outputs.logits
lowercase__, lowercase__ = accelerator.gather_for_metrics((predictions, batch['labels']) )
fold_predictions.append(predictions.cpu() )
if i == 0:
# We need all of the test predictions
test_references.append(references.cpu() )
# Use accelerator.print to print only on the main process.
test_predictions.append(torch.cat(A__ , dim=0 ) )
# We now need to release all our memory and get rid of the current model, optimizer, etc
accelerator.free_memory()
# New Code #
# Finally we check the accuracy of our folded results:
lowercase__ = torch.cat(A__ , dim=0 )
lowercase__ = torch.stack(A__ , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
lowercase__ = metric.compute(predictions=A__ , references=A__ )
accelerator.print('Average test metrics from all folds:' , A__ )
def _lowerCAmelCase ( ):
lowercase__ = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
'--mixed_precision' , type=A__ , default=A__ , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
# New Code #
parser.add_argument('--num_folds' , type=A__ , default=3 , help='The number of splits to perform across the dataset' )
lowercase__ = parser.parse_args()
lowercase__ = {'lr': 2E-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(A__ , A__ )
if __name__ == "__main__":
main()
| 642
|
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
a__ : int = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
a__ : Optional[Any] = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
a__ : Tuple = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase__( datasets.Metric ):
'''simple docstring'''
def UpperCAmelCase ( self : List[Any]) -> MetricInfo:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token') , id='sequence'),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token') , id='sequence') , id='references'),
}) , )
def UpperCAmelCase ( self : int , lowerCAmelCase : List[List[List[str]]] , lowerCAmelCase : List[List[str]] , lowerCAmelCase : int = 1 , lowerCAmelCase : int = 4 , ) -> Dict[str, float]:
"""simple docstring"""
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=lowerCAmelCase , hypotheses=lowerCAmelCase , min_len=lowerCAmelCase , max_len=lowerCAmelCase)
}
| 642
| 1
|
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
a__ : Dict = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
a__ : int = 5_00_03
a__ : Dict = 5_00_02
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__( lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
A : Any = PLBartTokenizer
A : str = None
A : str = False
def UpperCAmelCase ( self : Union[str, Any]) -> Tuple:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
lowercase__ = PLBartTokenizer(lowerCAmelCase , language_codes='base' , keep_accents=lowerCAmelCase)
tokenizer.save_pretrained(self.tmpdirname)
def UpperCAmelCase ( self : Optional[Any]) -> Any:
"""simple docstring"""
lowercase__ = PLBartTokenizer(lowerCAmelCase , language_codes='base' , keep_accents=lowerCAmelCase)
lowercase__ = tokenizer.tokenize('This is a test')
self.assertListEqual(lowerCAmelCase , ['▁This', '▁is', '▁a', '▁t', 'est'])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
lowercase__ = tokenizer.tokenize('I was born in 92000, and this is falsé.')
self.assertListEqual(
lowerCAmelCase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
lowercase__ = tokenizer.convert_tokens_to_ids(lowerCAmelCase)
self.assertListEqual(
lowerCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
lowercase__ = tokenizer.convert_ids_to_tokens(lowerCAmelCase)
self.assertListEqual(
lowerCAmelCase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
lowercase__ = tokenizer.vocab_size
lowercase__ = [tokenizer.convert_ids_to_tokens(lowerCAmelCase) for x in range(end - 4 , lowerCAmelCase)]
self.assertListEqual(lowerCAmelCase , ['__java__', '__python__', '__en_XX__', '<mask>'])
lowercase__ = 'java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'
lowercase__ = tokenizer(lowerCAmelCase).input_ids
self.assertEqual(
tokenizer.decode(lowerCAmelCase , skip_special_tokens=lowerCAmelCase , clean_up_tokenization_spaces=lowerCAmelCase) , lowerCAmelCase , )
def UpperCAmelCase ( self : Any) -> Optional[Any]:
"""simple docstring"""
lowercase__ = PLBartTokenizer(lowerCAmelCase , language_codes='multi' , keep_accents=lowerCAmelCase)
lowercase__ = tokenizer.tokenize('This is a test')
self.assertListEqual(lowerCAmelCase , ['▁This', '▁is', '▁a', '▁t', 'est'])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
lowercase__ = tokenizer.tokenize('I was born in 92000, and this is falsé.')
self.assertListEqual(
lowerCAmelCase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
lowercase__ = tokenizer.convert_tokens_to_ids(lowerCAmelCase)
self.assertListEqual(
lowerCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
lowercase__ = tokenizer.convert_ids_to_tokens(lowerCAmelCase)
self.assertListEqual(
lowerCAmelCase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
lowercase__ = tokenizer.vocab_size
lowercase__ = [tokenizer.convert_ids_to_tokens(lowerCAmelCase) for x in range(end - 7 , lowerCAmelCase)]
self.assertListEqual(
lowerCAmelCase , ['__java__', '__python__', '__en_XX__', '__javascript__', '__php__', '__ruby__', '__go__'])
lowercase__ = 'java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'
lowercase__ = tokenizer(lowerCAmelCase).input_ids
self.assertEqual(
tokenizer.decode(lowerCAmelCase , skip_special_tokens=lowerCAmelCase , clean_up_tokenization_spaces=lowerCAmelCase) , lowerCAmelCase , )
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__( unittest.TestCase ):
'''simple docstring'''
A : List[Any] = "uclanlp/plbart-python-en_XX"
A : List[Any] = [
"def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])",
"def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])",
]
A : Any = [
"Returns the maximum value of a b c.",
"Sums the values of a b c.",
]
A : str = [
134,
5452,
33460,
33441,
33463,
33465,
33463,
33449,
988,
20,
33456,
19,
33456,
771,
39,
4258,
889,
3318,
33441,
33463,
33465,
33463,
33449,
2471,
2,
PYTHON_CODE,
]
@classmethod
def UpperCAmelCase ( cls : Any) -> str:
"""simple docstring"""
lowercase__ = PLBartTokenizer.from_pretrained(
cls.checkpoint_name , language_codes='base' , src_lang='python' , tgt_lang='en_XX')
lowercase__ = 1
return cls
def UpperCAmelCase ( self : str) -> List[str]:
"""simple docstring"""
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['__java__'] , 5_00_01)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['__python__'] , 5_00_02)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['__en_XX__'] , 5_00_03)
def UpperCAmelCase ( self : Union[str, Any]) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase)
def UpperCAmelCase ( self : Dict) -> Dict:
"""simple docstring"""
self.assertIn(lowerCAmelCase , self.tokenizer.all_special_ids)
lowercase__ = [EN_CODE, 90_37, 3_34_42, 57, 7_52, 1_53, 14, 56, 18, 9, 2]
lowercase__ = self.tokenizer.decode(lowerCAmelCase , skip_special_tokens=lowerCAmelCase)
lowercase__ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCAmelCase)
self.assertEqual(lowerCAmelCase , lowerCAmelCase)
self.assertNotIn(self.tokenizer.eos_token , lowerCAmelCase)
def UpperCAmelCase ( self : Any) -> int:
"""simple docstring"""
lowercase__ = ['def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])' * 20]
self.assertIsInstance(src_text[0] , lowerCAmelCase)
lowercase__ = 10
lowercase__ = self.tokenizer(lowerCAmelCase , max_length=lowerCAmelCase , truncation=lowerCAmelCase).input_ids[0]
self.assertEqual(ids[-2] , 2)
self.assertEqual(ids[-1] , lowerCAmelCase)
self.assertEqual(len(lowerCAmelCase) , lowerCAmelCase)
def UpperCAmelCase ( self : Dict) -> Optional[Any]:
"""simple docstring"""
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', '__java__']) , [5_00_04, 5_00_01])
def UpperCAmelCase ( self : Optional[Any]) -> Tuple:
"""simple docstring"""
lowercase__ = tempfile.mkdtemp()
lowercase__ = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowerCAmelCase)
lowercase__ = PLBartTokenizer.from_pretrained(lowerCAmelCase)
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCAmelCase)
@require_torch
def UpperCAmelCase ( self : Any) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase , return_tensors='pt')
lowercase__ = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id)
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE])
self.assertEqual(batch.decoder_input_ids[1][0] , lowerCAmelCase)
self.assertEqual(batch.decoder_input_ids[1][-1] , 2)
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE])
@require_torch
def UpperCAmelCase ( self : Dict) -> str:
"""simple docstring"""
lowercase__ = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase , truncation=lowerCAmelCase , max_length=len(self.expected_src_tokens) , return_tensors='pt' , )
lowercase__ = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id)
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase)
self.assertEqual((2, 26) , batch.input_ids.shape)
self.assertEqual((2, 26) , batch.attention_mask.shape)
lowercase__ = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase)
self.assertEqual(2 , batch.decoder_input_ids[0, -1]) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [])
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE])
def UpperCAmelCase ( self : int) -> str:
"""simple docstring"""
lowercase__ = self.tokenizer(self.src_text , padding=lowerCAmelCase , truncation=lowerCAmelCase , max_length=3 , return_tensors='pt')
lowercase__ = self.tokenizer(
text_target=self.tgt_text , padding=lowerCAmelCase , truncation=lowerCAmelCase , max_length=10 , return_tensors='pt')
lowercase__ = targets['input_ids']
lowercase__ = shift_tokens_right(lowerCAmelCase , self.tokenizer.pad_token_id)
self.assertEqual(batch.input_ids.shape[1] , 3)
self.assertEqual(batch.decoder_input_ids.shape[1] , 10)
@require_torch
def UpperCAmelCase ( self : str) -> Tuple:
"""simple docstring"""
lowercase__ = self.tokenizer._build_translation_inputs(
'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='java')
self.assertEqual(
nested_simplify(lowerCAmelCase) , {
# A, test, EOS, en_XX
'input_ids': [[1_50, 2_42, 2, 5_00_03]],
'attention_mask': [[1, 1, 1, 1]],
# java
'forced_bos_token_id': 5_00_01,
} , )
| 642
|
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class UpperCAmelCase__:
'''simple docstring'''
def __init__( self : Tuple , lowerCAmelCase : str , lowerCAmelCase : Dict=13 , lowerCAmelCase : Dict=7 , lowerCAmelCase : int=True , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : str=True , lowerCAmelCase : int=True , lowerCAmelCase : List[Any]=99 , lowerCAmelCase : List[Any]=[1, 1, 2] , lowerCAmelCase : Optional[Any]=1 , lowerCAmelCase : int=32 , lowerCAmelCase : Union[str, Any]=4 , lowerCAmelCase : Tuple=8 , lowerCAmelCase : int=37 , lowerCAmelCase : Any="gelu_new" , lowerCAmelCase : str=0.1 , lowerCAmelCase : List[str]=0.1 , lowerCAmelCase : Dict=0.0 , lowerCAmelCase : str=5_12 , lowerCAmelCase : str=3 , lowerCAmelCase : List[Any]=0.02 , lowerCAmelCase : Union[str, Any]=3 , lowerCAmelCase : Any=4 , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Optional[int]=False , ) -> List[Any]:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = seq_length
lowercase__ = is_training
lowercase__ = use_input_mask
lowercase__ = use_token_type_ids
lowercase__ = use_labels
lowercase__ = vocab_size
lowercase__ = block_sizes
lowercase__ = num_decoder_layers
lowercase__ = d_model
lowercase__ = n_head
lowercase__ = d_head
lowercase__ = d_inner
lowercase__ = hidden_act
lowercase__ = hidden_dropout
lowercase__ = attention_dropout
lowercase__ = activation_dropout
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = 2
lowercase__ = num_labels
lowercase__ = num_choices
lowercase__ = scope
lowercase__ = initializer_std
# Used in the tests to check the size of the first attention layer
lowercase__ = n_head
# Used in the tests to check the size of the first hidden state
lowercase__ = self.d_model
# Used in the tests to check the number of output hidden states/attentions
lowercase__ = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
lowercase__ = self.num_hidden_layers + 2
def UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
lowercase__ = None
if self.use_input_mask:
lowercase__ = random_attention_mask([self.batch_size, self.seq_length])
lowercase__ = None
if self.use_token_type_ids:
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
lowercase__ = None
lowercase__ = None
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
lowercase__ = ids_tensor([self.batch_size] , self.num_choices)
lowercase__ = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def UpperCAmelCase ( self : Dict , lowerCAmelCase : List[Any] , lowerCAmelCase : Dict , lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Any , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Union[str, Any] , ) -> int:
"""simple docstring"""
lowercase__ = TFFunnelModel(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
lowercase__ = [input_ids, input_mask]
lowercase__ = model(lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model))
lowercase__ = False
lowercase__ = TFFunnelModel(config=lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model))
lowercase__ = False
lowercase__ = TFFunnelModel(config=lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model))
def UpperCAmelCase ( self : List[str] , lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int] , lowerCAmelCase : Any , lowerCAmelCase : List[str] , lowerCAmelCase : int , lowerCAmelCase : Optional[Any] , ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = TFFunnelBaseModel(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
lowercase__ = [input_ids, input_mask]
lowercase__ = model(lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model))
lowercase__ = False
lowercase__ = TFFunnelBaseModel(config=lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model))
lowercase__ = False
lowercase__ = TFFunnelBaseModel(config=lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model))
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Dict , lowerCAmelCase : List[str] , lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : Tuple , ) -> str:
"""simple docstring"""
lowercase__ = TFFunnelForPreTraining(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length))
def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : Any , lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[str] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Tuple , ) -> Optional[int]:
"""simple docstring"""
lowercase__ = TFFunnelForMaskedLM(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : int , lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Dict , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[Any] , ) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.num_labels
lowercase__ = TFFunnelForSequenceClassification(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : str , lowerCAmelCase : Optional[int] , lowerCAmelCase : int , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[str] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[Any] , ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.num_choices
lowercase__ = TFFunnelForMultipleChoice(config=lowerCAmelCase)
lowercase__ = tf.tile(tf.expand_dims(lowerCAmelCase , 1) , (1, self.num_choices, 1))
lowercase__ = tf.tile(tf.expand_dims(lowerCAmelCase , 1) , (1, self.num_choices, 1))
lowercase__ = tf.tile(tf.expand_dims(lowerCAmelCase , 1) , (1, self.num_choices, 1))
lowercase__ = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def UpperCAmelCase ( self : List[str] , lowerCAmelCase : str , lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int , lowerCAmelCase : Optional[int] , lowerCAmelCase : Any , ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.num_labels
lowercase__ = TFFunnelForTokenClassification(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def UpperCAmelCase ( self : int , lowerCAmelCase : List[str] , lowerCAmelCase : int , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : Tuple , lowerCAmelCase : Tuple , ) -> Optional[int]:
"""simple docstring"""
lowercase__ = TFFunnelForQuestionAnswering(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
(
(
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
),
) = config_and_inputs
lowercase__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase__( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
A : int = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
A : Dict = (
{
"feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
"fill-mask": TFFunnelForMaskedLM,
"question-answering": TFFunnelForQuestionAnswering,
"text-classification": TFFunnelForSequenceClassification,
"token-classification": TFFunnelForTokenClassification,
"zero-shot": TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
A : Optional[int] = False
A : Optional[int] = False
def UpperCAmelCase ( self : Tuple) -> str:
"""simple docstring"""
lowercase__ = TFFunnelModelTester(self)
lowercase__ = ConfigTester(self , config_class=lowerCAmelCase)
def UpperCAmelCase ( self : int) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase ( self : Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCAmelCase)
def UpperCAmelCase ( self : int) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase)
def UpperCAmelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase)
def UpperCAmelCase ( self : Dict) -> Dict:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase)
@require_tf
class UpperCAmelCase__( lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
A : Tuple = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
A : List[str] = False
A : int = False
def UpperCAmelCase ( self : Any) -> List[Any]:
"""simple docstring"""
lowercase__ = TFFunnelModelTester(self , base=lowerCAmelCase)
lowercase__ = ConfigTester(self , config_class=lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase ( self : Tuple) -> int:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*lowerCAmelCase)
def UpperCAmelCase ( self : int) -> str:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase)
def UpperCAmelCase ( self : List[str]) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowerCAmelCase)
| 642
| 1
|
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : Tuple = (DDPMScheduler,)
def UpperCAmelCase ( self : Union[str, Any] , **lowerCAmelCase : Optional[Any]) -> Dict:
"""simple docstring"""
lowercase__ = {
'num_train_timesteps': 10_00,
'beta_start': 0.00_01,
'beta_end': 0.02,
'beta_schedule': 'linear',
'variance_type': 'fixed_small',
'clip_sample': True,
}
config.update(**lowerCAmelCase)
return config
def UpperCAmelCase ( self : Any) -> Optional[Any]:
"""simple docstring"""
for timesteps in [1, 5, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase)
def UpperCAmelCase ( self : List[Any]) -> Optional[Any]:
"""simple docstring"""
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2]):
self.check_over_configs(beta_start=lowerCAmelCase , beta_end=lowerCAmelCase)
def UpperCAmelCase ( self : Optional[int]) -> Optional[int]:
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowerCAmelCase)
def UpperCAmelCase ( self : Optional[Any]) -> Tuple:
"""simple docstring"""
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=lowerCAmelCase)
def UpperCAmelCase ( self : Optional[int]) -> Optional[int]:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowerCAmelCase)
def UpperCAmelCase ( self : Optional[int]) -> str:
"""simple docstring"""
self.check_over_configs(thresholding=lowerCAmelCase)
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=lowerCAmelCase , prediction_type=lowerCAmelCase , sample_max_value=lowerCAmelCase , )
def UpperCAmelCase ( self : int) -> Tuple:
"""simple docstring"""
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCAmelCase)
def UpperCAmelCase ( self : Dict) -> Optional[int]:
"""simple docstring"""
for t in [0, 5_00, 9_99]:
self.check_over_forward(time_step=lowerCAmelCase)
def UpperCAmelCase ( self : Optional[Any]) -> List[Any]:
"""simple docstring"""
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**lowerCAmelCase)
assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87) - 0.0_09_79)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99) - 0.02)) < 1E-5
def UpperCAmelCase ( self : List[Any]) -> str:
"""simple docstring"""
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**lowerCAmelCase)
lowercase__ = len(lowerCAmelCase)
lowercase__ = self.dummy_model()
lowercase__ = self.dummy_sample_deter
lowercase__ = torch.manual_seed(0)
for t in reversed(range(lowerCAmelCase)):
# 1. predict noise residual
lowercase__ = model(lowerCAmelCase , lowerCAmelCase)
# 2. predict previous mean of sample x_t-1
lowercase__ = scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , generator=lowerCAmelCase).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowercase__ = pred_prev_sample
lowercase__ = torch.sum(torch.abs(lowerCAmelCase))
lowercase__ = torch.mean(torch.abs(lowerCAmelCase))
assert abs(result_sum.item() - 2_58.96_06) < 1E-2
assert abs(result_mean.item() - 0.33_72) < 1E-3
def UpperCAmelCase ( self : Union[str, Any]) -> List[str]:
"""simple docstring"""
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config(prediction_type='v_prediction')
lowercase__ = scheduler_class(**lowerCAmelCase)
lowercase__ = len(lowerCAmelCase)
lowercase__ = self.dummy_model()
lowercase__ = self.dummy_sample_deter
lowercase__ = torch.manual_seed(0)
for t in reversed(range(lowerCAmelCase)):
# 1. predict noise residual
lowercase__ = model(lowerCAmelCase , lowerCAmelCase)
# 2. predict previous mean of sample x_t-1
lowercase__ = scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , generator=lowerCAmelCase).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowercase__ = pred_prev_sample
lowercase__ = torch.sum(torch.abs(lowerCAmelCase))
lowercase__ = torch.mean(torch.abs(lowerCAmelCase))
assert abs(result_sum.item() - 2_02.02_96) < 1E-2
assert abs(result_mean.item() - 0.26_31) < 1E-3
def UpperCAmelCase ( self : Optional[Any]) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**lowerCAmelCase)
lowercase__ = [1_00, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=lowerCAmelCase)
lowercase__ = scheduler.timesteps
for i, timestep in enumerate(lowerCAmelCase):
if i == len(lowerCAmelCase) - 1:
lowercase__ = -1
else:
lowercase__ = timesteps[i + 1]
lowercase__ = scheduler.previous_timestep(lowerCAmelCase)
lowercase__ = prev_t.item()
self.assertEqual(lowerCAmelCase , lowerCAmelCase)
def UpperCAmelCase ( self : int) -> Tuple:
"""simple docstring"""
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**lowerCAmelCase)
lowercase__ = [1_00, 87, 50, 51, 0]
with self.assertRaises(lowerCAmelCase , msg='`custom_timesteps` must be in descending order.'):
scheduler.set_timesteps(timesteps=lowerCAmelCase)
def UpperCAmelCase ( self : List[str]) -> str:
"""simple docstring"""
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**lowerCAmelCase)
lowercase__ = [1_00, 87, 50, 1, 0]
lowercase__ = len(lowerCAmelCase)
with self.assertRaises(lowerCAmelCase , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.'):
scheduler.set_timesteps(num_inference_steps=lowerCAmelCase , timesteps=lowerCAmelCase)
def UpperCAmelCase ( self : List[Any]) -> List[str]:
"""simple docstring"""
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**lowerCAmelCase)
lowercase__ = [scheduler.config.num_train_timesteps]
with self.assertRaises(
lowerCAmelCase , msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}' , ):
scheduler.set_timesteps(timesteps=lowerCAmelCase)
| 642
|
def _lowerCAmelCase ( A__ , A__ , A__ ):
if principal <= 0:
raise Exception('Principal borrowed must be > 0' )
if rate_per_annum < 0:
raise Exception('Rate of interest must be >= 0' )
if years_to_repay <= 0 or not isinstance(A__ , A__ ):
raise Exception('Years to repay must be an integer > 0' )
# Yearly rate is divided by 12 to get monthly rate
lowercase__ = rate_per_annum / 12
# Years to repay is multiplied by 12 to get number of payments as payment is monthly
lowercase__ = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 642
| 1
|
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
a__ : Optional[int] = logging.get_logger(__name__)
a__ : Any = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}
a__ : List[Any] = {
"vocab_file": {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
},
"emoji_file": {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
},
}
a__ : Dict = {
"abeja/gpt-neox-japanese-2.7b": 20_48,
}
def _lowerCAmelCase ( A__ , A__ ):
with open(A__ , 'r' , encoding='utf-8' ) as f:
lowercase__ = json.loads(f.read() )
lowercase__ = collections.OrderedDict()
lowercase__ = collections.OrderedDict()
lowercase__ = collections.OrderedDict()
with open(A__ , 'r' , encoding='utf-8' ) as f:
lowercase__ = f.readlines()
lowercase__ = [[t.rstrip('\n' )] if (t == ',' or ',' not in t) else t.rstrip('\n' ).split(',' ) for t in token]
for idx, b in enumerate(A__ ):
lowercase__ = b
lowercase__ = idx
for wd in b:
lowercase__ = idx
return vocab, raw_vocab, ids_to_tokens, emoji
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : Tuple = VOCAB_FILES_NAMES
A : str = PRETRAINED_VOCAB_FILES_MAP
A : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : Optional[Any] = ["input_ids", "attention_mask"]
def __init__( self : Any , lowerCAmelCase : Dict , lowerCAmelCase : Dict , lowerCAmelCase : List[Any]="<|endoftext|>" , lowerCAmelCase : Dict="<|endoftext|>" , lowerCAmelCase : int="<|startoftext|>" , lowerCAmelCase : str="<|endoftext|>" , lowerCAmelCase : Optional[int]=False , **lowerCAmelCase : Tuple , ) -> Dict:
"""simple docstring"""
super().__init__(
unk_token=lowerCAmelCase , pad_token=lowerCAmelCase , bos_token=lowerCAmelCase , eos_token=lowerCAmelCase , do_clean_text=lowerCAmelCase , **lowerCAmelCase , )
if not os.path.isfile(lowerCAmelCase):
raise ValueError(
f'''Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained'''
' model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`')
if not os.path.isfile(lowerCAmelCase):
raise ValueError(
f'''Can\'t find a emoji file at path \'{emoji_file}\'. To load the emoji information from a Google'''
' pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`')
lowercase__ = do_clean_text
lowercase__, lowercase__, lowercase__, lowercase__ = load_vocab_and_emoji(lowerCAmelCase , lowerCAmelCase)
lowercase__ = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji)
@property
def UpperCAmelCase ( self : Any) -> List[Any]:
"""simple docstring"""
return len(self.raw_vocab)
def UpperCAmelCase ( self : str) -> str:
"""simple docstring"""
return dict(self.raw_vocab , **self.added_tokens_encoder)
def UpperCAmelCase ( self : Any , lowerCAmelCase : Dict) -> Tuple:
"""simple docstring"""
return self.subword_tokenizer.tokenize(lowerCAmelCase , clean=self.do_clean_text)
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : Optional[int]) -> Dict:
"""simple docstring"""
return self.vocab.get(lowerCAmelCase , self.vocab.get(self.unk_token))
def UpperCAmelCase ( self : int , lowerCAmelCase : List[Any]) -> List[str]:
"""simple docstring"""
return self.subword_tokenizer.convert_id_to_token(lowerCAmelCase)
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : Optional[int]) -> str:
"""simple docstring"""
lowercase__ = ''.join(lowerCAmelCase).strip()
return out_string
def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : "Conversation") -> List[int]:
"""simple docstring"""
lowercase__ = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase) + [self.eos_token_id])
if len(lowerCAmelCase) > self.model_max_length:
lowercase__ = input_ids[-self.model_max_length :]
return input_ids
def UpperCAmelCase ( self : Dict , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None) -> Tuple[str]:
"""simple docstring"""
lowercase__ = 0
if os.path.isdir(lowerCAmelCase):
lowercase__ = os.path.join(
lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
lowercase__ = os.path.join(
lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['emoji_file'])
else:
lowercase__ = (
(filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['vocab_file']
)
lowercase__ = (
(filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['emoji_file']
)
with open(lowerCAmelCase , 'w' , encoding='utf-8') as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
' Please check that the vocabulary is not corrupted!')
lowercase__ = token_index
writer.write(','.join(lowerCAmelCase) + '\n')
index += 1
with open(lowerCAmelCase , 'w' , encoding='utf-8') as writer:
json.dump(self.emoji , lowerCAmelCase)
return vocab_file, emoji_file
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
def __init__( self : str , lowerCAmelCase : List[Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : int) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = vocab # same as swe
lowercase__ = ids_to_tokens # same as bpe
lowercase__ = emoji
lowercase__ = np.max([len(lowerCAmelCase) for w in self.vocab.keys()])
lowercase__ = re.compile(R'(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)')
lowercase__ = re.compile(R'[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*')
lowercase__ = re.compile(R'[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}')
lowercase__ = re.compile(
R'([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*')
lowercase__ = re.compile(
R'(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*')
lowercase__ = re.compile(
R'((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*')
lowercase__ = '─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'
lowercase__ = '▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'
lowercase__ = str.maketrans({k: '<BLOCK>' for k in keisen + blocks})
def __len__( self : Dict) -> Dict:
"""simple docstring"""
return len(self.ids_to_tokens)
def UpperCAmelCase ( self : int , lowerCAmelCase : Dict) -> Any:
"""simple docstring"""
lowercase__ = self.content_repattera.sub('<URL>' , lowerCAmelCase)
lowercase__ = self.content_repattera.sub('<EMAIL>' , lowerCAmelCase)
lowercase__ = self.content_repattera.sub('<TEL>' , lowerCAmelCase)
lowercase__ = self.content_repattera.sub('<DATE>' , lowerCAmelCase)
lowercase__ = self.content_repattera.sub('<DATE>' , lowerCAmelCase)
lowercase__ = self.content_repattera.sub('<PRICE>' , lowerCAmelCase)
lowercase__ = content.translate(self.content_transa)
while "<BLOCK><BLOCK>" in content:
lowercase__ = content.replace('<BLOCK><BLOCK>' , '<BLOCK>')
return content
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : str=False) -> Tuple:
"""simple docstring"""
lowercase__ = text.replace(' ' , '<SP>')
lowercase__ = text.replace(' ' , '<SP>')
lowercase__ = text.replace('\r\n' , '<BR>')
lowercase__ = text.replace('\n' , '<BR>')
lowercase__ = text.replace('\r' , '<BR>')
lowercase__ = text.replace('\t' , '<TAB>')
lowercase__ = text.replace('—' , 'ー')
lowercase__ = text.replace('−' , 'ー')
for k, v in self.emoji["emoji"].items():
if k in text:
lowercase__ = text.replace(lowerCAmelCase , lowerCAmelCase)
if clean:
lowercase__ = self.clean_text(lowerCAmelCase)
def check_simbol(lowerCAmelCase : str):
lowercase__ = x.encode()
if len(lowerCAmelCase) == 1 and len(lowerCAmelCase) == 2:
lowercase__ = (int(e[0]) << 8) + int(e[1])
if (
(c >= 0XC_2_A_1 and c <= 0XC_2_B_F)
or (c >= 0XC_7_8_0 and c <= 0XC_7_8_3)
or (c >= 0XC_A_B_9 and c <= 0XC_B_B_F)
or (c >= 0XC_C_8_0 and c <= 0XC_D_A_2)
):
return True
return False
def checkuae(lowerCAmelCase : Optional[Any]):
lowercase__ = x.encode()
if len(lowerCAmelCase) == 1 and len(lowerCAmelCase) == 3:
lowercase__ = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
if c >= 0XE_2_8_0_8_0 and c <= 0XE_2_B_0_7_F:
return True
return False
lowercase__ = 0
lowercase__ = []
while pos < len(lowerCAmelCase):
lowercase__ = min(len(lowerCAmelCase) , pos + self.maxlen + 1) if text[pos] == '<' else pos + 3
lowercase__ = [] # (token_id, token, pos)
for e in range(lowerCAmelCase , lowerCAmelCase , -1):
lowercase__ = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(lowerCAmelCase) > 2:
lowercase__ = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e))
if len(lowerCAmelCase) > 0:
# the smallest token_id is adopted
lowercase__, lowercase__, lowercase__ = sorted(lowerCAmelCase , key=lambda lowerCAmelCase: x[0])[0]
result.append(lowerCAmelCase)
lowercase__ = e
else:
lowercase__ = pos + 1
lowercase__ = text[pos:end]
if check_simbol(lowerCAmelCase):
result.append('<KIGOU>')
elif checkuae(lowerCAmelCase):
result.append('<U2000U2BFF>')
else:
for i in wd.encode('utf-8'):
result.append('<|byte%d|>' % i)
lowercase__ = end
return result
def UpperCAmelCase ( self : Any , lowerCAmelCase : Any , lowerCAmelCase : List[Any]="\n") -> Optional[int]:
"""simple docstring"""
lowercase__ = []
lowercase__ = []
lowercase__ = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2]))
else:
if len(lowerCAmelCase) > 0:
words.append(bytearray(lowerCAmelCase).decode('utf-8' , errors='replace'))
lowercase__ = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji['emoji_inv'][word])
elif word == "<SP>":
words.append(' ')
elif word == "<BR>":
words.append(lowerCAmelCase)
elif word == "<TAB>":
words.append('\t')
elif word == "<BLOCK>":
words.append('▀')
elif word == "<KIGOU>":
words.append('ǀ')
elif word == "<U2000U2BFF>":
words.append('‖')
else:
words.append(lowerCAmelCase)
if len(lowerCAmelCase) > 0:
words.append(bytearray(lowerCAmelCase).decode('utf-8' , errors='replace'))
lowercase__ = ''.join(lowerCAmelCase)
return text
| 642
|
from __future__ import annotations
def _lowerCAmelCase ( A__ , A__ ):
if b == 0:
return (1, 0)
((lowercase__), (lowercase__)) = extended_euclid(A__ , a % b )
lowercase__ = a // b
return (y, x - k * y)
def _lowerCAmelCase ( A__ , A__ , A__ , A__ ):
((lowercase__), (lowercase__)) = extended_euclid(A__ , A__ )
lowercase__ = na * na
lowercase__ = ra * x * na + ra * y * na
return (n % m + m) % m
def _lowerCAmelCase ( A__ , A__ ):
((lowercase__), (lowercase__)) = extended_euclid(A__ , A__ )
if b < 0:
lowercase__ = (b % n + n) % n
return b
def _lowerCAmelCase ( A__ , A__ , A__ , A__ ):
lowercase__, lowercase__ = invert_modulo(A__ , A__ ), invert_modulo(A__ , A__ )
lowercase__ = na * na
lowercase__ = ra * x * na + ra * y * na
return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name="chinese_remainder_theorem", verbose=True)
testmod(name="chinese_remainder_theorem2", verbose=True)
testmod(name="invert_modulo", verbose=True)
testmod(name="extended_euclid", verbose=True)
| 642
| 1
|
from datetime import datetime
import requests
from bsa import BeautifulSoup
if __name__ == "__main__":
a__ : str = input("Enter image url: ").strip()
print(F'''Downloading image from {url} ...''')
a__ : Tuple = BeautifulSoup(requests.get(url).content, "html.parser")
# The image URL is in the content field of the first meta tag with property og:image
a__ : str = soup.find("meta", {"property": "og:image"})["content"]
a__ : int = requests.get(image_url).content
a__ : Dict = F'''{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg'''
with open(file_name, "wb") as fp:
fp.write(image_data)
print(F'''Done. Image saved to disk as {file_name}.''')
| 642
|
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
a__ : Union[str, Any] = logging.get_logger(__name__)
a__ : Optional[Any] = {
"google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : Union[str, Any] = "umt5"
A : List[str] = ["past_key_values"]
def __init__( self : List[Any] , lowerCAmelCase : Optional[int]=25_01_12 , lowerCAmelCase : str=5_12 , lowerCAmelCase : List[Any]=64 , lowerCAmelCase : Optional[int]=10_24 , lowerCAmelCase : Union[str, Any]=8 , lowerCAmelCase : Tuple=None , lowerCAmelCase : Optional[Any]=6 , lowerCAmelCase : int=32 , lowerCAmelCase : int=1_28 , lowerCAmelCase : List[str]=0.1 , lowerCAmelCase : List[str]=1E-6 , lowerCAmelCase : Optional[int]=1.0 , lowerCAmelCase : Optional[Any]="gated-gelu" , lowerCAmelCase : List[Any]=True , lowerCAmelCase : List[str]=True , lowerCAmelCase : List[Any]="T5Tokenizer" , lowerCAmelCase : str=True , lowerCAmelCase : Optional[int]=0 , lowerCAmelCase : Tuple=1 , lowerCAmelCase : Any=0 , **lowerCAmelCase : int , ) -> str:
"""simple docstring"""
super().__init__(
is_encoder_decoder=lowerCAmelCase , tokenizer_class=lowerCAmelCase , tie_word_embeddings=lowerCAmelCase , pad_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , decoder_start_token_id=lowerCAmelCase , **lowerCAmelCase , )
lowercase__ = vocab_size
lowercase__ = d_model
lowercase__ = d_kv
lowercase__ = d_ff
lowercase__ = num_layers
lowercase__ = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
lowercase__ = num_heads
lowercase__ = relative_attention_num_buckets
lowercase__ = relative_attention_max_distance
lowercase__ = dropout_rate
lowercase__ = layer_norm_epsilon
lowercase__ = initializer_factor
lowercase__ = feed_forward_proj
lowercase__ = use_cache
lowercase__ = self.feed_forward_proj.split('-')
lowercase__ = act_info[-1]
lowercase__ = act_info[0] == 'gated'
if len(lowerCAmelCase) > 1 and act_info[0] != "gated" or len(lowerCAmelCase) > 2:
raise ValueError(
f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
'\'gated-gelu\' or \'relu\'')
if feed_forward_proj == "gated-gelu":
lowercase__ = 'gelu_new'
@property
def UpperCAmelCase ( self : Union[str, Any]) -> Dict:
"""simple docstring"""
return self.d_model
@property
def UpperCAmelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
return self.num_heads
@property
def UpperCAmelCase ( self : Optional[int]) -> Optional[Any]:
"""simple docstring"""
return self.num_layers
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def UpperCAmelCase ( self : Optional[int]) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
lowercase__ = {
'input_ids': {0: 'batch', 1: 'encoder_sequence'},
'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
}
if self.use_past:
lowercase__ = 'past_encoder_sequence + sequence'
lowercase__ = {0: 'batch'}
lowercase__ = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
lowercase__ = {0: 'batch', 1: 'decoder_sequence'}
lowercase__ = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase , direction='inputs')
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def UpperCAmelCase ( self : int) -> int:
"""simple docstring"""
return 13
@property
def UpperCAmelCase ( self : Optional[Any]) -> float:
"""simple docstring"""
return 5E-4
| 642
| 1
|
import heapq
import sys
import numpy as np
a__ : Dict = tuple[int, int]
class UpperCAmelCase__:
'''simple docstring'''
def __init__( self : List[str]) -> Any:
"""simple docstring"""
lowercase__ = []
lowercase__ = set()
def UpperCAmelCase ( self : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
if not self.empty():
return self.elements[0][0]
else:
return float('inf')
def UpperCAmelCase ( self : int) -> str:
"""simple docstring"""
return len(self.elements) == 0
def UpperCAmelCase ( self : Dict , lowerCAmelCase : List[Any] , lowerCAmelCase : List[str]) -> List[str]:
"""simple docstring"""
if item not in self.set:
heapq.heappush(self.elements , (priority, item))
self.set.add(lowerCAmelCase)
else:
# update
# print("update", item)
lowercase__ = []
((lowercase__), (lowercase__)) = heapq.heappop(self.elements)
while x != item:
temp.append((pri, x))
((lowercase__), (lowercase__)) = heapq.heappop(self.elements)
temp.append((priority, item))
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx))
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : int) -> Tuple:
"""simple docstring"""
if item in self.set:
self.set.remove(lowerCAmelCase)
lowercase__ = []
((lowercase__), (lowercase__)) = heapq.heappop(self.elements)
while x != item:
temp.append((pro, x))
((lowercase__), (lowercase__)) = heapq.heappop(self.elements)
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy))
def UpperCAmelCase ( self : Dict) -> List[Any]:
"""simple docstring"""
return self.elements[0][1]
def UpperCAmelCase ( self : List[str]) -> str:
"""simple docstring"""
((lowercase__), (lowercase__)) = heapq.heappop(self.elements)
self.set.remove(lowerCAmelCase)
return (priority, item)
def _lowerCAmelCase ( A__ , A__ ):
# euclidean distance
lowercase__ = np.array(A__ )
lowercase__ = np.array(A__ )
return np.linalg.norm(a - b )
def _lowerCAmelCase ( A__ , A__ ):
# integer division by time variable
return consistent_heuristic(A__ , A__ ) // t
def _lowerCAmelCase ( A__ , A__ ):
# manhattan distance
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def _lowerCAmelCase ( A__ , A__ , A__ , A__ ):
lowercase__ = g_function[start] + Wa * heuristics[i](A__ , A__ )
return ans
def _lowerCAmelCase ( A__ , A__ , A__ ):
lowercase__ = np.chararray((n, n) )
for i in range(A__ ):
for j in range(A__ ):
lowercase__ = '*'
for i in range(A__ ):
for j in range(A__ ):
if (j, (n - 1) - i) in blocks:
lowercase__ = '#'
lowercase__ = '-'
lowercase__ = back_pointer[goal]
while x != start:
((lowercase__), (lowercase__)) = x
# print(x)
lowercase__ = '-'
lowercase__ = back_pointer[x]
lowercase__ = '-'
for i in range(A__ ):
for j in range(A__ ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=' ' )
print('<-- End position' , end=' ' )
else:
print(grid[i][j] , end=' ' )
print()
print('^' )
print('Start position' )
print()
print('# is an obstacle' )
print('- is the path taken by algorithm' )
print('PATH TAKEN BY THE ALGORITHM IS:-' )
lowercase__ = back_pointer[goal]
while x != start:
print(A__ , end=' ' )
lowercase__ = back_pointer[x]
print(A__ )
sys.exit()
def _lowerCAmelCase ( A__ ):
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def _lowerCAmelCase ( A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , ):
for itera in range(A__ ):
open_list[itera].remove_element(A__ )
# print("s", s)
# print("j", j)
((lowercase__), (lowercase__)) = s
lowercase__ = (x - 1, y)
lowercase__ = (x + 1, y)
lowercase__ = (x, y + 1)
lowercase__ = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(A__ ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(A__ )
lowercase__ = -1
lowercase__ = float('inf' )
if valid(A__ ) and g_function[neighbours] > g_function[s] + 1:
lowercase__ = g_function[s] + 1
lowercase__ = s
if neighbours not in close_list_anchor:
open_list[0].put(A__ , key(A__ , 0 , A__ , A__ ) )
if neighbours not in close_list_inad:
for var in range(1 , A__ ):
if key(A__ , A__ , A__ , A__ ) <= Wa * key(
A__ , 0 , A__ , A__ ):
open_list[j].put(
A__ , key(A__ , A__ , A__ , A__ ) )
def _lowerCAmelCase ( ):
lowercase__ = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
a__ : str = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
a__ : Any = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
a__ : Any = make_common_ground()
a__ : Union[str, Any] = blocks_blk
# hyper parameters
a__ : List[Any] = 1
a__ : List[str] = 1
a__ : Optional[int] = 20
a__ : Optional[Any] = 3 # one consistent and two other inconsistent
# start and end destination
a__ : Tuple = (0, 0)
a__ : str = (n - 1, n - 1)
a__ : Optional[Any] = 1
def _lowerCAmelCase ( A__ , A__ , A__ ):
lowercase__ = {start: 0, goal: float('inf' )}
lowercase__ = {start: -1, goal: -1}
lowercase__ = []
lowercase__ = set()
for i in range(A__ ):
open_list.append(PriorityQueue() )
open_list[i].put(A__ , key(A__ , A__ , A__ , A__ ) )
lowercase__ = []
lowercase__ = []
while open_list[0].minkey() < float('inf' ):
for i in range(1 , A__ ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float('inf' ):
do_something(A__ , A__ , A__ )
else:
lowercase__, lowercase__ = open_list[i].top_show()
visited.add(A__ )
expand_state(
A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , )
close_list_inad.append(A__ )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float('inf' ):
do_something(A__ , A__ , A__ )
else:
lowercase__ = open_list[0].top_show()
visited.add(A__ )
expand_state(
A__ , 0 , A__ , A__ , A__ , A__ , A__ , A__ , )
close_list_anchor.append(A__ )
print('No path found to goal' )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(A__ ):
if (j, i) in blocks:
print('#' , end=' ' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('*' , end=' ' )
else:
print('-' , end=' ' )
else:
print('*' , end=' ' )
if (j, i) == (n - 1, n - 1):
print('<-- End position' , end=' ' )
print()
print('^' )
print('Start position' )
print()
print('# is an obstacle' )
print('- is the path taken by algorithm' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 642
|
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
a__ : Any = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__( lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
A : str = XGLMTokenizer
A : List[Any] = XGLMTokenizerFast
A : int = True
A : Optional[Any] = True
def UpperCAmelCase ( self : Optional[int]) -> Optional[Any]:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
lowercase__ = XGLMTokenizer(lowerCAmelCase , keep_accents=lowerCAmelCase)
tokenizer.save_pretrained(self.tmpdirname)
def UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
lowercase__ = '<pad>'
lowercase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase) , lowerCAmelCase)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase) , lowerCAmelCase)
def UpperCAmelCase ( self : str) -> List[str]:
"""simple docstring"""
lowercase__ = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '<s>')
self.assertEqual(vocab_keys[1] , '<pad>')
self.assertEqual(len(lowerCAmelCase) , 10_08)
def UpperCAmelCase ( self : List[str]) -> str:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 10_08)
def UpperCAmelCase ( self : Optional[Any]) -> List[str]:
"""simple docstring"""
lowercase__ = XGLMTokenizer(lowerCAmelCase , keep_accents=lowerCAmelCase)
lowercase__ = tokenizer.tokenize('This is a test')
self.assertListEqual(lowerCAmelCase , ['▁This', '▁is', '▁a', '▁t', 'est'])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
lowercase__ = tokenizer.tokenize('I was born in 92000, and this is falsé.')
self.assertListEqual(
lowerCAmelCase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
lowercase__ = tokenizer.convert_tokens_to_ids(lowerCAmelCase)
self.assertListEqual(
lowerCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
lowercase__ = tokenizer.convert_ids_to_tokens(lowerCAmelCase)
self.assertListEqual(
lowerCAmelCase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
def UpperCAmelCase ( self : int) -> Dict:
"""simple docstring"""
return XGLMTokenizer.from_pretrained('facebook/xglm-564M')
def UpperCAmelCase ( self : Optional[int]) -> Dict:
"""simple docstring"""
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(lowerCAmelCase , f.name)
lowercase__ = XGLMTokenizer(f.name , keep_accents=lowerCAmelCase)
lowercase__ = pickle.dumps(lowerCAmelCase)
pickle.loads(lowerCAmelCase)
def UpperCAmelCase ( self : Optional[Any]) -> str:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_rust_tokenizer()
lowercase__ = 'I was born in 92000, and this is falsé.'
lowercase__ = tokenizer.tokenize(lowerCAmelCase)
lowercase__ = rust_tokenizer.tokenize(lowerCAmelCase)
self.assertListEqual(lowerCAmelCase , lowerCAmelCase)
lowercase__ = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase)
lowercase__ = rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase)
self.assertListEqual(lowerCAmelCase , lowerCAmelCase)
lowercase__ = self.get_rust_tokenizer()
lowercase__ = tokenizer.encode(lowerCAmelCase)
lowercase__ = rust_tokenizer.encode(lowerCAmelCase)
self.assertListEqual(lowerCAmelCase , lowerCAmelCase)
@slow
def UpperCAmelCase ( self : List[str]) -> List[str]:
"""simple docstring"""
lowercase__ = 'Hello World!'
lowercase__ = [2, 3_12_27, 44_47, 35]
self.assertListEqual(lowerCAmelCase , self.big_tokenizer.encode(lowerCAmelCase))
@slow
def UpperCAmelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'
)
# fmt: off
lowercase__ = [2, 10_18, 67, 11, 19_88, 26_17, 56_31, 2_78, 11, 34_07, 48, 7_16_30, 2_80_85, 4, 32_34, 1_57, 13, 6, 5, 6, 4, 35_26, 7_68, 15, 6_59, 57, 2_98, 39_83, 8_64, 1_29, 21, 6, 5, 1_36_75, 3_77, 6_52, 75_80, 1_03_41, 1_55, 28_17, 4_22, 16_66, 7, 16_74, 53, 1_13, 20_22_77, 1_78_92, 33, 60, 87, 4, 32_34, 1_57, 61, 26_67, 5_23_76, 19, 88, 23, 7_35]
# fmt: on
self.assertListEqual(lowerCAmelCase , self.big_tokenizer.encode(lowerCAmelCase))
@slow
def UpperCAmelCase ( self : str) -> Dict:
"""simple docstring"""
lowercase__ = {
'input_ids': [[2, 10_88_25, 11_63, 15, 8_80_10, 4_73, 1_58_98, 1_57, 1_36_72, 18_57, 3_12, 8, 23_80_21, 11_63, 53, 1_36_72, 18_57, 3_12, 8, 5_32_83, 18_23_96, 8, 1_85_66, 16, 3_67_33, 41_01, 8, 2_30, 24_40_17, 12_25_53, 7, 15, 13_25_97, 4, 2_93, 1_25_11, 76_10, 4, 34_14, 13_25_97, 9, 4, 3_23_61, 3_62, 4, 7_34, 2_85_12, 3_25_69, 18, 4, 3_23_61, 2_60_96, 1_49_82, 73, 1_87_15, 2_14_33, 23_52_61, 15, 4_92, 1_24_27, 16, 53, 1_87_15, 2_14_33, 6_54_54, 15, 2_36_59, 5_63, 16, 2_78, 5_97, 28_43, 5_95, 79_31, 18_23_96, 6_41_86, 22, 8_86, 5_95, 13_29_81, 53, 2_55_40, 34_49, 4_39_82, 3_99_01, 59_51, 8_78, 3_30, 4, 2_76_94, 8_02_69, 3_12, 53, 65_17, 1_17_80, 6_11, 2_04_08, 5], [2, 6, 13_25_97, 67, 4_28_97, 33, 5_92, 8, 16_37_29, 2_55_40, 3_61, 13_69_97, 10_95_14, 17_32_30, 7, 5_01, 60, 10_29_13, 1_96, 56_31, 2_35, 6_32_43, 4_73, 6, 23_17_57, 74, 52_77, 79_05, 53, 30_95, 3_73_17, 22, 4_54, 18_38_74, 5], [2, 2_68, 3_12_98, 4_65_30, 6, 13_29_35, 4_38_31, 7, 5_97, 32, 24, 36_88, 98_65, 5]],
'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase , model_name='facebook/xglm-564M' , padding=lowerCAmelCase , )
| 642
| 1
|
from functools import lru_cache
def _lowerCAmelCase ( A__ ):
lowercase__ = 2
lowercase__ = set()
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.add(A__ )
if n > 1:
factors.add(A__ )
return factors
@lru_cache
def _lowerCAmelCase ( A__ ):
return len(unique_prime_factors(A__ ) )
def _lowerCAmelCase ( A__ ):
return len(set(A__ ) ) in (0, 1)
def _lowerCAmelCase ( A__ ):
lowercase__ = 2
while True:
# Increment each value of a generated range
lowercase__ = [base + i for i in range(A__ )]
# Run elements through out unique_prime_factors function
# Append our target number to the end.
lowercase__ = [upf_len(A__ ) for x in group]
checker.append(A__ )
# If all numbers in the list are equal, return the group variable.
if equality(A__ ):
return group
# Increment our base variable by 1
base += 1
def _lowerCAmelCase ( A__ = 4 ):
lowercase__ = run(A__ )
return results[0] if len(A__ ) else None
if __name__ == "__main__":
print(solution())
| 642
|
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class UpperCAmelCase__:
'''simple docstring'''
def __init__( self : Optional[Any] , lowerCAmelCase : str) -> Optional[int]:
"""simple docstring"""
lowercase__ = data
lowercase__ = [0X6_7_4_5_2_3_0_1, 0XE_F_C_D_A_B_8_9, 0X9_8_B_A_D_C_F_E, 0X1_0_3_2_5_4_7_6, 0XC_3_D_2_E_1_F_0]
@staticmethod
def UpperCAmelCase ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[int]) -> str:
"""simple docstring"""
return ((n << b) | (n >> (32 - b))) & 0XF_F_F_F_F_F_F_F
def UpperCAmelCase ( self : Dict) -> Dict:
"""simple docstring"""
lowercase__ = B'\x80' + B'\x00' * (63 - (len(self.data) + 8) % 64)
lowercase__ = self.data + padding + struct.pack('>Q' , 8 * len(self.data))
return padded_data
def UpperCAmelCase ( self : int) -> Tuple:
"""simple docstring"""
return [
self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data) , 64)
]
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : int) -> List[Any]:
"""simple docstring"""
lowercase__ = list(struct.unpack('>16L' , lowerCAmelCase)) + [0] * 64
for i in range(16 , 80):
lowercase__ = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1)
return w
def UpperCAmelCase ( self : str) -> Dict:
"""simple docstring"""
lowercase__ = self.padding()
lowercase__ = self.split_blocks()
for block in self.blocks:
lowercase__ = self.expand_block(lowerCAmelCase)
lowercase__, lowercase__, lowercase__, lowercase__, lowercase__ = self.h
for i in range(0 , 80):
if 0 <= i < 20:
lowercase__ = (b & c) | ((~b) & d)
lowercase__ = 0X5_A_8_2_7_9_9_9
elif 20 <= i < 40:
lowercase__ = b ^ c ^ d
lowercase__ = 0X6_E_D_9_E_B_A_1
elif 40 <= i < 60:
lowercase__ = (b & c) | (b & d) | (c & d)
lowercase__ = 0X8_F_1_B_B_C_D_C
elif 60 <= i < 80:
lowercase__ = b ^ c ^ d
lowercase__ = 0XC_A_6_2_C_1_D_6
lowercase__, lowercase__, lowercase__, lowercase__, lowercase__ = (
self.rotate(lowerCAmelCase , 5) + f + e + k + expanded_block[i] & 0XF_F_F_F_F_F_F_F,
a,
self.rotate(lowerCAmelCase , 30),
c,
d,
)
lowercase__ = (
self.h[0] + a & 0XF_F_F_F_F_F_F_F,
self.h[1] + b & 0XF_F_F_F_F_F_F_F,
self.h[2] + c & 0XF_F_F_F_F_F_F_F,
self.h[3] + d & 0XF_F_F_F_F_F_F_F,
self.h[4] + e & 0XF_F_F_F_F_F_F_F,
)
return ("{:08x}" * 5).format(*self.h)
def _lowerCAmelCase ( ):
lowercase__ = B'Test String'
assert SHAaHash(A__ ).final_hash() == hashlib.shaa(A__ ).hexdigest() # noqa: S324
def _lowerCAmelCase ( ):
lowercase__ = argparse.ArgumentParser(description='Process some strings or files' )
parser.add_argument(
'--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
parser.add_argument('--file' , dest='input_file' , help='Hash contents of a file' )
lowercase__ = parser.parse_args()
lowercase__ = args.input_string
# In any case hash input should be a bytestring
if args.input_file:
with open(args.input_file , 'rb' ) as f:
lowercase__ = f.read()
else:
lowercase__ = bytes(A__ , 'utf-8' )
print(SHAaHash(A__ ).final_hash() )
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
| 642
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a__ : Tuple = {
"configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Union[str, Any] = [
"SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Swinv2ForImageClassification",
"Swinv2ForMaskedImageModeling",
"Swinv2Model",
"Swinv2PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinva import (
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinvaForImageClassification,
SwinvaForMaskedImageModeling,
SwinvaModel,
SwinvaPreTrainedModel,
)
else:
import sys
a__ : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 642
|
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
a__ : List[Any] = logging.get_logger(__name__)
a__ : Optional[int] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
a__ : List[Any] = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
"tokenizer_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json",
},
}
a__ : int = {
"facebook/bart-base": 10_24,
"facebook/bart-large": 10_24,
"facebook/bart-large-mnli": 10_24,
"facebook/bart-large-cnn": 10_24,
"facebook/bart-large-xsum": 10_24,
"yjernite/bart_eli5": 10_24,
}
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : Optional[Any] = VOCAB_FILES_NAMES
A : Dict = PRETRAINED_VOCAB_FILES_MAP
A : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : int = ["input_ids", "attention_mask"]
A : Any = BartTokenizer
def __init__( self : List[Any] , lowerCAmelCase : Any=None , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : str="replace" , lowerCAmelCase : str="<s>" , lowerCAmelCase : int="</s>" , lowerCAmelCase : Optional[int]="</s>" , lowerCAmelCase : Union[str, Any]="<s>" , lowerCAmelCase : str="<unk>" , lowerCAmelCase : int="<pad>" , lowerCAmelCase : int="<mask>" , lowerCAmelCase : Dict=False , lowerCAmelCase : List[Any]=True , **lowerCAmelCase : Optional[Any] , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(
lowerCAmelCase , lowerCAmelCase , tokenizer_file=lowerCAmelCase , errors=lowerCAmelCase , bos_token=lowerCAmelCase , eos_token=lowerCAmelCase , sep_token=lowerCAmelCase , cls_token=lowerCAmelCase , unk_token=lowerCAmelCase , pad_token=lowerCAmelCase , mask_token=lowerCAmelCase , add_prefix_space=lowerCAmelCase , trim_offsets=lowerCAmelCase , **lowerCAmelCase , )
lowercase__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get('add_prefix_space' , lowerCAmelCase) != add_prefix_space:
lowercase__ = getattr(lowerCAmelCase , pre_tok_state.pop('type'))
lowercase__ = add_prefix_space
lowercase__ = pre_tok_class(**lowerCAmelCase)
lowercase__ = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
lowercase__ = 'post_processor'
lowercase__ = getattr(self.backend_tokenizer , lowerCAmelCase , lowerCAmelCase)
if tokenizer_component_instance:
lowercase__ = json.loads(tokenizer_component_instance.__getstate__())
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
lowercase__ = tuple(state['sep'])
if "cls" in state:
lowercase__ = tuple(state['cls'])
lowercase__ = False
if state.get('add_prefix_space' , lowerCAmelCase) != add_prefix_space:
lowercase__ = add_prefix_space
lowercase__ = True
if state.get('trim_offsets' , lowerCAmelCase) != trim_offsets:
lowercase__ = trim_offsets
lowercase__ = True
if changes_to_apply:
lowercase__ = getattr(lowerCAmelCase , state.pop('type'))
lowercase__ = component_class(**lowerCAmelCase)
setattr(self.backend_tokenizer , lowerCAmelCase , lowerCAmelCase)
@property
def UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.')
return None
return str(self._mask_token)
@mask_token.setter
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : int) -> Optional[int]:
"""simple docstring"""
lowercase__ = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase) if isinstance(lowerCAmelCase , lowerCAmelCase) else value
lowercase__ = value
def UpperCAmelCase ( self : List[str] , *lowerCAmelCase : Tuple , **lowerCAmelCase : Optional[int]) -> BatchEncoding:
"""simple docstring"""
lowercase__ = kwargs.get('is_split_into_words' , lowerCAmelCase)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'to use it with pretokenized inputs.')
return super()._batch_encode_plus(*lowerCAmelCase , **lowerCAmelCase)
def UpperCAmelCase ( self : str , *lowerCAmelCase : Tuple , **lowerCAmelCase : str) -> BatchEncoding:
"""simple docstring"""
lowercase__ = kwargs.get('is_split_into_words' , lowerCAmelCase)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'to use it with pretokenized inputs.')
return super()._encode_plus(*lowerCAmelCase , **lowerCAmelCase)
def UpperCAmelCase ( self : Dict , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None) -> Tuple[str]:
"""simple docstring"""
lowercase__ = self._tokenizer.model.save(lowerCAmelCase , name=lowerCAmelCase)
return tuple(lowerCAmelCase)
def UpperCAmelCase ( self : Any , lowerCAmelCase : str , lowerCAmelCase : Optional[int]=None) -> Tuple:
"""simple docstring"""
lowercase__ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
lowercase__ = [self.sep_token_id]
lowercase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
| 642
| 1
|
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
SquadVaProcessor,
SquadVaProcessor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 642
|
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : str = (DDIMParallelScheduler,)
A : Any = (("eta", 0.0), ("num_inference_steps", 50))
def UpperCAmelCase ( self : Union[str, Any] , **lowerCAmelCase : Optional[int]) -> Dict:
"""simple docstring"""
lowercase__ = {
'num_train_timesteps': 10_00,
'beta_start': 0.00_01,
'beta_end': 0.02,
'beta_schedule': 'linear',
'clip_sample': True,
}
config.update(**lowerCAmelCase)
return config
def UpperCAmelCase ( self : int , **lowerCAmelCase : str) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config(**lowerCAmelCase)
lowercase__ = scheduler_class(**lowerCAmelCase)
lowercase__, lowercase__ = 10, 0.0
lowercase__ = self.dummy_model()
lowercase__ = self.dummy_sample_deter
scheduler.set_timesteps(lowerCAmelCase)
for t in scheduler.timesteps:
lowercase__ = model(lowerCAmelCase , lowerCAmelCase)
lowercase__ = scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase).prev_sample
return sample
def UpperCAmelCase ( self : Tuple) -> int:
"""simple docstring"""
for timesteps in [1_00, 5_00, 10_00]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase)
def UpperCAmelCase ( self : Tuple) -> Any:
"""simple docstring"""
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=lowerCAmelCase)
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config(steps_offset=1)
lowercase__ = scheduler_class(**lowerCAmelCase)
scheduler.set_timesteps(5)
assert torch.equal(scheduler.timesteps , torch.LongTensor([8_01, 6_01, 4_01, 2_01, 1]))
def UpperCAmelCase ( self : str) -> Tuple:
"""simple docstring"""
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2]):
self.check_over_configs(beta_start=lowerCAmelCase , beta_end=lowerCAmelCase)
def UpperCAmelCase ( self : Optional[int]) -> str:
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowerCAmelCase)
def UpperCAmelCase ( self : List[str]) -> List[str]:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCAmelCase)
def UpperCAmelCase ( self : List[Any]) -> str:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowerCAmelCase)
def UpperCAmelCase ( self : Optional[int]) -> int:
"""simple docstring"""
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=lowerCAmelCase)
def UpperCAmelCase ( self : Any) -> List[str]:
"""simple docstring"""
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=lowerCAmelCase)
def UpperCAmelCase ( self : List[str]) -> Optional[int]:
"""simple docstring"""
self.check_over_configs(thresholding=lowerCAmelCase)
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=lowerCAmelCase , prediction_type=lowerCAmelCase , sample_max_value=lowerCAmelCase , )
def UpperCAmelCase ( self : int) -> Optional[Any]:
"""simple docstring"""
for t in [1, 10, 49]:
self.check_over_forward(time_step=lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> int:
"""simple docstring"""
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 5_00]):
self.check_over_forward(time_step=lowerCAmelCase , num_inference_steps=lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> List[Any]:
"""simple docstring"""
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0]):
self.check_over_forward(time_step=lowerCAmelCase , eta=lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> List[Any]:
"""simple docstring"""
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**lowerCAmelCase)
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0) - 0.0)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_20 , 4_00) - 0.1_47_71)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_80 , 9_60) - 0.3_24_60)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0) - 0.0)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 , 4_86) - 0.0_09_79)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 , 9_98) - 0.02)) < 1E-5
def UpperCAmelCase ( self : Dict) -> Tuple:
"""simple docstring"""
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**lowerCAmelCase)
lowercase__, lowercase__ = 10, 0.0
scheduler.set_timesteps(lowerCAmelCase)
lowercase__ = self.dummy_model()
lowercase__ = self.dummy_sample_deter
lowercase__ = self.dummy_sample_deter + 0.1
lowercase__ = self.dummy_sample_deter - 0.1
lowercase__ = samplea.shape[0]
lowercase__ = torch.stack([samplea, samplea, samplea] , dim=0)
lowercase__ = torch.arange(lowerCAmelCase)[0:3, None].repeat(1 , lowerCAmelCase)
lowercase__ = model(samples.flatten(0 , 1) , timesteps.flatten(0 , 1))
lowercase__ = scheduler.batch_step_no_noise(lowerCAmelCase , timesteps.flatten(0 , 1) , samples.flatten(0 , 1) , lowerCAmelCase)
lowercase__ = torch.sum(torch.abs(lowerCAmelCase))
lowercase__ = torch.mean(torch.abs(lowerCAmelCase))
assert abs(result_sum.item() - 11_47.79_04) < 1E-2
assert abs(result_mean.item() - 0.49_82) < 1E-3
def UpperCAmelCase ( self : Any) -> int:
"""simple docstring"""
lowercase__ = self.full_loop()
lowercase__ = torch.sum(torch.abs(lowerCAmelCase))
lowercase__ = torch.mean(torch.abs(lowerCAmelCase))
assert abs(result_sum.item() - 1_72.00_67) < 1E-2
assert abs(result_mean.item() - 0.22_39_67) < 1E-3
def UpperCAmelCase ( self : int) -> List[Any]:
"""simple docstring"""
lowercase__ = self.full_loop(prediction_type='v_prediction')
lowercase__ = torch.sum(torch.abs(lowerCAmelCase))
lowercase__ = torch.mean(torch.abs(lowerCAmelCase))
assert abs(result_sum.item() - 52.53_02) < 1E-2
assert abs(result_mean.item() - 0.06_84) < 1E-3
def UpperCAmelCase ( self : str) -> Dict:
"""simple docstring"""
lowercase__ = self.full_loop(set_alpha_to_one=lowerCAmelCase , beta_start=0.01)
lowercase__ = torch.sum(torch.abs(lowerCAmelCase))
lowercase__ = torch.mean(torch.abs(lowerCAmelCase))
assert abs(result_sum.item() - 1_49.82_95) < 1E-2
assert abs(result_mean.item() - 0.19_51) < 1E-3
def UpperCAmelCase ( self : str) -> List[Any]:
"""simple docstring"""
lowercase__ = self.full_loop(set_alpha_to_one=lowerCAmelCase , beta_start=0.01)
lowercase__ = torch.sum(torch.abs(lowerCAmelCase))
lowercase__ = torch.mean(torch.abs(lowerCAmelCase))
assert abs(result_sum.item() - 1_49.07_84) < 1E-2
assert abs(result_mean.item() - 0.19_41) < 1E-3
| 642
| 1
|
def _lowerCAmelCase ( A__ , A__ ):
assert x is not None
assert y is not None
lowercase__ = len(A__ )
lowercase__ = len(A__ )
# declaring the array for storing the dp values
lowercase__ = [[0] * (n + 1) for _ in range(m + 1 )] # noqa: E741
for i in range(1 , m + 1 ):
for j in range(1 , n + 1 ):
lowercase__ = 1 if x[i - 1] == y[j - 1] else 0
lowercase__ = max(l[i - 1][j] , l[i][j - 1] , l[i - 1][j - 1] + match )
lowercase__ = ''
lowercase__, lowercase__ = m, n
while i > 0 and j > 0:
lowercase__ = 1 if x[i - 1] == y[j - 1] else 0
if l[i][j] == l[i - 1][j - 1] + match:
if match == 1:
lowercase__ = x[i - 1] + seq
i -= 1
j -= 1
elif l[i][j] == l[i - 1][j]:
i -= 1
else:
j -= 1
return l[m][n], seq
if __name__ == "__main__":
a__ : Union[str, Any] = "AGGTAB"
a__ : str = "GXTXAYB"
a__ : List[str] = 4
a__ : str = "GTAB"
a__ , a__ : Optional[Any] = longest_common_subsequence(a, b)
print("len =", ln, ", sub-sequence =", subseq)
import doctest
doctest.testmod()
| 642
|
import cva
import numpy as np
class UpperCAmelCase__:
'''simple docstring'''
def __init__( self : Union[str, Any] , lowerCAmelCase : float , lowerCAmelCase : int) -> Dict:
"""simple docstring"""
if k in (0.04, 0.06):
lowercase__ = k
lowercase__ = window_size
else:
raise ValueError('invalid k value')
def __str__( self : Tuple) -> str:
"""simple docstring"""
return str(self.k)
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : str) -> tuple[cva.Mat, list[list[int]]]:
"""simple docstring"""
lowercase__ = cva.imread(lowerCAmelCase , 0)
lowercase__, lowercase__ = img.shape
lowercase__ = []
lowercase__ = img.copy()
lowercase__ = cva.cvtColor(lowerCAmelCase , cva.COLOR_GRAY2RGB)
lowercase__, lowercase__ = np.gradient(lowerCAmelCase)
lowercase__ = dx**2
lowercase__ = dy**2
lowercase__ = dx * dy
lowercase__ = 0.04
lowercase__ = self.window_size // 2
for y in range(lowerCAmelCase , h - offset):
for x in range(lowerCAmelCase , w - offset):
lowercase__ = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowercase__ = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowercase__ = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowercase__ = (wxx * wyy) - (wxy**2)
lowercase__ = wxx + wyy
lowercase__ = det - k * (trace**2)
# Can change the value
if r > 0.5:
corner_list.append([x, y, r])
color_img.itemset((y, x, 0) , 0)
color_img.itemset((y, x, 1) , 0)
color_img.itemset((y, x, 2) , 2_55)
return color_img, corner_list
if __name__ == "__main__":
a__ : Dict = HarrisCorner(0.0_4, 3)
a__ , a__ : Dict = edge_detect.detect("path_to_image")
cva.imwrite("detect.png", color_img)
| 642
| 1
|
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class UpperCAmelCase__:
'''simple docstring'''
A : List[Any] = XGLMConfig
A : Dict = {}
A : Tuple = "gelu"
def __init__( self : List[str] , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[Any]=14 , lowerCAmelCase : Union[str, Any]=7 , lowerCAmelCase : Optional[int]=True , lowerCAmelCase : List[Any]=True , lowerCAmelCase : str=True , lowerCAmelCase : Optional[Any]=99 , lowerCAmelCase : Any=32 , lowerCAmelCase : Optional[int]=2 , lowerCAmelCase : int=4 , lowerCAmelCase : Optional[int]=37 , lowerCAmelCase : Optional[Any]="gelu" , lowerCAmelCase : int=0.1 , lowerCAmelCase : List[Any]=0.1 , lowerCAmelCase : Tuple=5_12 , lowerCAmelCase : List[Any]=0.02 , ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = seq_length
lowercase__ = is_training
lowercase__ = use_input_mask
lowercase__ = use_labels
lowercase__ = vocab_size
lowercase__ = d_model
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = ffn_dim
lowercase__ = activation_function
lowercase__ = activation_dropout
lowercase__ = attention_dropout
lowercase__ = max_position_embeddings
lowercase__ = initializer_range
lowercase__ = None
lowercase__ = 0
lowercase__ = 2
lowercase__ = 1
def UpperCAmelCase ( self : Optional[int]) -> str:
"""simple docstring"""
return XGLMConfig.from_pretrained('facebook/xglm-564M')
def UpperCAmelCase ( self : List[str]) -> Tuple:
"""simple docstring"""
lowercase__ = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) , clip_value_min=0 , clip_value_max=3)
lowercase__ = None
if self.use_input_mask:
lowercase__ = random_attention_mask([self.batch_size, self.seq_length])
lowercase__ = self.get_config()
lowercase__ = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2)
return (
config,
input_ids,
input_mask,
head_mask,
)
def UpperCAmelCase ( self : List[Any]) -> List[Any]:
"""simple docstring"""
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=lowerCAmelCase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=lowerCAmelCase , )
def UpperCAmelCase ( self : str) -> str:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
(
(
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
),
) = config_and_inputs
lowercase__ = {
'input_ids': input_ids,
'head_mask': head_mask,
}
return config, inputs_dict
@require_tf
class UpperCAmelCase__( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
A : Optional[int] = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
A : Optional[Any] = (TFXGLMForCausalLM,) if is_tf_available() else ()
A : List[Any] = (
{"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
)
A : str = False
A : str = False
A : str = False
def UpperCAmelCase ( self : str) -> List[Any]:
"""simple docstring"""
lowercase__ = TFXGLMModelTester(self)
lowercase__ = ConfigTester(self , config_class=lowerCAmelCase , n_embd=37)
def UpperCAmelCase ( self : Optional[int]) -> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
@slow
def UpperCAmelCase ( self : Optional[int]) -> int:
"""simple docstring"""
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = TFXGLMModel.from_pretrained(lowerCAmelCase)
self.assertIsNotNone(lowerCAmelCase)
@unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.')
def UpperCAmelCase ( self : Optional[Any]) -> str:
"""simple docstring"""
super().test_resize_token_embeddings()
@require_tf
class UpperCAmelCase__( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCAmelCase ( self : Any , lowerCAmelCase : List[str]=True) -> Dict:
"""simple docstring"""
lowercase__ = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M')
lowercase__ = tf.convert_to_tensor([[2, 2_68, 98_65]] , dtype=tf.intaa) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
lowercase__ = [2, 2_68, 98_65, 67, 11, 19_88, 5_72_52, 98_65, 5, 9_84, 67, 19_88, 21_38_38, 16_58, 53, 7_04_46, 33, 66_57, 2_78, 15_81]
# fmt: on
lowercase__ = model.generate(lowerCAmelCase , do_sample=lowerCAmelCase , num_beams=1)
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , lowerCAmelCase)
@slow
def UpperCAmelCase ( self : List[Any]) -> str:
"""simple docstring"""
lowercase__ = XGLMTokenizer.from_pretrained('facebook/xglm-564M')
lowercase__ = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M')
tf.random.set_seed(0)
lowercase__ = tokenizer('Today is a nice day and' , return_tensors='tf')
lowercase__ = tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(':/CPU:0'):
lowercase__ = model.generate(lowerCAmelCase , do_sample=lowerCAmelCase , seed=[7, 0])
lowercase__ = tokenizer.decode(output_ids[0] , skip_special_tokens=lowerCAmelCase)
lowercase__ = (
'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'
)
self.assertEqual(lowerCAmelCase , lowerCAmelCase)
@slow
def UpperCAmelCase ( self : Any) -> List[str]:
"""simple docstring"""
lowercase__ = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M')
lowercase__ = XGLMTokenizer.from_pretrained('facebook/xglm-564M')
lowercase__ = 'left'
# use different length sentences to test batching
lowercase__ = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When',
'Hello, my dog is a little',
]
lowercase__ = tokenizer(lowerCAmelCase , return_tensors='tf' , padding=lowerCAmelCase)
lowercase__ = inputs['input_ids']
lowercase__ = model.generate(input_ids=lowerCAmelCase , attention_mask=inputs['attention_mask'] , max_new_tokens=12)
lowercase__ = tokenizer(sentences[0] , return_tensors='tf').input_ids
lowercase__ = model.generate(input_ids=lowerCAmelCase , max_new_tokens=12)
lowercase__ = tokenizer(sentences[1] , return_tensors='tf').input_ids
lowercase__ = model.generate(input_ids=lowerCAmelCase , max_new_tokens=12)
lowercase__ = tokenizer.batch_decode(lowerCAmelCase , skip_special_tokens=lowerCAmelCase)
lowercase__ = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowerCAmelCase)
lowercase__ = tokenizer.decode(output_padded[0] , skip_special_tokens=lowerCAmelCase)
lowercase__ = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '
'a single',
'Hello, my dog is a little bit of a shy one, but he is very friendly',
]
self.assertListEqual(lowerCAmelCase , lowerCAmelCase)
self.assertListEqual(lowerCAmelCase , [non_padded_sentence, padded_sentence])
| 642
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ : Dict = logging.get_logger(__name__)
a__ : List[Any] = {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : int = "speech_to_text"
A : Optional[Any] = ["past_key_values"]
A : Optional[int] = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : Optional[int] , lowerCAmelCase : Tuple=1_00_00 , lowerCAmelCase : int=12 , lowerCAmelCase : int=20_48 , lowerCAmelCase : Union[str, Any]=4 , lowerCAmelCase : str=6 , lowerCAmelCase : Dict=20_48 , lowerCAmelCase : Dict=4 , lowerCAmelCase : Optional[int]=0.0 , lowerCAmelCase : Union[str, Any]=0.0 , lowerCAmelCase : int=True , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : Dict="relu" , lowerCAmelCase : Tuple=2_56 , lowerCAmelCase : Dict=0.1 , lowerCAmelCase : Optional[Any]=0.0 , lowerCAmelCase : List[Any]=0.0 , lowerCAmelCase : Any=0.02 , lowerCAmelCase : List[Any]=2 , lowerCAmelCase : Tuple=True , lowerCAmelCase : Tuple=1 , lowerCAmelCase : List[str]=0 , lowerCAmelCase : Union[str, Any]=2 , lowerCAmelCase : Any=60_00 , lowerCAmelCase : Optional[int]=10_24 , lowerCAmelCase : Optional[Any]=2 , lowerCAmelCase : Optional[Any]=(5, 5) , lowerCAmelCase : Union[str, Any]=10_24 , lowerCAmelCase : List[Any]=80 , lowerCAmelCase : List[str]=1 , **lowerCAmelCase : List[str] , ) -> Dict:
"""simple docstring"""
lowercase__ = vocab_size
lowercase__ = d_model
lowercase__ = encoder_ffn_dim
lowercase__ = encoder_layers
lowercase__ = encoder_attention_heads
lowercase__ = decoder_ffn_dim
lowercase__ = decoder_layers
lowercase__ = decoder_attention_heads
lowercase__ = dropout
lowercase__ = attention_dropout
lowercase__ = activation_dropout
lowercase__ = activation_function
lowercase__ = init_std
lowercase__ = encoder_layerdrop
lowercase__ = decoder_layerdrop
lowercase__ = use_cache
lowercase__ = encoder_layers
lowercase__ = scale_embedding # scale factor will be sqrt(d_model) if True
lowercase__ = max_source_positions
lowercase__ = max_target_positions
lowercase__ = num_conv_layers
lowercase__ = list(lowerCAmelCase)
lowercase__ = conv_channels
lowercase__ = input_feat_per_channel
lowercase__ = input_channels
if len(self.conv_kernel_sizes) != self.num_conv_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` '
f'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, '''
f'''`config.num_conv_layers = {self.num_conv_layers}`.''')
super().__init__(
pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , is_encoder_decoder=lowerCAmelCase , decoder_start_token_id=lowerCAmelCase , **lowerCAmelCase , )
| 642
| 1
|
def _lowerCAmelCase ( A__ ):
lowercase__ = len(A__ )
lowercase__ = len(matrix[0] )
lowercase__ = min(A__ , A__ )
for row in range(A__ ):
# Check if diagonal element is not zero
if matrix[row][row] != 0:
# Eliminate all the elements below the diagonal
for col in range(row + 1 , A__ ):
lowercase__ = matrix[col][row] / matrix[row][row]
for i in range(A__ , A__ ):
matrix[col][i] -= multiplier * matrix[row][i]
else:
# Find a non-zero diagonal element to swap rows
lowercase__ = True
for i in range(row + 1 , A__ ):
if matrix[i][row] != 0:
lowercase__, lowercase__ = matrix[i], matrix[row]
lowercase__ = False
break
if reduce:
rank -= 1
for i in range(A__ ):
lowercase__ = matrix[i][rank]
# Reduce the row pointer by one to stay on the same row
row -= 1
return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
| 642
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
a__ : Any = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Optional[int] = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Union[str, Any] = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Any = [
"REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ReformerAttention",
"ReformerForMaskedLM",
"ReformerForQuestionAnswering",
"ReformerForSequenceClassification",
"ReformerLayer",
"ReformerModel",
"ReformerModelWithLMHead",
"ReformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
a__ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 642
| 1
|
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class UpperCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self : Any) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
lowercase__ = nn.Linear(3 , 4)
lowercase__ = nn.BatchNormad(4)
lowercase__ = nn.Linear(4 , 5)
def UpperCAmelCase ( self : Dict , lowerCAmelCase : List[Any]) -> Optional[int]:
"""simple docstring"""
return self.lineara(self.batchnorm(self.lineara(lowerCAmelCase)))
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
def UpperCAmelCase ( self : List[str] , lowerCAmelCase : Optional[int] , *lowerCAmelCase : List[Any] , **lowerCAmelCase : Optional[int]) -> Optional[int]:
"""simple docstring"""
return (args[0] + 1,) + args[1:], kwargs
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : Dict , lowerCAmelCase : Dict) -> List[str]:
"""simple docstring"""
return output + 1
class UpperCAmelCase__( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase ( self : Union[str, Any]) -> int:
"""simple docstring"""
lowercase__ = ModelForTest()
lowercase__ = ModelHook()
add_hook_to_module(lowerCAmelCase , lowerCAmelCase)
self.assertEqual(test_model._hf_hook , lowerCAmelCase)
self.assertTrue(hasattr(lowerCAmelCase , '_old_forward'))
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , 'forward')
self.assertListEqual(list(inspect.signature(test_model.forward).parameters) , ['x'])
remove_hook_from_module(lowerCAmelCase)
self.assertFalse(hasattr(lowerCAmelCase , '_hf_hook'))
self.assertFalse(hasattr(lowerCAmelCase , '_old_forward'))
def UpperCAmelCase ( self : Tuple) -> Optional[int]:
"""simple docstring"""
lowercase__ = ModelForTest()
lowercase__ = ModelHook()
add_hook_to_module(lowerCAmelCase , lowerCAmelCase)
add_hook_to_module(lowerCAmelCase , lowerCAmelCase , append=lowerCAmelCase)
self.assertEqual(isinstance(test_model._hf_hook , lowerCAmelCase) , lowerCAmelCase)
self.assertEqual(len(test_model._hf_hook.hooks) , 2)
self.assertTrue(hasattr(lowerCAmelCase , '_old_forward'))
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , 'forward')
self.assertListEqual(list(inspect.signature(test_model.forward).parameters) , ['x'])
remove_hook_from_module(lowerCAmelCase)
self.assertFalse(hasattr(lowerCAmelCase , '_hf_hook'))
self.assertFalse(hasattr(lowerCAmelCase , '_old_forward'))
def UpperCAmelCase ( self : List[str]) -> Optional[Any]:
"""simple docstring"""
lowercase__ = ModelForTest()
lowercase__ = torch.randn(2 , 3)
lowercase__ = test_model(x + 1)
lowercase__ = test_model(x + 2)
lowercase__ = PreForwardHook()
add_hook_to_module(lowerCAmelCase , lowerCAmelCase)
lowercase__ = test_model(lowerCAmelCase)
self.assertTrue(torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-5))
# Attaching a hook to a model when it already has one replaces, does not chain
lowercase__ = PreForwardHook()
add_hook_to_module(lowerCAmelCase , lowerCAmelCase)
lowercase__ = test_model(lowerCAmelCase)
self.assertTrue(torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-5))
# You need to use the sequential hook to chain two or more hooks
lowercase__ = SequentialHook(PreForwardHook() , PreForwardHook())
add_hook_to_module(lowerCAmelCase , lowerCAmelCase)
lowercase__ = test_model(lowerCAmelCase)
assert torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-5)
def UpperCAmelCase ( self : List[Any]) -> Optional[int]:
"""simple docstring"""
lowercase__ = ModelForTest()
lowercase__ = torch.randn(2 , 3)
lowercase__ = test_model(lowerCAmelCase)
lowercase__ = PostForwardHook()
add_hook_to_module(lowerCAmelCase , lowerCAmelCase)
lowercase__ = test_model(lowerCAmelCase)
self.assertTrue(torch.allclose(lowerCAmelCase , output + 1 , atol=1E-5))
# Attaching a hook to a model when it already has one replaces, does not chain
lowercase__ = PostForwardHook()
add_hook_to_module(lowerCAmelCase , lowerCAmelCase)
lowercase__ = test_model(lowerCAmelCase)
self.assertTrue(torch.allclose(lowerCAmelCase , output + 1 , atol=1E-5))
# You need to use the sequential hook to chain two or more hooks
lowercase__ = SequentialHook(PostForwardHook() , PostForwardHook())
add_hook_to_module(lowerCAmelCase , lowerCAmelCase)
lowercase__ = test_model(lowerCAmelCase)
assert torch.allclose(lowerCAmelCase , output + 2 , atol=1E-5)
def UpperCAmelCase ( self : List[str]) -> Any:
"""simple docstring"""
lowercase__ = ModelForTest()
lowercase__ = torch.randn(2 , 3)
lowercase__ = test_model(lowerCAmelCase)
lowercase__ = PostForwardHook()
add_hook_to_module(lowerCAmelCase , lowerCAmelCase)
lowercase__ = test_model(lowerCAmelCase)
self.assertTrue(torch.allclose(lowerCAmelCase , output + 1))
self.assertTrue(outputa.requires_grad)
lowercase__ = True
lowercase__ = test_model(lowerCAmelCase)
self.assertFalse(outputa.requires_grad)
@require_multi_gpu
def UpperCAmelCase ( self : List[str]) -> int:
"""simple docstring"""
lowercase__ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu'))
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu'))
self.assertEqual(model.lineara.weight.device , torch.device('cpu'))
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0))
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0))
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1))
self.assertEqual(model.lineara.weight.device , torch.device(0))
self.assertEqual(model.batchnorm.weight.device , torch.device(0))
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0))
self.assertEqual(model.lineara.weight.device , torch.device(1))
# We can still make a forward pass. The input does not need to be on any particular device
lowercase__ = torch.randn(2 , 3)
lowercase__ = model(lowerCAmelCase)
self.assertEqual(output.device , torch.device(1))
# We can add a general hook to put back output on same device as input.
add_hook_to_module(lowerCAmelCase , AlignDevicesHook(io_same_device=lowerCAmelCase))
lowercase__ = torch.randn(2 , 3).to(0)
lowercase__ = model(lowerCAmelCase)
self.assertEqual(output.device , torch.device(0))
def UpperCAmelCase ( self : List[Any]) -> Optional[int]:
"""simple docstring"""
lowercase__ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu'))
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu'))
self.assertEqual(model.lineara.weight.device , torch.device('cpu'))
# This will move each submodule on different devices
lowercase__ = {'execution_device': 0 if torch.cuda.is_available() else 'cpu', 'offload': True}
add_hook_to_module(model.lineara , AlignDevicesHook(**lowerCAmelCase))
add_hook_to_module(model.batchnorm , AlignDevicesHook(**lowerCAmelCase))
add_hook_to_module(model.lineara , AlignDevicesHook(**lowerCAmelCase))
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('meta'))
self.assertEqual(model.batchnorm.weight.device , torch.device('meta'))
self.assertEqual(model.lineara.weight.device , torch.device('meta'))
# Buffers are not included in the offload by default, so are on the execution device
lowercase__ = torch.device(hook_kwargs['execution_device'])
self.assertEqual(model.batchnorm.running_mean.device , lowerCAmelCase)
lowercase__ = torch.randn(2 , 3)
lowercase__ = model(lowerCAmelCase)
self.assertEqual(output.device , lowerCAmelCase)
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara)
remove_hook_from_module(model.batchnorm)
remove_hook_from_module(model.lineara)
self.assertEqual(model.lineara.weight.device , torch.device('cpu'))
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu'))
self.assertEqual(model.lineara.weight.device , torch.device('cpu'))
# Now test with buffers included in the offload
lowercase__ = {
'execution_device': 0 if torch.cuda.is_available() else 'cpu',
'offload': True,
'offload_buffers': True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**lowerCAmelCase))
add_hook_to_module(model.batchnorm , AlignDevicesHook(**lowerCAmelCase))
add_hook_to_module(model.lineara , AlignDevicesHook(**lowerCAmelCase))
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('meta'))
self.assertEqual(model.batchnorm.weight.device , torch.device('meta'))
self.assertEqual(model.lineara.weight.device , torch.device('meta'))
self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta'))
lowercase__ = torch.randn(2 , 3)
lowercase__ = model(lowerCAmelCase)
self.assertEqual(output.device , lowerCAmelCase)
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara)
remove_hook_from_module(model.batchnorm)
remove_hook_from_module(model.lineara)
self.assertEqual(model.lineara.weight.device , torch.device('cpu'))
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu'))
self.assertEqual(model.lineara.weight.device , torch.device('cpu'))
def UpperCAmelCase ( self : Optional[int]) -> Optional[int]:
"""simple docstring"""
lowercase__ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu'))
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu'))
self.assertEqual(model.lineara.weight.device , torch.device('cpu'))
# This will move each submodule on different devices
lowercase__ = 0 if torch.cuda.is_available() else 'cpu'
attach_align_device_hook(lowerCAmelCase , execution_device=lowerCAmelCase , offload=lowerCAmelCase)
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('meta'))
self.assertEqual(model.batchnorm.weight.device , torch.device('meta'))
self.assertEqual(model.lineara.weight.device , torch.device('meta'))
# Buffers are not included in the offload by default, so are on the execution device
lowercase__ = torch.device(lowerCAmelCase)
self.assertEqual(model.batchnorm.running_mean.device , lowerCAmelCase)
lowercase__ = torch.randn(2 , 3)
lowercase__ = model(lowerCAmelCase)
self.assertEqual(output.device , lowerCAmelCase)
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(lowerCAmelCase)
self.assertEqual(model.lineara.weight.device , torch.device('cpu'))
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu'))
self.assertEqual(model.lineara.weight.device , torch.device('cpu'))
# Now test with buffers included in the offload
attach_align_device_hook(lowerCAmelCase , execution_device=lowerCAmelCase , offload=lowerCAmelCase , offload_buffers=lowerCAmelCase)
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('meta'))
self.assertEqual(model.batchnorm.weight.device , torch.device('meta'))
self.assertEqual(model.lineara.weight.device , torch.device('meta'))
self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta'))
lowercase__ = torch.randn(2 , 3)
lowercase__ = model(lowerCAmelCase)
self.assertEqual(output.device , lowerCAmelCase)
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(lowerCAmelCase)
self.assertEqual(model.lineara.weight.device , torch.device('cpu'))
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu'))
self.assertEqual(model.lineara.weight.device , torch.device('cpu'))
def UpperCAmelCase ( self : List[str]) -> int:
"""simple docstring"""
lowercase__ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('cpu'))
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu'))
self.assertEqual(model.lineara.weight.device , torch.device('cpu'))
# This will move each submodule on different devices
lowercase__ = 0 if torch.cuda.is_available() else 'cpu'
attach_align_device_hook(
lowerCAmelCase , execution_device=lowerCAmelCase , offload=lowerCAmelCase , weights_map=model.state_dict())
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('meta'))
self.assertEqual(model.batchnorm.weight.device , torch.device('meta'))
self.assertEqual(model.lineara.weight.device , torch.device('meta'))
# Buffers are not included in the offload by default, so are on the execution device
lowercase__ = torch.device(lowerCAmelCase)
self.assertEqual(model.batchnorm.running_mean.device , lowerCAmelCase)
lowercase__ = torch.randn(2 , 3)
lowercase__ = model(lowerCAmelCase)
self.assertEqual(output.device , lowerCAmelCase)
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(lowerCAmelCase)
self.assertEqual(model.lineara.weight.device , torch.device('cpu'))
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu'))
self.assertEqual(model.lineara.weight.device , torch.device('cpu'))
# Now test with buffers included in the offload
attach_align_device_hook(
lowerCAmelCase , execution_device=lowerCAmelCase , offload=lowerCAmelCase , weights_map=model.state_dict() , offload_buffers=lowerCAmelCase , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('meta'))
self.assertEqual(model.batchnorm.weight.device , torch.device('meta'))
self.assertEqual(model.lineara.weight.device , torch.device('meta'))
self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta'))
lowercase__ = torch.randn(2 , 3)
lowercase__ = model(lowerCAmelCase)
self.assertEqual(output.device , lowerCAmelCase)
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(lowerCAmelCase)
self.assertEqual(model.lineara.weight.device , torch.device('cpu'))
self.assertEqual(model.batchnorm.weight.device , torch.device('cpu'))
self.assertEqual(model.lineara.weight.device , torch.device('cpu'))
| 642
|
# Imports
import numpy as np
class UpperCAmelCase__:
'''simple docstring'''
def __init__( self : Any , lowerCAmelCase : Dict=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : List[str]=None , lowerCAmelCase : List[str]=None) -> Dict:
"""simple docstring"""
self.set_matricies(red=lowerCAmelCase , green=lowerCAmelCase , blue=lowerCAmelCase , red_edge=lowerCAmelCase , nir=lowerCAmelCase)
def UpperCAmelCase ( self : Dict , lowerCAmelCase : Dict=None , lowerCAmelCase : Union[str, Any]=None , lowerCAmelCase : Tuple=None , lowerCAmelCase : str=None , lowerCAmelCase : str=None) -> int:
"""simple docstring"""
if red is not None:
lowercase__ = red
if green is not None:
lowercase__ = green
if blue is not None:
lowercase__ = blue
if red_edge is not None:
lowercase__ = red_edge
if nir is not None:
lowercase__ = nir
return True
def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : Union[str, Any]="" , lowerCAmelCase : Tuple=None , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Dict=None) -> Union[str, Any]:
"""simple docstring"""
self.set_matricies(red=lowerCAmelCase , green=lowerCAmelCase , blue=lowerCAmelCase , red_edge=lowerCAmelCase , nir=lowerCAmelCase)
lowercase__ = {
'ARVI2': self.arvaa,
'CCCI': self.ccci,
'CVI': self.cvi,
'GLI': self.gli,
'NDVI': self.ndvi,
'BNDVI': self.bndvi,
'redEdgeNDVI': self.red_edge_ndvi,
'GNDVI': self.gndvi,
'GBNDVI': self.gbndvi,
'GRNDVI': self.grndvi,
'RBNDVI': self.rbndvi,
'PNDVI': self.pndvi,
'ATSAVI': self.atsavi,
'BWDRVI': self.bwdrvi,
'CIgreen': self.ci_green,
'CIrededge': self.ci_rededge,
'CI': self.ci,
'CTVI': self.ctvi,
'GDVI': self.gdvi,
'EVI': self.evi,
'GEMI': self.gemi,
'GOSAVI': self.gosavi,
'GSAVI': self.gsavi,
'Hue': self.hue,
'IVI': self.ivi,
'IPVI': self.ipvi,
'I': self.i,
'RVI': self.rvi,
'MRVI': self.mrvi,
'MSAVI': self.m_savi,
'NormG': self.norm_g,
'NormNIR': self.norm_nir,
'NormR': self.norm_r,
'NGRDI': self.ngrdi,
'RI': self.ri,
'S': self.s,
'IF': self._if,
'DVI': self.dvi,
'TVI': self.tvi,
'NDRE': self.ndre,
}
try:
return funcs[index]()
except KeyError:
print('Index not in the list!')
return False
def UpperCAmelCase ( self : Optional[int]) -> List[str]:
"""simple docstring"""
return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
def UpperCAmelCase ( self : int) -> Any:
"""simple docstring"""
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def UpperCAmelCase ( self : str) -> Optional[int]:
"""simple docstring"""
return self.nir * (self.red / (self.green**2))
def UpperCAmelCase ( self : List[str]) -> Optional[int]:
"""simple docstring"""
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def UpperCAmelCase ( self : Tuple) -> Any:
"""simple docstring"""
return (self.nir - self.red) / (self.nir + self.red)
def UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
return (self.nir - self.blue) / (self.nir + self.blue)
def UpperCAmelCase ( self : List[Any]) -> Optional[int]:
"""simple docstring"""
return (self.redEdge - self.red) / (self.redEdge + self.red)
def UpperCAmelCase ( self : Optional[Any]) -> Optional[int]:
"""simple docstring"""
return (self.nir - self.green) / (self.nir + self.green)
def UpperCAmelCase ( self : Dict) -> int:
"""simple docstring"""
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def UpperCAmelCase ( self : Union[str, Any]) -> Optional[int]:
"""simple docstring"""
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def UpperCAmelCase ( self : Optional[Any]) -> Dict:
"""simple docstring"""
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def UpperCAmelCase ( self : Any) -> Union[str, Any]:
"""simple docstring"""
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : List[Any]=0.08 , lowerCAmelCase : Optional[int]=1.22 , lowerCAmelCase : int=0.03) -> List[Any]:
"""simple docstring"""
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def UpperCAmelCase ( self : Tuple) -> Any:
"""simple docstring"""
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def UpperCAmelCase ( self : int) -> Tuple:
"""simple docstring"""
return (self.nir / self.green) - 1
def UpperCAmelCase ( self : Any) -> str:
"""simple docstring"""
return (self.nir / self.redEdge) - 1
def UpperCAmelCase ( self : Any) -> List[str]:
"""simple docstring"""
return (self.red - self.blue) / self.red
def UpperCAmelCase ( self : Any) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))
def UpperCAmelCase ( self : List[Any]) -> str:
"""simple docstring"""
return self.nir - self.green
def UpperCAmelCase ( self : Tuple) -> List[Any]:
"""simple docstring"""
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def UpperCAmelCase ( self : Any) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.25 * n) - (self.red - 0.1_25) / (1 - self.red)
def UpperCAmelCase ( self : int , lowerCAmelCase : int=0.16) -> Dict:
"""simple docstring"""
return (self.nir - self.green) / (self.nir + self.green + y)
def UpperCAmelCase ( self : str , lowerCAmelCase : Optional[int]=0.5) -> Union[str, Any]:
"""simple docstring"""
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def UpperCAmelCase ( self : str) -> int:
"""simple docstring"""
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue))
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : int=None , lowerCAmelCase : List[str]=None) -> Tuple:
"""simple docstring"""
return (self.nir - b) / (a * self.red)
def UpperCAmelCase ( self : int) -> Dict:
"""simple docstring"""
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def UpperCAmelCase ( self : Optional[int]) -> Optional[Any]:
"""simple docstring"""
return (self.red + self.green + self.blue) / 30.5
def UpperCAmelCase ( self : int) -> str:
"""simple docstring"""
return self.nir / self.red
def UpperCAmelCase ( self : Optional[int]) -> Optional[Any]:
"""simple docstring"""
return (self.rvi() - 1) / (self.rvi() + 1)
def UpperCAmelCase ( self : Optional[int]) -> Optional[int]:
"""simple docstring"""
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def UpperCAmelCase ( self : Tuple) -> Any:
"""simple docstring"""
return self.green / (self.nir + self.red + self.green)
def UpperCAmelCase ( self : Any) -> Optional[Any]:
"""simple docstring"""
return self.nir / (self.nir + self.red + self.green)
def UpperCAmelCase ( self : List[Any]) -> Dict:
"""simple docstring"""
return self.red / (self.nir + self.red + self.green)
def UpperCAmelCase ( self : Optional[Any]) -> Any:
"""simple docstring"""
return (self.green - self.red) / (self.green + self.red)
def UpperCAmelCase ( self : Dict) -> Tuple:
"""simple docstring"""
return (self.red - self.green) / (self.red + self.green)
def UpperCAmelCase ( self : str) -> int:
"""simple docstring"""
lowercase__ = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
lowercase__ = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
return (max_value - min_value) / max_value
def UpperCAmelCase ( self : Optional[int]) -> Tuple:
"""simple docstring"""
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def UpperCAmelCase ( self : int) -> Optional[Any]:
"""simple docstring"""
return self.nir / self.red
def UpperCAmelCase ( self : Dict) -> Dict:
"""simple docstring"""
return (self.ndvi() + 0.5) ** (1 / 2)
def UpperCAmelCase ( self : str) -> List[Any]:
"""simple docstring"""
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
| 642
| 1
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def _lowerCAmelCase ( A__ ):
lowercase__ = 384
lowercase__ = 7
if "tiny" in model_name:
lowercase__ = 96
lowercase__ = (2, 2, 6, 2)
lowercase__ = (3, 6, 12, 24)
elif "small" in model_name:
lowercase__ = 96
lowercase__ = (2, 2, 18, 2)
lowercase__ = (3, 6, 12, 24)
elif "base" in model_name:
lowercase__ = 128
lowercase__ = (2, 2, 18, 2)
lowercase__ = (4, 8, 16, 32)
lowercase__ = 12
lowercase__ = 512
elif "large" in model_name:
lowercase__ = 192
lowercase__ = (2, 2, 18, 2)
lowercase__ = (6, 12, 24, 48)
lowercase__ = 12
lowercase__ = 768
# set label information
lowercase__ = 150
lowercase__ = 'huggingface/label-files'
lowercase__ = 'ade20k-id2label.json'
lowercase__ = json.load(open(hf_hub_download(A__ , A__ , repo_type='dataset' ) , 'r' ) )
lowercase__ = {int(A__ ): v for k, v in idalabel.items()}
lowercase__ = {v: k for k, v in idalabel.items()}
lowercase__ = SwinConfig(
embed_dim=A__ , depths=A__ , num_heads=A__ , window_size=A__ , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , )
lowercase__ = UperNetConfig(
backbone_config=A__ , auxiliary_in_channels=A__ , num_labels=A__ , idalabel=A__ , labelaid=A__ , )
return config
def _lowerCAmelCase ( A__ ):
lowercase__ = []
# fmt: off
# stem
rename_keys.append(('backbone.patch_embed.projection.weight', 'backbone.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.patch_embed.projection.bias', 'backbone.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.patch_embed.norm.weight', 'backbone.embeddings.norm.weight') )
rename_keys.append(('backbone.patch_embed.norm.bias', 'backbone.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.norm1.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.norm1.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', F'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', F'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.norm2.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.norm2.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((F'''backbone.stages.{i}.downsample.reduction.weight''', F'''backbone.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((F'''backbone.stages.{i}.downsample.norm.weight''', F'''backbone.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((F'''backbone.stages.{i}.downsample.norm.bias''', F'''backbone.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((F'''backbone.norm{i}.weight''', F'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((F'''backbone.norm{i}.bias''', F'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
] )
# fmt: on
return rename_keys
def _lowerCAmelCase ( A__ , A__ , A__ ):
lowercase__ = dct.pop(A__ )
lowercase__ = val
def _lowerCAmelCase ( A__ , A__ ):
lowercase__ = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
lowercase__ = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
lowercase__ = state_dict.pop(F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight''' )
lowercase__ = state_dict.pop(F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
lowercase__ = in_proj_weight[:dim, :]
lowercase__ = in_proj_bias[: dim]
lowercase__ = in_proj_weight[
dim : dim * 2, :
]
lowercase__ = in_proj_bias[
dim : dim * 2
]
lowercase__ = in_proj_weight[
-dim :, :
]
lowercase__ = in_proj_bias[-dim :]
# fmt: on
def _lowerCAmelCase ( A__ ):
lowercase__, lowercase__ = x.shape
lowercase__ = x.reshape(A__ , 4 , in_channel // 4 )
lowercase__ = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(A__ , A__ )
return x
def _lowerCAmelCase ( A__ ):
lowercase__, lowercase__ = x.shape
lowercase__ = x.reshape(A__ , in_channel // 4 , 4 )
lowercase__ = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(A__ , A__ )
return x
def _lowerCAmelCase ( A__ ):
lowercase__ = x.shape[0]
lowercase__ = x.reshape(4 , in_channel // 4 )
lowercase__ = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(A__ )
return x
def _lowerCAmelCase ( A__ ):
lowercase__ = x.shape[0]
lowercase__ = x.reshape(in_channel // 4 , 4 )
lowercase__ = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(A__ )
return x
def _lowerCAmelCase ( A__ , A__ , A__ ):
lowercase__ = {
'upernet-swin-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth',
'upernet-swin-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth',
'upernet-swin-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth',
'upernet-swin-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth',
}
lowercase__ = model_name_to_url[model_name]
lowercase__ = torch.hub.load_state_dict_from_url(A__ , map_location='cpu' , file_name=A__ )[
'state_dict'
]
for name, param in state_dict.items():
print(A__ , param.shape )
lowercase__ = get_upernet_config(A__ )
lowercase__ = UperNetForSemanticSegmentation(A__ )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
lowercase__ = state_dict.pop(A__ )
if "bn" in key:
lowercase__ = key.replace('bn' , 'batch_norm' )
lowercase__ = val
# rename keys
lowercase__ = create_rename_keys(A__ )
for src, dest in rename_keys:
rename_key(A__ , A__ , A__ )
read_in_q_k_v(A__ , config.backbone_config )
# fix downsample parameters
for key, value in state_dict.items():
if "downsample" in key:
if "reduction" in key:
lowercase__ = reverse_correct_unfold_reduction_order(A__ )
if "norm" in key:
lowercase__ = reverse_correct_unfold_norm_order(A__ )
model.load_state_dict(A__ )
# verify on image
lowercase__ = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'
lowercase__ = Image.open(requests.get(A__ , stream=A__ ).raw ).convert('RGB' )
lowercase__ = SegformerImageProcessor()
lowercase__ = processor(A__ , return_tensors='pt' ).pixel_values
with torch.no_grad():
lowercase__ = model(A__ )
lowercase__ = outputs.logits
print(logits.shape )
print('First values of logits:' , logits[0, 0, :3, :3] )
# assert values
if model_name == "upernet-swin-tiny":
lowercase__ = torch.tensor(
[[-7.59_58, -7.59_58, -7.43_02], [-7.59_58, -7.59_58, -7.43_02], [-7.47_97, -7.47_97, -7.30_68]] )
elif model_name == "upernet-swin-small":
lowercase__ = torch.tensor(
[[-7.19_21, -7.19_21, -6.95_32], [-7.19_21, -7.19_21, -6.95_32], [-7.09_08, -7.09_08, -6.85_34]] )
elif model_name == "upernet-swin-base":
lowercase__ = torch.tensor(
[[-6.58_51, -6.58_51, -6.43_30], [-6.58_51, -6.58_51, -6.43_30], [-6.47_63, -6.47_63, -6.32_54]] )
elif model_name == "upernet-swin-large":
lowercase__ = torch.tensor(
[[-7.52_97, -7.52_97, -7.38_02], [-7.52_97, -7.52_97, -7.38_02], [-7.40_44, -7.40_44, -7.25_86]] )
print('Logits:' , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , A__ , atol=1E-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(A__ )
print(F'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(A__ )
if push_to_hub:
print(F'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(F'''openmmlab/{model_name}''' )
processor.push_to_hub(F'''openmmlab/{model_name}''' )
if __name__ == "__main__":
a__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="upernet-swin-tiny",
type=str,
choices=[F'''upernet-swin-{size}''' for size in ["tiny", "small", "base", "large"]],
help="Name of the Swin + UperNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
a__ : Any = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 642
|
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class UpperCAmelCase__( unittest.TestCase , lowerCamelCase ):
'''simple docstring'''
def UpperCAmelCase ( self : List[str]) -> Any:
"""simple docstring"""
lowercase__ = load_tool('text-classification')
self.tool.setup()
lowercase__ = load_tool('text-classification' , remote=lowerCAmelCase)
def UpperCAmelCase ( self : Any) -> Tuple:
"""simple docstring"""
lowercase__ = self.tool('That\'s quite cool' , ['positive', 'negative'])
self.assertEqual(lowerCAmelCase , 'positive')
def UpperCAmelCase ( self : int) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.remote_tool('That\'s quite cool' , ['positive', 'negative'])
self.assertEqual(lowerCAmelCase , 'positive')
def UpperCAmelCase ( self : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.tool(text='That\'s quite cool' , labels=['positive', 'negative'])
self.assertEqual(lowerCAmelCase , 'positive')
def UpperCAmelCase ( self : Any) -> Any:
"""simple docstring"""
lowercase__ = self.remote_tool(text='That\'s quite cool' , labels=['positive', 'negative'])
self.assertEqual(lowerCAmelCase , 'positive')
| 642
| 1
|
from abc import ABC, abstractmethod
from typing import List, Optional
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
def __init__( self : Optional[Any]) -> Optional[Any]:
"""simple docstring"""
self.test()
def UpperCAmelCase ( self : Union[str, Any]) -> Any:
"""simple docstring"""
lowercase__ = 0
lowercase__ = False
while not completed:
if counter == 1:
self.reset()
lowercase__ = self.advance()
if not self.does_advance(lowerCAmelCase):
raise Exception(
'Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.')
lowercase__, lowercase__, lowercase__ = self.update(lowerCAmelCase)
counter += 1
if counter > 1_00_00:
raise Exception('update() does not fulfill the constraint.')
if self.remaining() != 0:
raise Exception('Custom Constraint is not defined correctly.')
@abstractmethod
def UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''')
@abstractmethod
def UpperCAmelCase ( self : Any , lowerCAmelCase : int) -> List[str]:
"""simple docstring"""
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''')
@abstractmethod
def UpperCAmelCase ( self : int , lowerCAmelCase : int) -> Optional[int]:
"""simple docstring"""
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''')
@abstractmethod
def UpperCAmelCase ( self : str) -> List[Any]:
"""simple docstring"""
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''')
@abstractmethod
def UpperCAmelCase ( self : str) -> Any:
"""simple docstring"""
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''')
@abstractmethod
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : Optional[Any]=False) -> int:
"""simple docstring"""
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''')
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
def __init__( self : Dict , lowerCAmelCase : List[int]) -> Union[str, Any]:
"""simple docstring"""
super(lowerCAmelCase , self).__init__()
if not isinstance(lowerCAmelCase , lowerCAmelCase) or len(lowerCAmelCase) == 0:
raise ValueError(f'''`token_ids` has to be a non-empty list, but is {token_ids}.''')
if any((not isinstance(lowerCAmelCase , lowerCAmelCase) or token_id < 0) for token_id in token_ids):
raise ValueError(f'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''')
lowercase__ = token_ids
lowercase__ = len(self.token_ids)
lowercase__ = -1 # the index of the currently fulfilled step
lowercase__ = False
def UpperCAmelCase ( self : Tuple) -> Any:
"""simple docstring"""
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : int) -> int:
"""simple docstring"""
if not isinstance(lowerCAmelCase , lowerCAmelCase):
raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(lowerCAmelCase)}''')
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def UpperCAmelCase ( self : Any , lowerCAmelCase : int) -> Optional[int]:
"""simple docstring"""
if not isinstance(lowerCAmelCase , lowerCAmelCase):
raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(lowerCAmelCase)}''')
lowercase__ = False
lowercase__ = False
lowercase__ = False
if self.does_advance(lowerCAmelCase):
self.fulfilled_idx += 1
lowercase__ = True
if self.fulfilled_idx == (self.seqlen - 1):
lowercase__ = True
lowercase__ = completed
else:
# failed to make progress.
lowercase__ = True
self.reset()
return stepped, completed, reset
def UpperCAmelCase ( self : Optional[Any]) -> List[str]:
"""simple docstring"""
lowercase__ = False
lowercase__ = 0
def UpperCAmelCase ( self : Any) -> str:
"""simple docstring"""
return self.seqlen - (self.fulfilled_idx + 1)
def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : List[str]=False) -> Any:
"""simple docstring"""
lowercase__ = PhrasalConstraint(self.token_ids)
if stateful:
lowercase__ = self.seqlen
lowercase__ = self.fulfilled_idx
lowercase__ = self.completed
return new_constraint
class UpperCAmelCase__:
'''simple docstring'''
def __init__( self : Union[str, Any] , lowerCAmelCase : List[List[int]] , lowerCAmelCase : Any=True) -> Tuple:
"""simple docstring"""
lowercase__ = max([len(lowerCAmelCase) for one in nested_token_ids])
lowercase__ = {}
for token_ids in nested_token_ids:
lowercase__ = root
for tidx, token_id in enumerate(lowerCAmelCase):
if token_id not in level:
lowercase__ = {}
lowercase__ = level[token_id]
if no_subsets and self.has_subsets(lowerCAmelCase , lowerCAmelCase):
raise ValueError(
'Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'
f''' {nested_token_ids}.''')
lowercase__ = root
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : int) -> List[str]:
"""simple docstring"""
lowercase__ = self.trie
for current_token in current_seq:
lowercase__ = start[current_token]
lowercase__ = list(start.keys())
return next_tokens
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : int) -> int:
"""simple docstring"""
lowercase__ = self.next_tokens(lowerCAmelCase)
return len(lowerCAmelCase) == 0
def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : Dict) -> Any:
"""simple docstring"""
lowercase__ = list(root.values())
if len(lowerCAmelCase) == 0:
return 1
else:
return sum([self.count_leaves(lowerCAmelCase) for nn in next_nodes])
def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : str , lowerCAmelCase : List[str]) -> Dict:
"""simple docstring"""
lowercase__ = self.count_leaves(lowerCAmelCase)
return len(lowerCAmelCase) != leaf_count
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
def __init__( self : Optional[int] , lowerCAmelCase : List[List[int]]) -> int:
"""simple docstring"""
super(lowerCAmelCase , self).__init__()
if not isinstance(lowerCAmelCase , lowerCAmelCase) or len(lowerCAmelCase) == 0:
raise ValueError(f'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''')
if any(not isinstance(lowerCAmelCase , lowerCAmelCase) for token_ids in nested_token_ids):
raise ValueError(f'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''')
if any(
any((not isinstance(lowerCAmelCase , lowerCAmelCase) or token_id < 0) for token_id in token_ids)
for token_ids in nested_token_ids):
raise ValueError(
f'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''')
lowercase__ = DisjunctiveTrie(lowerCAmelCase)
lowercase__ = nested_token_ids
lowercase__ = self.trie.max_height
lowercase__ = []
lowercase__ = False
def UpperCAmelCase ( self : Optional[Any]) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.trie.next_tokens(self.current_seq)
if len(lowerCAmelCase) == 0:
return None
else:
return token_list
def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : int) -> List[Any]:
"""simple docstring"""
if not isinstance(lowerCAmelCase , lowerCAmelCase):
raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(lowerCAmelCase)}''')
lowercase__ = self.trie.next_tokens(self.current_seq)
return token_id in next_tokens
def UpperCAmelCase ( self : List[str] , lowerCAmelCase : int) -> int:
"""simple docstring"""
if not isinstance(lowerCAmelCase , lowerCAmelCase):
raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(lowerCAmelCase)}''')
lowercase__ = False
lowercase__ = False
lowercase__ = False
if self.does_advance(lowerCAmelCase):
self.current_seq.append(lowerCAmelCase)
lowercase__ = True
else:
lowercase__ = True
self.reset()
lowercase__ = self.trie.reached_leaf(self.current_seq)
lowercase__ = completed
return stepped, completed, reset
def UpperCAmelCase ( self : str) -> Optional[Any]:
"""simple docstring"""
lowercase__ = False
lowercase__ = []
def UpperCAmelCase ( self : int) -> Dict:
"""simple docstring"""
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq)
def UpperCAmelCase ( self : List[str] , lowerCAmelCase : Union[str, Any]=False) -> str:
"""simple docstring"""
lowercase__ = DisjunctiveConstraint(self.token_ids)
if stateful:
lowercase__ = self.seqlen
lowercase__ = self.current_seq
lowercase__ = self.completed
return new_constraint
class UpperCAmelCase__:
'''simple docstring'''
def __init__( self : int , lowerCAmelCase : List[Constraint]) -> Tuple:
"""simple docstring"""
lowercase__ = constraints
# max # of steps required to fulfill a given constraint
lowercase__ = max([c.seqlen for c in constraints])
lowercase__ = len(lowerCAmelCase)
lowercase__ = False
self.init_state()
def UpperCAmelCase ( self : str) -> Tuple:
"""simple docstring"""
lowercase__ = []
lowercase__ = None
lowercase__ = [constraint.copy(stateful=lowerCAmelCase) for constraint in self.constraints]
def UpperCAmelCase ( self : Any) -> Optional[Any]:
"""simple docstring"""
lowercase__ = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints) * self.max_seqlen) + add
def UpperCAmelCase ( self : int) -> str:
"""simple docstring"""
lowercase__ = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
lowercase__ = constraint.advance()
if isinstance(lowerCAmelCase , lowerCAmelCase):
token_list.append(lowerCAmelCase)
elif isinstance(lowerCAmelCase , lowerCAmelCase):
token_list.extend(lowerCAmelCase)
else:
lowercase__ = self.inprogress_constraint.advance()
if isinstance(lowerCAmelCase , lowerCAmelCase):
token_list.append(lowerCAmelCase)
elif isinstance(lowerCAmelCase , lowerCAmelCase):
token_list.extend(lowerCAmelCase)
if len(lowerCAmelCase) == 0:
return None
else:
return token_list
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : Optional[List[int]]) -> int:
"""simple docstring"""
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
lowercase__, lowercase__ = self.add(lowerCAmelCase)
# the entire list of constraints are fulfilled
if self.completed:
break
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : int) -> Dict:
"""simple docstring"""
if not isinstance(lowerCAmelCase , lowerCAmelCase):
raise ValueError(f'''`token_id` should be an `int`, but is `{token_id}`.''')
lowercase__, lowercase__ = False, False
if self.completed:
lowercase__ = True
lowercase__ = False
return complete, stepped
if self.inprogress_constraint is not None:
# In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current
# job, simply update the state
lowercase__, lowercase__, lowercase__ = self.inprogress_constraint.update(lowerCAmelCase)
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=lowerCAmelCase))
lowercase__ = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint)
lowercase__ = None
if len(self.pending_constraints) == 0:
# we're done!
lowercase__ = True
else:
# Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
# of constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints):
if pending_constraint.does_advance(lowerCAmelCase):
lowercase__, lowercase__, lowercase__ = pending_constraint.update(lowerCAmelCase)
if not stepped:
raise Exception(
'`constraint.update(token_id)` is not yielding incremental progress, '
'even though `constraint.does_advance(token_id)` is true.')
if complete:
self.complete_constraints.append(lowerCAmelCase)
lowercase__ = None
if not complete and stepped:
lowercase__ = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
lowercase__ = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
lowercase__ = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def UpperCAmelCase ( self : Dict , lowerCAmelCase : Dict=True) -> List[str]:
"""simple docstring"""
lowercase__ = ConstraintListState(self.constraints) # we actually never though self.constraints objects
# throughout this process. So it's at initialization state.
if stateful:
lowercase__ = [
constraint.copy(stateful=lowerCAmelCase) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
lowercase__ = self.inprogress_constraint.copy(stateful=lowerCAmelCase)
lowercase__ = [constraint.copy() for constraint in self.pending_constraints]
return new_state
| 642
|
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : List[Any] = None
A : Optional[int] = None
@property
def UpperCAmelCase ( self : str) -> Union[str, Any]:
"""simple docstring"""
return self.feat_extract_tester.prepare_feat_extract_dict()
def UpperCAmelCase ( self : int) -> Any:
"""simple docstring"""
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
self.assertTrue(hasattr(lowerCAmelCase , 'feature_size'))
self.assertTrue(hasattr(lowerCAmelCase , 'sampling_rate'))
self.assertTrue(hasattr(lowerCAmelCase , 'padding_value'))
def UpperCAmelCase ( self : Union[str, Any]) -> Dict:
"""simple docstring"""
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
self.assertTrue(all(len(lowerCAmelCase) == len(lowerCAmelCase) for x, y in zip(lowerCAmelCase , processed_features[input_name])))
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCAmelCase)
lowercase__ = BatchFeature({input_name: speech_inputs} , tensor_type='np')
lowercase__ = processed_features[input_name]
if len(batch_features_input.shape) < 3:
lowercase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
@require_torch
def UpperCAmelCase ( self : Dict) -> int:
"""simple docstring"""
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCAmelCase)
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs} , tensor_type='pt')
lowercase__ = processed_features[input_name]
if len(batch_features_input.shape) < 3:
lowercase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
@require_tf
def UpperCAmelCase ( self : Optional[Any]) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCAmelCase)
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs} , tensor_type='tf')
lowercase__ = processed_features[input_name]
if len(batch_features_input.shape) < 3:
lowercase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
def UpperCAmelCase ( self : str , lowerCAmelCase : str=False) -> Union[str, Any]:
"""simple docstring"""
def _inputs_have_equal_length(lowerCAmelCase : int):
lowercase__ = len(input[0])
for input_slice in input[1:]:
if len(lowerCAmelCase) != length:
return False
return True
def _inputs_are_equal(lowerCAmelCase : Optional[Any] , lowerCAmelCase : Tuple):
if len(lowerCAmelCase) != len(lowerCAmelCase):
return False
for input_slice_a, input_slice_a in zip(lowerCAmelCase , lowerCAmelCase):
if not np.allclose(np.asarray(lowerCAmelCase) , np.asarray(lowerCAmelCase) , atol=1E-3):
return False
return True
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(numpify=lowerCAmelCase)
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
lowercase__ = self.feat_extract_tester.seq_length_diff
lowercase__ = self.feat_extract_tester.max_seq_length + pad_diff
lowercase__ = self.feat_extract_tester.min_seq_length
lowercase__ = self.feat_extract_tester.batch_size
lowercase__ = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
lowercase__ = feat_extract.pad(lowerCAmelCase , padding=lowerCAmelCase)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest')
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[-1]))
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='np')
lowercase__ = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(lowerCAmelCase):
feat_extract.pad(lowerCAmelCase , padding='max_length')[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=lowerCAmelCase , return_tensors='np')
lowercase__ = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(_inputs_are_equal(lowerCAmelCase , lowerCAmelCase))
self.assertTrue(len(input_a[0]) == pad_min_length)
self.assertTrue(len(input_a[1]) == pad_min_length + pad_diff)
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0])))
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length))
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size)
# test padding for `pad_to_multiple_of` for List[int] + numpy
lowercase__ = feat_extract.pad(lowerCAmelCase , pad_to_multiple_of=10)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , pad_to_multiple_of=10)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , pad_to_multiple_of=10 , max_length=lowerCAmelCase)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , pad_to_multiple_of=10 , max_length=lowerCAmelCase , return_tensors='np' , )
lowercase__ = input_a[input_name]
self.assertTrue(all(len(lowerCAmelCase) % 10 == 0 for x in input_a))
self.assertTrue(_inputs_are_equal(lowerCAmelCase , lowerCAmelCase))
lowercase__ = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(lowerCAmelCase) == expected_mult_pad_length for x in input_a))
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length))
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size)
# Check padding value is correct
lowercase__ = (np.ones(self.feat_extract_tester.feature_size) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0])[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length))
< 1E-3)
self.assertTrue(
abs(
np.asarray(input_a[1])[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff))
< 1E-3)
self.assertTrue(
abs(
np.asarray(input_a[2])[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff))
< 1E-3)
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1E-3)
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length))
< 1E-3)
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : Dict=False) -> str:
"""simple docstring"""
def _inputs_have_equal_length(lowerCAmelCase : int):
lowercase__ = len(input[0])
for input_slice in input[1:]:
if len(lowerCAmelCase) != length:
return False
return True
def _inputs_are_equal(lowerCAmelCase : str , lowerCAmelCase : Optional[Any]):
if len(lowerCAmelCase) != len(lowerCAmelCase):
return False
for input_slice_a, input_slice_a in zip(lowerCAmelCase , lowerCAmelCase):
if not np.allclose(np.asarray(lowerCAmelCase) , np.asarray(lowerCAmelCase) , atol=1E-3):
return False
return True
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(numpify=lowerCAmelCase)
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
# truncate to smallest
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , truncation=lowerCAmelCase)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]))
lowercase__ = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase))
# truncate to smallest with np
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , return_tensors='np' , truncation=lowerCAmelCase , )
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , return_tensors='np')
lowercase__ = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(input_a.shape[1] == len(speech_inputs[0]))
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase))
# truncate to middle
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[1]) , truncation=lowerCAmelCase , return_tensors='np' , )
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[1]) , truncation=lowerCAmelCase)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[1]) , return_tensors='np')
lowercase__ = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1]))
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(_inputs_are_equal(lowerCAmelCase , lowerCAmelCase))
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(len(input_a[-1]) == len(speech_inputs[-1]))
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(lowerCAmelCase):
feat_extract.pad(lowerCAmelCase , truncation=lowerCAmelCase)[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(lowerCAmelCase):
feat_extract.pad(lowerCAmelCase , padding='longest' , truncation=lowerCAmelCase)[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(lowerCAmelCase):
feat_extract.pad(lowerCAmelCase , padding='longest' , truncation=lowerCAmelCase)[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(lowerCAmelCase):
feat_extract.pad(lowerCAmelCase , padding='max_length' , truncation=lowerCAmelCase)[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
lowercase__ = 12
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , pad_to_multiple_of=lowerCAmelCase , truncation=lowerCAmelCase , )
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , pad_to_multiple_of=lowerCAmelCase , )
lowercase__ = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
lowercase__ = len(speech_inputs[0])
if expected_length % pad_to_multiple_of != 0:
lowercase__ = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0]) == expected_length)
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase))
def UpperCAmelCase ( self : List[str]) -> List[str]:
"""simple docstring"""
self._check_padding(numpify=lowerCAmelCase)
def UpperCAmelCase ( self : Any) -> Optional[Any]:
"""simple docstring"""
self._check_padding(numpify=lowerCAmelCase)
def UpperCAmelCase ( self : List[Any]) -> int:
"""simple docstring"""
self._check_truncation(numpify=lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> Dict:
"""simple docstring"""
self._check_truncation(numpify=lowerCAmelCase)
@require_torch
def UpperCAmelCase ( self : Dict) -> List[str]:
"""simple docstring"""
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='np')[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='pt')[input_name]
self.assertTrue(abs(input_np.astype(np.floataa).sum() - input_pt.numpy().astype(np.floataa).sum()) < 1E-2)
@require_tf
def UpperCAmelCase ( self : str) -> str:
"""simple docstring"""
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='np')[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='tf')[input_name]
self.assertTrue(abs(input_np.astype(np.floataa).sum() - input_tf.numpy().astype(np.floataa).sum()) < 1E-2)
def UpperCAmelCase ( self : Optional[Any]) -> Tuple:
"""simple docstring"""
lowercase__ = self.feat_extract_dict
lowercase__ = True
lowercase__ = self.feature_extraction_class(**lowerCAmelCase)
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowercase__ = [len(lowerCAmelCase) for x in speech_inputs]
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='np')
self.assertIn('attention_mask' , lowerCAmelCase)
self.assertListEqual(list(processed.attention_mask.shape) , list(processed[input_name].shape[:2]))
self.assertListEqual(processed.attention_mask.sum(-1).tolist() , lowerCAmelCase)
def UpperCAmelCase ( self : str) -> Tuple:
"""simple docstring"""
lowercase__ = self.feat_extract_dict
lowercase__ = True
lowercase__ = self.feature_extraction_class(**lowerCAmelCase)
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowercase__ = [len(lowerCAmelCase) for x in speech_inputs]
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
lowercase__ = min(lowerCAmelCase)
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=lowerCAmelCase , truncation=lowerCAmelCase , return_tensors='np')
self.assertIn('attention_mask' , lowerCAmelCase)
self.assertListEqual(
list(processed_pad.attention_mask.shape) , [processed_pad[input_name].shape[0], max_length])
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1).tolist() , [max_length for x in speech_inputs])
| 642
| 1
|
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class UpperCAmelCase__( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase ( self : Any) -> None:
"""simple docstring"""
lowercase__ = Vector([1, 2, 3])
self.assertEqual(x.component(0) , 1)
self.assertEqual(x.component(2) , 3)
lowercase__ = Vector()
def UpperCAmelCase ( self : Optional[Any]) -> None:
"""simple docstring"""
lowercase__ = Vector([0, 0, 0, 0, 0, 1])
self.assertEqual(str(lowerCAmelCase) , '(0,0,0,0,0,1)')
def UpperCAmelCase ( self : Any) -> None:
"""simple docstring"""
lowercase__ = Vector([1, 2, 3, 4])
self.assertEqual(len(lowerCAmelCase) , 4)
def UpperCAmelCase ( self : Optional[int]) -> None:
"""simple docstring"""
lowercase__ = Vector([1, 2])
lowercase__ = Vector([1, 2, 3, 4, 5])
lowercase__ = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
lowercase__ = Vector([1, -1, 1, -1, 2, -3, 4, -5])
self.assertAlmostEqual(x.euclidean_length() , 2.2_36 , 3)
self.assertAlmostEqual(y.euclidean_length() , 7.4_16 , 3)
self.assertEqual(z.euclidean_length() , 0)
self.assertAlmostEqual(w.euclidean_length() , 7.6_16 , 3)
def UpperCAmelCase ( self : List[str]) -> None:
"""simple docstring"""
lowercase__ = Vector([1, 2, 3])
lowercase__ = Vector([1, 1, 1])
self.assertEqual((x + y).component(0) , 2)
self.assertEqual((x + y).component(1) , 3)
self.assertEqual((x + y).component(2) , 4)
def UpperCAmelCase ( self : List[Any]) -> None:
"""simple docstring"""
lowercase__ = Vector([1, 2, 3])
lowercase__ = Vector([1, 1, 1])
self.assertEqual((x - y).component(0) , 0)
self.assertEqual((x - y).component(1) , 1)
self.assertEqual((x - y).component(2) , 2)
def UpperCAmelCase ( self : List[str]) -> None:
"""simple docstring"""
lowercase__ = Vector([1, 2, 3])
lowercase__ = Vector([2, -1, 4]) # for test of dot product
lowercase__ = Vector([1, -2, -1])
self.assertEqual(str(x * 3.0) , '(3.0,6.0,9.0)')
self.assertEqual((a * b) , 0)
def UpperCAmelCase ( self : Optional[int]) -> None:
"""simple docstring"""
self.assertEqual(str(zero_vector(10)).count('0') , 10)
def UpperCAmelCase ( self : Dict) -> None:
"""simple docstring"""
self.assertEqual(str(unit_basis_vector(3 , 1)) , '(0,1,0)')
def UpperCAmelCase ( self : Optional[Any]) -> None:
"""simple docstring"""
lowercase__ = Vector([1, 2, 3])
lowercase__ = Vector([1, 0, 1])
self.assertEqual(str(axpy(2 , lowerCAmelCase , lowerCAmelCase)) , '(3,4,7)')
def UpperCAmelCase ( self : Tuple) -> None:
"""simple docstring"""
lowercase__ = Vector([1, 0, 0, 0, 0, 0])
lowercase__ = x.copy()
self.assertEqual(str(lowerCAmelCase) , str(lowerCAmelCase))
def UpperCAmelCase ( self : Any) -> None:
"""simple docstring"""
lowercase__ = Vector([1, 0, 0])
x.change_component(0 , 0)
x.change_component(1 , 1)
self.assertEqual(str(lowerCAmelCase) , '(0,1,0)')
def UpperCAmelCase ( self : Any) -> None:
"""simple docstring"""
lowercase__ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
self.assertEqual('|1,2,3|\n|2,4,5|\n|6,7,8|\n' , str(lowerCAmelCase))
def UpperCAmelCase ( self : Union[str, Any]) -> None:
"""simple docstring"""
lowercase__ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
lowercase__ = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
for x in range(a.height()):
for y in range(a.width()):
self.assertEqual(minors[x][y] , a.minor(lowerCAmelCase , lowerCAmelCase))
def UpperCAmelCase ( self : Any) -> None:
"""simple docstring"""
lowercase__ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
lowercase__ = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
for x in range(a.height()):
for y in range(a.width()):
self.assertEqual(cofactors[x][y] , a.cofactor(lowerCAmelCase , lowerCAmelCase))
def UpperCAmelCase ( self : Any) -> None:
"""simple docstring"""
lowercase__ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
self.assertEqual(-5 , a.determinant())
def UpperCAmelCase ( self : Dict) -> None:
"""simple docstring"""
lowercase__ = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3)
lowercase__ = Vector([1, 2, 3])
self.assertEqual('(14,32,50)' , str(a * x))
self.assertEqual('|2,4,6|\n|8,10,12|\n|14,16,18|\n' , str(a * 2))
def UpperCAmelCase ( self : Any) -> None:
"""simple docstring"""
lowercase__ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
a.change_component(0 , 2 , 5)
self.assertEqual('|1,2,5|\n|2,4,5|\n|6,7,8|\n' , str(lowerCAmelCase))
def UpperCAmelCase ( self : Any) -> None:
"""simple docstring"""
lowercase__ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
self.assertEqual(7 , a.component(2 , 1) , 0.01)
def UpperCAmelCase ( self : Tuple) -> None:
"""simple docstring"""
lowercase__ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
lowercase__ = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3)
self.assertEqual('|2,4,10|\n|4,8,10|\n|12,14,18|\n' , str(a + b))
def UpperCAmelCase ( self : str) -> None:
"""simple docstring"""
lowercase__ = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3)
lowercase__ = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3)
self.assertEqual('|0,0,-4|\n|0,0,0|\n|0,0,-2|\n' , str(a - b))
def UpperCAmelCase ( self : Any) -> None:
"""simple docstring"""
self.assertEqual(
'|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n' , str(square_zero_matrix(5)) , )
if __name__ == "__main__":
unittest.main()
| 642
|
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def _lowerCAmelCase ( A__ ):
lowercase__ = prime_factors(A__ )
if is_square_free(A__ ):
return -1 if len(A__ ) % 2 else 1
return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 642
| 1
|
from __future__ import annotations
def _lowerCAmelCase ( A__ , A__ ):
if b == 0:
return (1, 0)
((lowercase__), (lowercase__)) = extended_euclid(A__ , a % b )
lowercase__ = a // b
return (y, x - k * y)
def _lowerCAmelCase ( A__ , A__ , A__ , A__ ):
((lowercase__), (lowercase__)) = extended_euclid(A__ , A__ )
lowercase__ = na * na
lowercase__ = ra * x * na + ra * y * na
return (n % m + m) % m
def _lowerCAmelCase ( A__ , A__ ):
((lowercase__), (lowercase__)) = extended_euclid(A__ , A__ )
if b < 0:
lowercase__ = (b % n + n) % n
return b
def _lowerCAmelCase ( A__ , A__ , A__ , A__ ):
lowercase__, lowercase__ = invert_modulo(A__ , A__ ), invert_modulo(A__ , A__ )
lowercase__ = na * na
lowercase__ = ra * x * na + ra * y * na
return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name="chinese_remainder_theorem", verbose=True)
testmod(name="chinese_remainder_theorem2", verbose=True)
testmod(name="invert_modulo", verbose=True)
testmod(name="extended_euclid", verbose=True)
| 642
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
a__ : List[str] = logging.get_logger(__name__)
a__ : List[Any] = {
"microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}
class UpperCAmelCase__( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
A : List[str] = "focalnet"
def __init__( self : Dict , lowerCAmelCase : Union[str, Any]=2_24 , lowerCAmelCase : List[str]=4 , lowerCAmelCase : int=3 , lowerCAmelCase : Union[str, Any]=96 , lowerCAmelCase : List[Any]=False , lowerCAmelCase : int=[1_92, 3_84, 7_68, 7_68] , lowerCAmelCase : str=[2, 2, 6, 2] , lowerCAmelCase : Tuple=[2, 2, 2, 2] , lowerCAmelCase : Optional[Any]=[3, 3, 3, 3] , lowerCAmelCase : int="gelu" , lowerCAmelCase : Any=4.0 , lowerCAmelCase : List[str]=0.0 , lowerCAmelCase : Union[str, Any]=0.1 , lowerCAmelCase : Union[str, Any]=False , lowerCAmelCase : Tuple=1E-4 , lowerCAmelCase : List[Any]=False , lowerCAmelCase : Optional[int]=False , lowerCAmelCase : List[str]=False , lowerCAmelCase : str=0.02 , lowerCAmelCase : Optional[int]=1E-5 , lowerCAmelCase : List[Any]=32 , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Union[str, Any]=None , **lowerCAmelCase : str , ) -> List[str]:
"""simple docstring"""
super().__init__(**lowerCAmelCase)
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = embed_dim
lowercase__ = use_conv_embed
lowercase__ = hidden_sizes
lowercase__ = depths
lowercase__ = focal_levels
lowercase__ = focal_windows
lowercase__ = hidden_act
lowercase__ = mlp_ratio
lowercase__ = hidden_dropout_prob
lowercase__ = drop_path_rate
lowercase__ = use_layerscale
lowercase__ = layerscale_value
lowercase__ = use_post_layernorm
lowercase__ = use_post_layernorm_in_modulation
lowercase__ = normalize_modulator
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = encoder_stride
lowercase__ = ['stem'] + [f'''stage{idx}''' for idx in range(1 , len(self.depths) + 1)]
lowercase__, lowercase__ = get_aligned_output_features_output_indices(
out_features=lowerCAmelCase , out_indices=lowerCAmelCase , stage_names=self.stage_names)
| 642
| 1
|
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
a__ : Optional[int] = logging.get_logger(__name__) # pylint: disable=invalid-name
a__ : Optional[int] = "\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)[\"depth\"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline(\"depth-estimation\")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to(\"cuda\")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to(\"cuda\")\n\n\n >>> img = load_image(\n ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n ... \"/kandinsky/cat.png\"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")\n\n >>> prompt = \"A robot, 4k photo\"\n >>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"\n\n >>> generator = torch.Generator(device=\"cuda\").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save(\"robot_cat.png\")\n ```\n"
def _lowerCAmelCase ( A__ , A__ , A__=8 ):
lowercase__ = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
lowercase__ = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
def __init__( self : Tuple , lowerCAmelCase : UNetaDConditionModel , lowerCAmelCase : DDPMScheduler , lowerCAmelCase : VQModel , ) -> Any:
"""simple docstring"""
super().__init__()
self.register_modules(
unet=lowerCAmelCase , scheduler=lowerCAmelCase , movq=lowerCAmelCase , )
lowercase__ = 2 ** (len(self.movq.config.block_out_channels) - 1)
def UpperCAmelCase ( self : str , lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Tuple) -> Dict:
"""simple docstring"""
if latents is None:
lowercase__ = randn_tensor(lowerCAmelCase , generator=lowerCAmelCase , device=lowerCAmelCase , dtype=lowerCAmelCase)
else:
if latents.shape != shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''')
lowercase__ = latents.to(lowerCAmelCase)
lowercase__ = latents * scheduler.init_noise_sigma
return latents
def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : List[str]=0) -> Optional[int]:
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`')
lowercase__ = torch.device(f'''cuda:{gpu_id}''')
lowercase__ = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowerCAmelCase , lowerCAmelCase)
def UpperCAmelCase ( self : Dict , lowerCAmelCase : List[str]=0) -> Dict:
"""simple docstring"""
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0'):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.')
lowercase__ = torch.device(f'''cuda:{gpu_id}''')
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=lowerCAmelCase)
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowercase__ = None
for cpu_offloaded_model in [self.unet, self.movq]:
lowercase__, lowercase__ = cpu_offload_with_hook(lowerCAmelCase , lowerCAmelCase , prev_module_hook=lowerCAmelCase)
# We'll offload the last model manually.
lowercase__ = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCAmelCase ( self : int) -> Optional[Any]:
"""simple docstring"""
if not hasattr(self.unet , '_hf_hook'):
return self.device
for module in self.unet.modules():
if (
hasattr(lowerCAmelCase , '_hf_hook')
and hasattr(module._hf_hook , 'execution_device')
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device)
return self.device
@torch.no_grad()
@replace_example_docstring(lowerCAmelCase)
def __call__( self : Dict , lowerCAmelCase : Union[torch.FloatTensor, List[torch.FloatTensor]] , lowerCAmelCase : Union[torch.FloatTensor, List[torch.FloatTensor]] , lowerCAmelCase : torch.FloatTensor , lowerCAmelCase : int = 5_12 , lowerCAmelCase : int = 5_12 , lowerCAmelCase : int = 1_00 , lowerCAmelCase : float = 4.0 , lowerCAmelCase : int = 1 , lowerCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCAmelCase : Optional[torch.FloatTensor] = None , lowerCAmelCase : Optional[str] = "pil" , lowerCAmelCase : bool = True , ) -> Dict:
"""simple docstring"""
lowercase__ = self._execution_device
lowercase__ = guidance_scale > 1.0
if isinstance(lowerCAmelCase , lowerCAmelCase):
lowercase__ = torch.cat(lowerCAmelCase , dim=0)
if isinstance(lowerCAmelCase , lowerCAmelCase):
lowercase__ = torch.cat(lowerCAmelCase , dim=0)
if isinstance(lowerCAmelCase , lowerCAmelCase):
lowercase__ = torch.cat(lowerCAmelCase , dim=0)
lowercase__ = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
lowercase__ = image_embeds.repeat_interleave(lowerCAmelCase , dim=0)
lowercase__ = negative_image_embeds.repeat_interleave(lowerCAmelCase , dim=0)
lowercase__ = hint.repeat_interleave(lowerCAmelCase , dim=0)
lowercase__ = torch.cat([negative_image_embeds, image_embeds] , dim=0).to(dtype=self.unet.dtype , device=lowerCAmelCase)
lowercase__ = torch.cat([hint, hint] , dim=0).to(dtype=self.unet.dtype , device=lowerCAmelCase)
self.scheduler.set_timesteps(lowerCAmelCase , device=lowerCAmelCase)
lowercase__ = self.scheduler.timesteps
lowercase__ = self.movq.config.latent_channels
lowercase__, lowercase__ = downscale_height_and_width(lowerCAmelCase , lowerCAmelCase , self.movq_scale_factor)
# create initial latent
lowercase__ = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , self.scheduler , )
for i, t in enumerate(self.progress_bar(lowerCAmelCase)):
# expand the latents if we are doing classifier free guidance
lowercase__ = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
lowercase__ = {'image_embeds': image_embeds, 'hint': hint}
lowercase__ = self.unet(
sample=lowerCAmelCase , timestep=lowerCAmelCase , encoder_hidden_states=lowerCAmelCase , added_cond_kwargs=lowerCAmelCase , return_dict=lowerCAmelCase , )[0]
if do_classifier_free_guidance:
lowercase__, lowercase__ = noise_pred.split(latents.shape[1] , dim=1)
lowercase__, lowercase__ = noise_pred.chunk(2)
lowercase__, lowercase__ = variance_pred.chunk(2)
lowercase__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowercase__ = torch.cat([noise_pred, variance_pred_text] , dim=1)
if not (
hasattr(self.scheduler.config , 'variance_type')
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowercase__, lowercase__ = noise_pred.split(latents.shape[1] , dim=1)
# compute the previous noisy sample x_t -> x_t-1
lowercase__ = self.scheduler.step(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , generator=lowerCAmelCase , )[0]
# post-processing
lowercase__ = self.movq.decode(lowerCAmelCase , force_not_quantize=lowerCAmelCase)['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''')
if output_type in ["np", "pil"]:
lowercase__ = image * 0.5 + 0.5
lowercase__ = image.clamp(0 , 1)
lowercase__ = image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
if output_type == "pil":
lowercase__ = self.numpy_to_pil(lowerCAmelCase)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCAmelCase)
| 642
|
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a__ : Optional[int] = logging.get_logger(__name__)
a__ : Dict = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
a__ : str = {
"vocab_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
},
"merges_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
},
"tokenizer_config_file": {
"facebook/blenderbot_small-90M": (
"https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
)
},
}
a__ : Any = {"facebook/blenderbot_small-90M": 5_12}
def _lowerCAmelCase ( A__ ):
lowercase__ = set()
lowercase__ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowercase__ = char
lowercase__ = set(A__ )
return pairs
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : List[str] = VOCAB_FILES_NAMES
A : str = PRETRAINED_VOCAB_FILES_MAP
A : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : Tuple = ["input_ids", "attention_mask"]
def __init__( self : Optional[int] , lowerCAmelCase : Dict , lowerCAmelCase : Dict , lowerCAmelCase : int="__start__" , lowerCAmelCase : Dict="__end__" , lowerCAmelCase : Any="__unk__" , lowerCAmelCase : str="__null__" , **lowerCAmelCase : Optional[Any] , ) -> List[str]:
"""simple docstring"""
super().__init__(unk_token=lowerCAmelCase , bos_token=lowerCAmelCase , eos_token=lowerCAmelCase , pad_token=lowerCAmelCase , **lowerCAmelCase)
with open(lowerCAmelCase , encoding='utf-8') as vocab_handle:
lowercase__ = json.load(lowerCAmelCase)
lowercase__ = {v: k for k, v in self.encoder.items()}
with open(lowerCAmelCase , encoding='utf-8') as merges_handle:
lowercase__ = merges_handle.read().split('\n')[1:-1]
lowercase__ = [tuple(merge.split()) for merge in merges]
lowercase__ = dict(zip(lowerCAmelCase , range(len(lowerCAmelCase))))
lowercase__ = {}
@property
def UpperCAmelCase ( self : int) -> int:
"""simple docstring"""
return len(self.encoder)
def UpperCAmelCase ( self : str) -> Dict:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder)
def UpperCAmelCase ( self : str , lowerCAmelCase : str) -> str:
"""simple docstring"""
if token in self.cache:
return self.cache[token]
lowercase__ = re.sub('([.,!?()])' , R' \1' , lowerCAmelCase)
lowercase__ = re.sub('(\')' , R' \1 ' , lowerCAmelCase)
lowercase__ = re.sub(R'\s{2,}' , ' ' , lowerCAmelCase)
if "\n" in token:
lowercase__ = token.replace('\n' , ' __newln__')
lowercase__ = token.split(' ')
lowercase__ = []
for token in tokens:
if not len(lowerCAmelCase):
continue
lowercase__ = token.lower()
lowercase__ = tuple(lowerCAmelCase)
lowercase__ = tuple(list(word[:-1]) + [word[-1] + '</w>'])
lowercase__ = get_pairs(lowerCAmelCase)
if not pairs:
words.append(lowerCAmelCase)
continue
while True:
lowercase__ = min(lowerCAmelCase , key=lambda lowerCAmelCase: self.bpe_ranks.get(lowerCAmelCase , float('inf')))
if bigram not in self.bpe_ranks:
break
lowercase__, lowercase__ = bigram
lowercase__ = []
lowercase__ = 0
while i < len(lowerCAmelCase):
try:
lowercase__ = word.index(lowerCAmelCase , lowerCAmelCase)
new_word.extend(word[i:j])
lowercase__ = j
except ValueError:
new_word.extend(word[i:])
break
if word[i] == first and i < len(lowerCAmelCase) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
lowercase__ = tuple(lowerCAmelCase)
lowercase__ = new_word
if len(lowerCAmelCase) == 1:
break
else:
lowercase__ = get_pairs(lowerCAmelCase)
lowercase__ = '@@ '.join(lowerCAmelCase)
lowercase__ = word[:-4]
lowercase__ = word
words.append(lowerCAmelCase)
return " ".join(lowerCAmelCase)
def UpperCAmelCase ( self : List[str] , lowerCAmelCase : str) -> List[str]:
"""simple docstring"""
lowercase__ = []
lowercase__ = re.findall(R'\S+\n?' , lowerCAmelCase)
for token in words:
split_tokens.extend(list(self.bpe(lowerCAmelCase).split(' ')))
return split_tokens
def UpperCAmelCase ( self : int , lowerCAmelCase : str) -> int:
"""simple docstring"""
lowercase__ = token.lower()
return self.encoder.get(lowerCAmelCase , self.encoder.get(self.unk_token))
def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : int) -> str:
"""simple docstring"""
return self.decoder.get(lowerCAmelCase , self.unk_token)
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : List[str]) -> str:
"""simple docstring"""
lowercase__ = ' '.join(lowerCAmelCase).replace('@@ ' , '').strip()
return out_string
def UpperCAmelCase ( self : str , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(lowerCAmelCase):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
return
lowercase__ = os.path.join(
lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
lowercase__ = os.path.join(
lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
with open(lowerCAmelCase , 'w' , encoding='utf-8') as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase , ensure_ascii=lowerCAmelCase) + '\n')
lowercase__ = 0
with open(lowerCAmelCase , 'w' , encoding='utf-8') as writer:
writer.write('#version: 0.2\n')
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCAmelCase: kv[1]):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
' Please check that the tokenizer is not corrupted!')
lowercase__ = token_index
writer.write(' '.join(lowerCAmelCase) + '\n')
index += 1
return vocab_file, merge_file
| 642
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
a__ : Union[str, Any] = {
"configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : int = ["VisionEncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : List[str] = ["TFVisionEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Dict = ["FlaxVisionEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
a__ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 642
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a__ : Optional[int] = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Any = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Dict = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : List[str] = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : int = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
a__ : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 642
| 1
|
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class UpperCAmelCase__:
'''simple docstring'''
def __init__( self : Tuple , lowerCAmelCase : str , lowerCAmelCase : Dict=13 , lowerCAmelCase : Dict=7 , lowerCAmelCase : int=True , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : str=True , lowerCAmelCase : int=True , lowerCAmelCase : List[Any]=99 , lowerCAmelCase : List[Any]=[1, 1, 2] , lowerCAmelCase : Optional[Any]=1 , lowerCAmelCase : int=32 , lowerCAmelCase : Union[str, Any]=4 , lowerCAmelCase : Tuple=8 , lowerCAmelCase : int=37 , lowerCAmelCase : Any="gelu_new" , lowerCAmelCase : str=0.1 , lowerCAmelCase : List[str]=0.1 , lowerCAmelCase : Dict=0.0 , lowerCAmelCase : str=5_12 , lowerCAmelCase : str=3 , lowerCAmelCase : List[Any]=0.02 , lowerCAmelCase : Union[str, Any]=3 , lowerCAmelCase : Any=4 , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Optional[int]=False , ) -> List[Any]:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = seq_length
lowercase__ = is_training
lowercase__ = use_input_mask
lowercase__ = use_token_type_ids
lowercase__ = use_labels
lowercase__ = vocab_size
lowercase__ = block_sizes
lowercase__ = num_decoder_layers
lowercase__ = d_model
lowercase__ = n_head
lowercase__ = d_head
lowercase__ = d_inner
lowercase__ = hidden_act
lowercase__ = hidden_dropout
lowercase__ = attention_dropout
lowercase__ = activation_dropout
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = 2
lowercase__ = num_labels
lowercase__ = num_choices
lowercase__ = scope
lowercase__ = initializer_std
# Used in the tests to check the size of the first attention layer
lowercase__ = n_head
# Used in the tests to check the size of the first hidden state
lowercase__ = self.d_model
# Used in the tests to check the number of output hidden states/attentions
lowercase__ = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
lowercase__ = self.num_hidden_layers + 2
def UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
lowercase__ = None
if self.use_input_mask:
lowercase__ = random_attention_mask([self.batch_size, self.seq_length])
lowercase__ = None
if self.use_token_type_ids:
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
lowercase__ = None
lowercase__ = None
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
lowercase__ = ids_tensor([self.batch_size] , self.num_choices)
lowercase__ = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def UpperCAmelCase ( self : Dict , lowerCAmelCase : List[Any] , lowerCAmelCase : Dict , lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Any , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Union[str, Any] , ) -> int:
"""simple docstring"""
lowercase__ = TFFunnelModel(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
lowercase__ = [input_ids, input_mask]
lowercase__ = model(lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model))
lowercase__ = False
lowercase__ = TFFunnelModel(config=lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model))
lowercase__ = False
lowercase__ = TFFunnelModel(config=lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model))
def UpperCAmelCase ( self : List[str] , lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int] , lowerCAmelCase : Any , lowerCAmelCase : List[str] , lowerCAmelCase : int , lowerCAmelCase : Optional[Any] , ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = TFFunnelBaseModel(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
lowercase__ = [input_ids, input_mask]
lowercase__ = model(lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model))
lowercase__ = False
lowercase__ = TFFunnelBaseModel(config=lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model))
lowercase__ = False
lowercase__ = TFFunnelBaseModel(config=lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model))
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Dict , lowerCAmelCase : List[str] , lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : Tuple , ) -> str:
"""simple docstring"""
lowercase__ = TFFunnelForPreTraining(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length))
def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : Any , lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[str] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Tuple , ) -> Optional[int]:
"""simple docstring"""
lowercase__ = TFFunnelForMaskedLM(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : int , lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Dict , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[Any] , ) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.num_labels
lowercase__ = TFFunnelForSequenceClassification(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : str , lowerCAmelCase : Optional[int] , lowerCAmelCase : int , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[str] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[Any] , ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.num_choices
lowercase__ = TFFunnelForMultipleChoice(config=lowerCAmelCase)
lowercase__ = tf.tile(tf.expand_dims(lowerCAmelCase , 1) , (1, self.num_choices, 1))
lowercase__ = tf.tile(tf.expand_dims(lowerCAmelCase , 1) , (1, self.num_choices, 1))
lowercase__ = tf.tile(tf.expand_dims(lowerCAmelCase , 1) , (1, self.num_choices, 1))
lowercase__ = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def UpperCAmelCase ( self : List[str] , lowerCAmelCase : str , lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int , lowerCAmelCase : Optional[int] , lowerCAmelCase : Any , ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.num_labels
lowercase__ = TFFunnelForTokenClassification(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def UpperCAmelCase ( self : int , lowerCAmelCase : List[str] , lowerCAmelCase : int , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : Tuple , lowerCAmelCase : Tuple , ) -> Optional[int]:
"""simple docstring"""
lowercase__ = TFFunnelForQuestionAnswering(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
(
(
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
),
) = config_and_inputs
lowercase__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase__( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
A : int = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
A : Dict = (
{
"feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
"fill-mask": TFFunnelForMaskedLM,
"question-answering": TFFunnelForQuestionAnswering,
"text-classification": TFFunnelForSequenceClassification,
"token-classification": TFFunnelForTokenClassification,
"zero-shot": TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
A : Optional[int] = False
A : Optional[int] = False
def UpperCAmelCase ( self : Tuple) -> str:
"""simple docstring"""
lowercase__ = TFFunnelModelTester(self)
lowercase__ = ConfigTester(self , config_class=lowerCAmelCase)
def UpperCAmelCase ( self : int) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase ( self : Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCAmelCase)
def UpperCAmelCase ( self : int) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase)
def UpperCAmelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase)
def UpperCAmelCase ( self : Dict) -> Dict:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase)
@require_tf
class UpperCAmelCase__( lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
A : Tuple = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
A : List[str] = False
A : int = False
def UpperCAmelCase ( self : Any) -> List[Any]:
"""simple docstring"""
lowercase__ = TFFunnelModelTester(self , base=lowerCAmelCase)
lowercase__ = ConfigTester(self , config_class=lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase ( self : Tuple) -> int:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*lowerCAmelCase)
def UpperCAmelCase ( self : int) -> str:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase)
def UpperCAmelCase ( self : List[str]) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowerCAmelCase)
| 642
|
import heapq
import sys
import numpy as np
a__ : Dict = tuple[int, int]
class UpperCAmelCase__:
'''simple docstring'''
def __init__( self : List[str]) -> Any:
"""simple docstring"""
lowercase__ = []
lowercase__ = set()
def UpperCAmelCase ( self : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
if not self.empty():
return self.elements[0][0]
else:
return float('inf')
def UpperCAmelCase ( self : int) -> str:
"""simple docstring"""
return len(self.elements) == 0
def UpperCAmelCase ( self : Dict , lowerCAmelCase : List[Any] , lowerCAmelCase : List[str]) -> List[str]:
"""simple docstring"""
if item not in self.set:
heapq.heappush(self.elements , (priority, item))
self.set.add(lowerCAmelCase)
else:
# update
# print("update", item)
lowercase__ = []
((lowercase__), (lowercase__)) = heapq.heappop(self.elements)
while x != item:
temp.append((pri, x))
((lowercase__), (lowercase__)) = heapq.heappop(self.elements)
temp.append((priority, item))
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx))
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : int) -> Tuple:
"""simple docstring"""
if item in self.set:
self.set.remove(lowerCAmelCase)
lowercase__ = []
((lowercase__), (lowercase__)) = heapq.heappop(self.elements)
while x != item:
temp.append((pro, x))
((lowercase__), (lowercase__)) = heapq.heappop(self.elements)
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy))
def UpperCAmelCase ( self : Dict) -> List[Any]:
"""simple docstring"""
return self.elements[0][1]
def UpperCAmelCase ( self : List[str]) -> str:
"""simple docstring"""
((lowercase__), (lowercase__)) = heapq.heappop(self.elements)
self.set.remove(lowerCAmelCase)
return (priority, item)
def _lowerCAmelCase ( A__ , A__ ):
# euclidean distance
lowercase__ = np.array(A__ )
lowercase__ = np.array(A__ )
return np.linalg.norm(a - b )
def _lowerCAmelCase ( A__ , A__ ):
# integer division by time variable
return consistent_heuristic(A__ , A__ ) // t
def _lowerCAmelCase ( A__ , A__ ):
# manhattan distance
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def _lowerCAmelCase ( A__ , A__ , A__ , A__ ):
lowercase__ = g_function[start] + Wa * heuristics[i](A__ , A__ )
return ans
def _lowerCAmelCase ( A__ , A__ , A__ ):
lowercase__ = np.chararray((n, n) )
for i in range(A__ ):
for j in range(A__ ):
lowercase__ = '*'
for i in range(A__ ):
for j in range(A__ ):
if (j, (n - 1) - i) in blocks:
lowercase__ = '#'
lowercase__ = '-'
lowercase__ = back_pointer[goal]
while x != start:
((lowercase__), (lowercase__)) = x
# print(x)
lowercase__ = '-'
lowercase__ = back_pointer[x]
lowercase__ = '-'
for i in range(A__ ):
for j in range(A__ ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=' ' )
print('<-- End position' , end=' ' )
else:
print(grid[i][j] , end=' ' )
print()
print('^' )
print('Start position' )
print()
print('# is an obstacle' )
print('- is the path taken by algorithm' )
print('PATH TAKEN BY THE ALGORITHM IS:-' )
lowercase__ = back_pointer[goal]
while x != start:
print(A__ , end=' ' )
lowercase__ = back_pointer[x]
print(A__ )
sys.exit()
def _lowerCAmelCase ( A__ ):
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def _lowerCAmelCase ( A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , ):
for itera in range(A__ ):
open_list[itera].remove_element(A__ )
# print("s", s)
# print("j", j)
((lowercase__), (lowercase__)) = s
lowercase__ = (x - 1, y)
lowercase__ = (x + 1, y)
lowercase__ = (x, y + 1)
lowercase__ = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(A__ ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(A__ )
lowercase__ = -1
lowercase__ = float('inf' )
if valid(A__ ) and g_function[neighbours] > g_function[s] + 1:
lowercase__ = g_function[s] + 1
lowercase__ = s
if neighbours not in close_list_anchor:
open_list[0].put(A__ , key(A__ , 0 , A__ , A__ ) )
if neighbours not in close_list_inad:
for var in range(1 , A__ ):
if key(A__ , A__ , A__ , A__ ) <= Wa * key(
A__ , 0 , A__ , A__ ):
open_list[j].put(
A__ , key(A__ , A__ , A__ , A__ ) )
def _lowerCAmelCase ( ):
lowercase__ = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
a__ : str = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
a__ : Any = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
a__ : Any = make_common_ground()
a__ : Union[str, Any] = blocks_blk
# hyper parameters
a__ : List[Any] = 1
a__ : List[str] = 1
a__ : Optional[int] = 20
a__ : Optional[Any] = 3 # one consistent and two other inconsistent
# start and end destination
a__ : Tuple = (0, 0)
a__ : str = (n - 1, n - 1)
a__ : Optional[Any] = 1
def _lowerCAmelCase ( A__ , A__ , A__ ):
lowercase__ = {start: 0, goal: float('inf' )}
lowercase__ = {start: -1, goal: -1}
lowercase__ = []
lowercase__ = set()
for i in range(A__ ):
open_list.append(PriorityQueue() )
open_list[i].put(A__ , key(A__ , A__ , A__ , A__ ) )
lowercase__ = []
lowercase__ = []
while open_list[0].minkey() < float('inf' ):
for i in range(1 , A__ ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float('inf' ):
do_something(A__ , A__ , A__ )
else:
lowercase__, lowercase__ = open_list[i].top_show()
visited.add(A__ )
expand_state(
A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , )
close_list_inad.append(A__ )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float('inf' ):
do_something(A__ , A__ , A__ )
else:
lowercase__ = open_list[0].top_show()
visited.add(A__ )
expand_state(
A__ , 0 , A__ , A__ , A__ , A__ , A__ , A__ , )
close_list_anchor.append(A__ )
print('No path found to goal' )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(A__ ):
if (j, i) in blocks:
print('#' , end=' ' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('*' , end=' ' )
else:
print('-' , end=' ' )
else:
print('*' , end=' ' )
if (j, i) == (n - 1, n - 1):
print('<-- End position' , end=' ' )
print()
print('^' )
print('Start position' )
print()
print('# is an obstacle' )
print('- is the path taken by algorithm' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 642
| 1
|
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def _lowerCAmelCase ( A__ , A__ , A__ , A__=1_024 ):
lowercase__, lowercase__ = [], []
lowercase__ = list(zip(A__ , A__ ) )
lowercase__, lowercase__ = sorted_examples[0]
def is_too_big(A__ ):
return tok(A__ , return_tensors='pt' ).input_ids.shape[1] > max_tokens
for src, tgt in tqdm(sorted_examples[1:] ):
lowercase__ = new_src + ' ' + src
lowercase__ = new_tgt + ' ' + tgt
if is_too_big(A__ ) or is_too_big(A__ ): # cant fit, finalize example
finished_src.append(A__ )
finished_tgt.append(A__ )
lowercase__, lowercase__ = src, tgt
else: # can fit, keep adding
lowercase__, lowercase__ = cand_src, cand_tgt
# cleanup
if new_src:
assert new_tgt
finished_src.append(A__ )
finished_tgt.append(A__ )
return finished_src, finished_tgt
def _lowerCAmelCase ( A__ , A__ , A__ , A__ ):
lowercase__ = Path(A__ )
save_path.mkdir(exist_ok=A__ )
for split in ["train"]:
lowercase__, lowercase__ = data_dir / F'''{split}.source''', data_dir / F'''{split}.target'''
lowercase__ = [x.rstrip() for x in Path(A__ ).open().readlines()]
lowercase__ = [x.rstrip() for x in Path(A__ ).open().readlines()]
lowercase__, lowercase__ = pack_examples(A__ , A__ , A__ , A__ )
print(F'''packed {split} split from {len(A__ )} examples -> {len(A__ )}.''' )
Path(save_path / F'''{split}.source''' ).open('w' ).write('\n'.join(A__ ) )
Path(save_path / F'''{split}.target''' ).open('w' ).write('\n'.join(A__ ) )
for split in ["val", "test"]:
lowercase__, lowercase__ = data_dir / F'''{split}.source''', data_dir / F'''{split}.target'''
shutil.copyfile(A__ , save_path / F'''{split}.source''' )
shutil.copyfile(A__ , save_path / F'''{split}.target''' )
def _lowerCAmelCase ( ):
lowercase__ = argparse.ArgumentParser()
parser.add_argument('--tok_name' , type=A__ , help='like facebook/bart-large-cnn,t5-base, etc.' )
parser.add_argument('--max_seq_len' , type=A__ , default=128 )
parser.add_argument('--data_dir' , type=A__ )
parser.add_argument('--save_path' , type=A__ )
lowercase__ = parser.parse_args()
lowercase__ = AutoTokenizer.from_pretrained(args.tok_name )
return pack_data_dir(A__ , Path(args.data_dir ) , args.max_seq_len , args.save_path )
if __name__ == "__main__":
packer_cli()
| 642
|
import math
import sys
def _lowerCAmelCase ( A__ ):
lowercase__ = ''
try:
with open(A__ , 'rb' ) as binary_file:
lowercase__ = binary_file.read()
for dat in data:
lowercase__ = F'''{dat:08b}'''
result += curr_byte
return result
except OSError:
print('File not accessible' )
sys.exit()
def _lowerCAmelCase ( A__ ):
lowercase__ = {'0': '0', '1': '1'}
lowercase__, lowercase__ = '', ''
lowercase__ = len(A__ )
for i in range(len(A__ ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
lowercase__ = lexicon[curr_string]
result += last_match_id
lowercase__ = last_match_id + '0'
if math.loga(A__ ).is_integer():
lowercase__ = {}
for curr_key in list(A__ ):
lowercase__ = lexicon.pop(A__ )
lowercase__ = new_lex
lowercase__ = last_match_id + '1'
index += 1
lowercase__ = ''
return result
def _lowerCAmelCase ( A__ , A__ ):
lowercase__ = 8
try:
with open(A__ , 'wb' ) as opened_file:
lowercase__ = [
to_write[i : i + byte_length]
for i in range(0 , len(A__ ) , A__ )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('10000000' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(A__ , 2 ).to_bytes(1 , byteorder='big' ) )
except OSError:
print('File not accessible' )
sys.exit()
def _lowerCAmelCase ( A__ ):
lowercase__ = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
lowercase__ = data_bits[counter:]
lowercase__ = data_bits[counter + 1 :]
return data_bits
def _lowerCAmelCase ( A__ , A__ ):
lowercase__ = read_file_binary(A__ )
lowercase__ = remove_prefix(A__ )
lowercase__ = decompress_data(A__ )
write_file_binary(A__ , A__ )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 642
| 1
|
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
a__ : int = logging.get_logger(__name__)
a__ : List[Any] = {"vocab_file": "spiece.model"}
a__ : Optional[int] = {
"vocab_file": {
"TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
}
}
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
def __init__( self : Optional[int] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Tuple=False , lowerCAmelCase : List[str]=True , lowerCAmelCase : Optional[int]=False , lowerCAmelCase : Optional[Any]="<s>" , lowerCAmelCase : Any="</s>" , lowerCAmelCase : Tuple="<unk>" , lowerCAmelCase : Tuple="<sep>" , lowerCAmelCase : str="<pad>" , lowerCAmelCase : Optional[Any]="<cls>" , lowerCAmelCase : int="<mask>" , lowerCAmelCase : Optional[int]=["<eop>", "<eod>"] , lowerCAmelCase : Optional[Dict[str, Any]] = None , **lowerCAmelCase : Optional[Any] , ) -> None:
"""simple docstring"""
lowercase__ = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase) if isinstance(lowerCAmelCase , lowerCAmelCase) else mask_token
lowercase__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=lowerCAmelCase , remove_space=lowerCAmelCase , keep_accents=lowerCAmelCase , bos_token=lowerCAmelCase , eos_token=lowerCAmelCase , unk_token=lowerCAmelCase , sep_token=lowerCAmelCase , pad_token=lowerCAmelCase , cls_token=lowerCAmelCase , mask_token=lowerCAmelCase , additional_special_tokens=lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase , )
lowercase__ = 3
lowercase__ = do_lower_case
lowercase__ = remove_space
lowercase__ = keep_accents
lowercase__ = vocab_file
lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(lowerCAmelCase)
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
'You need to install jieba to use CpmTokenizer or CpmTokenizerFast. '
'See https://pypi.org/project/jieba/ for installation.')
lowercase__ = jieba
lowercase__ = str.maketrans(' \n' , '\u2582\u2583')
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def UpperCAmelCase ( self : int) -> List[Any]:
"""simple docstring"""
return len(self.sp_model)
def UpperCAmelCase ( self : Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = {self.convert_ids_to_tokens(lowerCAmelCase): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self : Any) -> List[str]:
"""simple docstring"""
lowercase__ = self.__dict__.copy()
lowercase__ = None
return state
def __setstate__( self : Optional[Any] , lowerCAmelCase : str) -> Tuple:
"""simple docstring"""
lowercase__ = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs'):
lowercase__ = {}
lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def UpperCAmelCase ( self : int , lowerCAmelCase : Any) -> List[str]:
"""simple docstring"""
if self.remove_space:
lowercase__ = ' '.join(inputs.strip().split())
else:
lowercase__ = inputs
lowercase__ = outputs.replace('``' , '"').replace('\'\'' , '"')
if not self.keep_accents:
lowercase__ = unicodedata.normalize('NFKD' , lowerCAmelCase)
lowercase__ = ''.join([c for c in outputs if not unicodedata.combining(lowerCAmelCase)])
if self.do_lower_case:
lowercase__ = outputs.lower()
return outputs
def UpperCAmelCase ( self : List[str] , lowerCAmelCase : str) -> List[str]:
"""simple docstring"""
lowercase__ = self.preprocess_text(lowerCAmelCase)
lowercase__ = self.sp_model.encode(lowerCAmelCase , out_type=lowerCAmelCase)
lowercase__ = []
for piece in pieces:
if len(lowerCAmelCase) > 1 and piece[-1] == str(',') and piece[-2].isdigit():
lowercase__ = self.sp_model.EncodeAsPieces(piece[:-1].replace(lowerCAmelCase , ''))
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0]) == 1:
lowercase__ = cur_pieces[1:]
else:
lowercase__ = cur_pieces[0][1:]
cur_pieces.append(piece[-1])
new_pieces.extend(lowerCAmelCase)
else:
new_pieces.append(lowerCAmelCase)
return new_pieces
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : Tuple) -> Optional[Any]:
"""simple docstring"""
return self.sp_model.PieceToId(lowerCAmelCase)
def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : Dict) -> Optional[int]:
"""simple docstring"""
return self.sp_model.IdToPiece(lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : Dict) -> Dict:
"""simple docstring"""
lowercase__ = ''.join(lowerCAmelCase).replace(lowerCAmelCase , ' ').strip()
return out_string
def UpperCAmelCase ( self : str , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
lowercase__ = [self.sep_token_id]
lowercase__ = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def UpperCAmelCase ( self : int , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None , lowerCAmelCase : bool = False) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase , token_ids_a=lowerCAmelCase , already_has_special_tokens=lowerCAmelCase)
if token_ids_a is not None:
return ([0] * len(lowerCAmelCase)) + [1] + ([0] * len(lowerCAmelCase)) + [1, 1]
return ([0] * len(lowerCAmelCase)) + [1, 1]
def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
lowercase__ = [self.sep_token_id]
lowercase__ = [2]
if token_ids_a is None:
return len(token_ids_a + sep) * [0] + cls_segment_id
return len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] + cls_segment_id
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(lowerCAmelCase):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
return
lowercase__ = os.path.join(
lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCAmelCase) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , lowerCAmelCase)
elif not os.path.isfile(self.vocab_file):
with open(lowerCAmelCase , 'wb') as fi:
lowercase__ = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase)
return (out_vocab_file,)
def UpperCAmelCase ( self : Union[str, Any] , *lowerCAmelCase : Tuple , **lowerCAmelCase : Tuple) -> int:
"""simple docstring"""
lowercase__ = super()._decode(*lowerCAmelCase , **lowerCAmelCase)
lowercase__ = text.replace(' ' , '').replace('\u2582' , ' ').replace('\u2583' , '\n')
return text
| 642
|
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
a__ : int = logging.get_logger(__name__)
a__ : Tuple = {"vocab_file": "vocab.txt"}
a__ : int = {
"vocab_file": {
"facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
"facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
},
}
a__ : Dict = {
"facebook/esm2_t6_8M_UR50D": 10_24,
"facebook/esm2_t12_35M_UR50D": 10_24,
}
def _lowerCAmelCase ( A__ ):
with open(A__ , 'r' ) as f:
lowercase__ = f.read().splitlines()
return [l.strip() for l in lines]
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : Union[str, Any] = VOCAB_FILES_NAMES
A : str = PRETRAINED_VOCAB_FILES_MAP
A : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : List[Any] = ["input_ids", "attention_mask"]
def __init__( self : Union[str, Any] , lowerCAmelCase : Any , lowerCAmelCase : Optional[int]="<unk>" , lowerCAmelCase : Dict="<cls>" , lowerCAmelCase : List[str]="<pad>" , lowerCAmelCase : Union[str, Any]="<mask>" , lowerCAmelCase : Optional[Any]="<eos>" , **lowerCAmelCase : Any , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**lowerCAmelCase)
lowercase__ = load_vocab_file(lowerCAmelCase)
lowercase__ = dict(enumerate(self.all_tokens))
lowercase__ = {tok: ind for ind, tok in enumerate(self.all_tokens)}
lowercase__ = unk_token
lowercase__ = cls_token
lowercase__ = pad_token
lowercase__ = mask_token
lowercase__ = eos_token
lowercase__ = self.all_tokens
self._create_trie(self.unique_no_split_tokens)
def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : int) -> str:
"""simple docstring"""
return self._id_to_token.get(lowerCAmelCase , self.unk_token)
def UpperCAmelCase ( self : Dict , lowerCAmelCase : str) -> int:
"""simple docstring"""
return self._token_to_id.get(lowerCAmelCase , self._token_to_id.get(self.unk_token))
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : str , **lowerCAmelCase : Union[str, Any]) -> Dict:
"""simple docstring"""
return text.split()
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : Any=False) -> Union[str, Any]:
"""simple docstring"""
return len(self._id_to_token)
def UpperCAmelCase ( self : Tuple) -> int:
"""simple docstring"""
return {token: i for i, token in enumerate(self.all_tokens)}
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : str) -> int:
"""simple docstring"""
return self._token_to_id.get(lowerCAmelCase , self._token_to_id.get(self.unk_token))
def UpperCAmelCase ( self : Dict , lowerCAmelCase : int) -> str:
"""simple docstring"""
return self._id_to_token.get(lowerCAmelCase , self.unk_token)
def UpperCAmelCase ( self : Any , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
lowercase__ = [self.cls_token_id]
lowercase__ = [self.eos_token_id] # No sep token in ESM vocabulary
if token_ids_a is None:
if self.eos_token_id is None:
return cls + token_ids_a
else:
return cls + token_ids_a + sep
elif self.eos_token_id is None:
raise ValueError('Cannot tokenize multiple sequences when EOS token is not set!')
return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : List , lowerCAmelCase : Optional[List] = None , lowerCAmelCase : bool = False) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.')
return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
lowercase__ = [1] + ([0] * len(lowerCAmelCase)) + [1]
if token_ids_a is not None:
mask += [0] * len(lowerCAmelCase) + [1]
return mask
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int]) -> Dict:
"""simple docstring"""
lowercase__ = os.path.join(lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + 'vocab.txt')
with open(lowerCAmelCase , 'w') as f:
f.write('\n'.join(self.all_tokens))
return (vocab_file,)
@property
def UpperCAmelCase ( self : Optional[int]) -> int:
"""simple docstring"""
return self.get_vocab_size(with_added_tokens=lowerCAmelCase)
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : Union[List[str], List[AddedToken]] , lowerCAmelCase : bool = False) -> int:
"""simple docstring"""
return super()._add_tokens(lowerCAmelCase , special_tokens=lowerCAmelCase)
| 642
| 1
|
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : List[Any] = ["image_processor", "tokenizer"]
A : Optional[Any] = "Pix2StructImageProcessor"
A : Dict = ("T5Tokenizer", "T5TokenizerFast")
def __init__( self : List[str] , lowerCAmelCase : str , lowerCAmelCase : str) -> Dict:
"""simple docstring"""
lowercase__ = False
super().__init__(lowerCAmelCase , lowerCAmelCase)
def __call__( self : List[Any] , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , lowerCAmelCase : bool = True , lowerCAmelCase : Union[bool, str, PaddingStrategy] = False , lowerCAmelCase : Union[bool, str, TruncationStrategy] = None , lowerCAmelCase : Optional[int] = None , lowerCAmelCase : Optional[int] = 20_48 , lowerCAmelCase : int = 0 , lowerCAmelCase : Optional[int] = None , lowerCAmelCase : Optional[bool] = None , lowerCAmelCase : bool = False , lowerCAmelCase : bool = False , lowerCAmelCase : bool = False , lowerCAmelCase : bool = False , lowerCAmelCase : bool = False , lowerCAmelCase : bool = True , lowerCAmelCase : Optional[Union[str, TensorType]] = None , **lowerCAmelCase : Union[str, Any] , ) -> BatchEncoding:
"""simple docstring"""
if images is None and text is None:
raise ValueError('You have to specify either images or text.')
# Get only text
if images is None and not self.image_processor.is_vqa:
lowercase__ = self.tokenizer
lowercase__ = self.tokenizer(
text=lowerCAmelCase , add_special_tokens=lowerCAmelCase , padding=lowerCAmelCase , truncation=lowerCAmelCase , max_length=lowerCAmelCase , stride=lowerCAmelCase , pad_to_multiple_of=lowerCAmelCase , return_attention_mask=lowerCAmelCase , return_overflowing_tokens=lowerCAmelCase , return_special_tokens_mask=lowerCAmelCase , return_offsets_mapping=lowerCAmelCase , return_token_type_ids=lowerCAmelCase , return_length=lowerCAmelCase , verbose=lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase , )
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
lowercase__ = self.image_processor(
lowerCAmelCase , return_tensors=lowerCAmelCase , max_patches=lowerCAmelCase , **lowerCAmelCase)
else:
# add pixel_values and bbox
lowercase__ = self.image_processor(
lowerCAmelCase , return_tensors=lowerCAmelCase , max_patches=lowerCAmelCase , header_text=lowerCAmelCase , **lowerCAmelCase)
if text is not None and not self.image_processor.is_vqa:
lowercase__ = self.tokenizer(
text=lowerCAmelCase , add_special_tokens=lowerCAmelCase , padding=lowerCAmelCase , truncation=lowerCAmelCase , max_length=lowerCAmelCase , stride=lowerCAmelCase , pad_to_multiple_of=lowerCAmelCase , return_attention_mask=lowerCAmelCase , return_overflowing_tokens=lowerCAmelCase , return_special_tokens_mask=lowerCAmelCase , return_offsets_mapping=lowerCAmelCase , return_token_type_ids=lowerCAmelCase , return_length=lowerCAmelCase , verbose=lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase , )
if "attention_mask" in text_encoding:
lowercase__ = text_encoding.pop('attention_mask')
if "input_ids" in text_encoding:
lowercase__ = text_encoding.pop('input_ids')
else:
lowercase__ = None
if text_encoding is not None:
encoding_image_processor.update(lowerCAmelCase)
return encoding_image_processor
def UpperCAmelCase ( self : Tuple , *lowerCAmelCase : Any , **lowerCAmelCase : int) -> int:
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCAmelCase , **lowerCAmelCase)
def UpperCAmelCase ( self : List[str] , *lowerCAmelCase : Dict , **lowerCAmelCase : List[str]) -> Optional[Any]:
"""simple docstring"""
return self.tokenizer.decode(*lowerCAmelCase , **lowerCAmelCase)
@property
def UpperCAmelCase ( self : Optional[Any]) -> Any:
"""simple docstring"""
lowercase__ = self.tokenizer.model_input_names
lowercase__ = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 642
|
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
a__ : int = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
a__ : Optional[Any] = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
a__ : Tuple = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase__( datasets.Metric ):
'''simple docstring'''
def UpperCAmelCase ( self : List[Any]) -> MetricInfo:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token') , id='sequence'),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token') , id='sequence') , id='references'),
}) , )
def UpperCAmelCase ( self : int , lowerCAmelCase : List[List[List[str]]] , lowerCAmelCase : List[List[str]] , lowerCAmelCase : int = 1 , lowerCAmelCase : int = 4 , ) -> Dict[str, float]:
"""simple docstring"""
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=lowerCAmelCase , hypotheses=lowerCAmelCase , min_len=lowerCAmelCase , max_len=lowerCAmelCase)
}
| 642
| 1
|
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ : Union[str, Any] = logging.get_logger(__name__)
a__ : Union[str, Any] = {
"microsoft/xprophetnet-large-wiki100-cased": (
"https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
),
}
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : Optional[int] = "xlm-prophetnet"
A : Tuple = ["past_key_values"]
A : List[str] = {
"num_attention_heads": "num_encoder_attention_heads",
}
def __init__( self : List[Any] , lowerCAmelCase : Optional[float] = 0.1 , lowerCAmelCase : Optional[Union[str, Callable]] = "gelu" , lowerCAmelCase : Optional[int] = 3_05_22 , lowerCAmelCase : Optional[int] = 10_24 , lowerCAmelCase : Optional[int] = 40_96 , lowerCAmelCase : Optional[int] = 12 , lowerCAmelCase : Optional[int] = 16 , lowerCAmelCase : Optional[int] = 40_96 , lowerCAmelCase : Optional[int] = 12 , lowerCAmelCase : Optional[int] = 16 , lowerCAmelCase : Optional[float] = 0.1 , lowerCAmelCase : Optional[float] = 0.1 , lowerCAmelCase : Optional[int] = 5_12 , lowerCAmelCase : Optional[float] = 0.02 , lowerCAmelCase : Optional[bool] = True , lowerCAmelCase : Optional[bool] = True , lowerCAmelCase : Optional[int] = 0 , lowerCAmelCase : Optional[int] = 2 , lowerCAmelCase : Optional[int] = 32 , lowerCAmelCase : Optional[int] = 1_28 , lowerCAmelCase : Optional[bool] = False , lowerCAmelCase : Optional[float] = 0.0 , lowerCAmelCase : Optional[bool] = True , lowerCAmelCase : Optional[int] = 0 , lowerCAmelCase : Optional[int] = 1 , lowerCAmelCase : Optional[int] = 2 , **lowerCAmelCase : str , ) -> List[Any]:
"""simple docstring"""
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = encoder_ffn_dim
lowercase__ = num_encoder_layers
lowercase__ = num_encoder_attention_heads
lowercase__ = decoder_ffn_dim
lowercase__ = num_decoder_layers
lowercase__ = num_decoder_attention_heads
lowercase__ = max_position_embeddings
lowercase__ = init_std # Normal(0, this parameter)
lowercase__ = activation_function
# parameters for xlmprophetnet
lowercase__ = ngram
lowercase__ = num_buckets
lowercase__ = relative_max_distance
lowercase__ = disable_ngram_loss
lowercase__ = eps
# 3 Types of Dropout
lowercase__ = attention_dropout
lowercase__ = activation_dropout
lowercase__ = dropout
lowercase__ = use_cache
super().__init__(
pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , is_encoder_decoder=lowerCAmelCase , add_cross_attention=lowerCAmelCase , decoder_start_token_id=lowerCAmelCase , **lowerCAmelCase , )
@property
def UpperCAmelCase ( self : List[str]) -> int:
"""simple docstring"""
return self.num_encoder_layers + self.num_decoder_layers
@num_hidden_layers.setter
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : Union[str, Any]) -> str:
"""simple docstring"""
raise NotImplementedError(
'This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and'
' `num_decoder_layers`.')
| 642
|
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class UpperCAmelCase__:
'''simple docstring'''
def __init__( self : Tuple , lowerCAmelCase : str , lowerCAmelCase : Dict=13 , lowerCAmelCase : Dict=7 , lowerCAmelCase : int=True , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : str=True , lowerCAmelCase : int=True , lowerCAmelCase : List[Any]=99 , lowerCAmelCase : List[Any]=[1, 1, 2] , lowerCAmelCase : Optional[Any]=1 , lowerCAmelCase : int=32 , lowerCAmelCase : Union[str, Any]=4 , lowerCAmelCase : Tuple=8 , lowerCAmelCase : int=37 , lowerCAmelCase : Any="gelu_new" , lowerCAmelCase : str=0.1 , lowerCAmelCase : List[str]=0.1 , lowerCAmelCase : Dict=0.0 , lowerCAmelCase : str=5_12 , lowerCAmelCase : str=3 , lowerCAmelCase : List[Any]=0.02 , lowerCAmelCase : Union[str, Any]=3 , lowerCAmelCase : Any=4 , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Optional[int]=False , ) -> List[Any]:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = seq_length
lowercase__ = is_training
lowercase__ = use_input_mask
lowercase__ = use_token_type_ids
lowercase__ = use_labels
lowercase__ = vocab_size
lowercase__ = block_sizes
lowercase__ = num_decoder_layers
lowercase__ = d_model
lowercase__ = n_head
lowercase__ = d_head
lowercase__ = d_inner
lowercase__ = hidden_act
lowercase__ = hidden_dropout
lowercase__ = attention_dropout
lowercase__ = activation_dropout
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = 2
lowercase__ = num_labels
lowercase__ = num_choices
lowercase__ = scope
lowercase__ = initializer_std
# Used in the tests to check the size of the first attention layer
lowercase__ = n_head
# Used in the tests to check the size of the first hidden state
lowercase__ = self.d_model
# Used in the tests to check the number of output hidden states/attentions
lowercase__ = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
lowercase__ = self.num_hidden_layers + 2
def UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
lowercase__ = None
if self.use_input_mask:
lowercase__ = random_attention_mask([self.batch_size, self.seq_length])
lowercase__ = None
if self.use_token_type_ids:
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
lowercase__ = None
lowercase__ = None
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
lowercase__ = ids_tensor([self.batch_size] , self.num_choices)
lowercase__ = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def UpperCAmelCase ( self : Dict , lowerCAmelCase : List[Any] , lowerCAmelCase : Dict , lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Any , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Union[str, Any] , ) -> int:
"""simple docstring"""
lowercase__ = TFFunnelModel(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
lowercase__ = [input_ids, input_mask]
lowercase__ = model(lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model))
lowercase__ = False
lowercase__ = TFFunnelModel(config=lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model))
lowercase__ = False
lowercase__ = TFFunnelModel(config=lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model))
def UpperCAmelCase ( self : List[str] , lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int] , lowerCAmelCase : Any , lowerCAmelCase : List[str] , lowerCAmelCase : int , lowerCAmelCase : Optional[Any] , ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = TFFunnelBaseModel(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
lowercase__ = [input_ids, input_mask]
lowercase__ = model(lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model))
lowercase__ = False
lowercase__ = TFFunnelBaseModel(config=lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model))
lowercase__ = False
lowercase__ = TFFunnelBaseModel(config=lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model))
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Dict , lowerCAmelCase : List[str] , lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : Tuple , ) -> str:
"""simple docstring"""
lowercase__ = TFFunnelForPreTraining(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length))
def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : Any , lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[str] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Tuple , ) -> Optional[int]:
"""simple docstring"""
lowercase__ = TFFunnelForMaskedLM(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : int , lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Dict , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[Any] , ) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.num_labels
lowercase__ = TFFunnelForSequenceClassification(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : str , lowerCAmelCase : Optional[int] , lowerCAmelCase : int , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[str] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[Any] , ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.num_choices
lowercase__ = TFFunnelForMultipleChoice(config=lowerCAmelCase)
lowercase__ = tf.tile(tf.expand_dims(lowerCAmelCase , 1) , (1, self.num_choices, 1))
lowercase__ = tf.tile(tf.expand_dims(lowerCAmelCase , 1) , (1, self.num_choices, 1))
lowercase__ = tf.tile(tf.expand_dims(lowerCAmelCase , 1) , (1, self.num_choices, 1))
lowercase__ = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def UpperCAmelCase ( self : List[str] , lowerCAmelCase : str , lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int , lowerCAmelCase : Optional[int] , lowerCAmelCase : Any , ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.num_labels
lowercase__ = TFFunnelForTokenClassification(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def UpperCAmelCase ( self : int , lowerCAmelCase : List[str] , lowerCAmelCase : int , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : Tuple , lowerCAmelCase : Tuple , ) -> Optional[int]:
"""simple docstring"""
lowercase__ = TFFunnelForQuestionAnswering(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
(
(
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
),
) = config_and_inputs
lowercase__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase__( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
A : int = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
A : Dict = (
{
"feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
"fill-mask": TFFunnelForMaskedLM,
"question-answering": TFFunnelForQuestionAnswering,
"text-classification": TFFunnelForSequenceClassification,
"token-classification": TFFunnelForTokenClassification,
"zero-shot": TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
A : Optional[int] = False
A : Optional[int] = False
def UpperCAmelCase ( self : Tuple) -> str:
"""simple docstring"""
lowercase__ = TFFunnelModelTester(self)
lowercase__ = ConfigTester(self , config_class=lowerCAmelCase)
def UpperCAmelCase ( self : int) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase ( self : Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCAmelCase)
def UpperCAmelCase ( self : int) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase)
def UpperCAmelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase)
def UpperCAmelCase ( self : Dict) -> Dict:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase)
@require_tf
class UpperCAmelCase__( lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
A : Tuple = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
A : List[str] = False
A : int = False
def UpperCAmelCase ( self : Any) -> List[Any]:
"""simple docstring"""
lowercase__ = TFFunnelModelTester(self , base=lowerCAmelCase)
lowercase__ = ConfigTester(self , config_class=lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase ( self : Tuple) -> int:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*lowerCAmelCase)
def UpperCAmelCase ( self : int) -> str:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase)
def UpperCAmelCase ( self : List[str]) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowerCAmelCase)
| 642
| 1
|
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
a__ : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
def __init__( self : str , lowerCAmelCase : str , lowerCAmelCase : int=7_68) -> Dict:
"""simple docstring"""
super().__init__(lowerCAmelCase)
lowercase__ = proj_size
lowercase__ = CLIPVisionModel(lowerCAmelCase)
lowercase__ = PaintByExampleMapper(lowerCAmelCase)
lowercase__ = nn.LayerNorm(config.hidden_size)
lowercase__ = nn.Linear(config.hidden_size , self.proj_size)
# uncondition for scaling
lowercase__ = nn.Parameter(torch.randn((1, 1, self.proj_size)))
def UpperCAmelCase ( self : Any , lowerCAmelCase : Optional[Any] , lowerCAmelCase : int=False) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.model(pixel_values=lowerCAmelCase)
lowercase__ = clip_output.pooler_output
lowercase__ = self.mapper(latent_states[:, None])
lowercase__ = self.final_layer_norm(lowerCAmelCase)
lowercase__ = self.proj_out(lowerCAmelCase)
if return_uncond_vector:
return latent_states, self.uncond_vector
return latent_states
class UpperCAmelCase__( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , lowerCAmelCase : List[Any]) -> str:
"""simple docstring"""
super().__init__()
lowercase__ = (config.num_hidden_layers + 1) // 5
lowercase__ = config.hidden_size
lowercase__ = 1
lowercase__ = nn.ModuleList(
[
BasicTransformerBlock(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , activation_fn='gelu' , attention_bias=lowerCAmelCase)
for _ in range(lowerCAmelCase)
])
def UpperCAmelCase ( self : Any , lowerCAmelCase : Union[str, Any]) -> str:
"""simple docstring"""
for block in self.blocks:
lowercase__ = block(lowerCAmelCase)
return hidden_states
| 642
|
def _lowerCAmelCase ( A__ , A__ , A__ ):
if principal <= 0:
raise Exception('Principal borrowed must be > 0' )
if rate_per_annum < 0:
raise Exception('Rate of interest must be >= 0' )
if years_to_repay <= 0 or not isinstance(A__ , A__ ):
raise Exception('Years to repay must be an integer > 0' )
# Yearly rate is divided by 12 to get monthly rate
lowercase__ = rate_per_annum / 12
# Years to repay is multiplied by 12 to get number of payments as payment is monthly
lowercase__ = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 642
| 1
|
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase__( lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
A : Dict = ProphetNetTokenizer
A : Dict = False
def UpperCAmelCase ( self : Tuple) -> List[Any]:
"""simple docstring"""
super().setUp()
lowercase__ = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
lowercase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : Optional[Any]) -> Optional[int]:
"""simple docstring"""
lowercase__ = 'UNwant\u00E9d,running'
lowercase__ = 'unwanted, running'
return input_text, output_text
def UpperCAmelCase ( self : Tuple) -> str:
"""simple docstring"""
lowercase__ = self.tokenizer_class(self.vocab_file)
lowercase__ = tokenizer.tokenize('UNwant\u00E9d,running')
self.assertListEqual(lowerCAmelCase , ['un', '##want', '##ed', ',', 'runn', '##ing'])
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase) , [9, 6, 7, 12, 10, 11])
def UpperCAmelCase ( self : Optional[Any]) -> Any:
"""simple docstring"""
lowercase__ = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz') , ['ah', '\u535A', '\u63A8', 'zz'])
def UpperCAmelCase ( self : Dict) -> str:
"""simple docstring"""
lowercase__ = BasicTokenizer(do_lower_case=lowerCAmelCase)
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ') , ['hello', '!', 'how', 'are', 'you', '?'])
self.assertListEqual(tokenizer.tokenize('H\u00E9llo') , ['hello'])
def UpperCAmelCase ( self : Any) -> int:
"""simple docstring"""
lowercase__ = BasicTokenizer(do_lower_case=lowerCAmelCase , strip_accents=lowerCAmelCase)
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ') , ['hällo', '!', 'how', 'are', 'you', '?'])
self.assertListEqual(tokenizer.tokenize('H\u00E9llo') , ['h\u00E9llo'])
def UpperCAmelCase ( self : str) -> Optional[Any]:
"""simple docstring"""
lowercase__ = BasicTokenizer(do_lower_case=lowerCAmelCase , strip_accents=lowerCAmelCase)
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ') , ['hallo', '!', 'how', 'are', 'you', '?'])
self.assertListEqual(tokenizer.tokenize('H\u00E9llo') , ['hello'])
def UpperCAmelCase ( self : Optional[int]) -> str:
"""simple docstring"""
lowercase__ = BasicTokenizer(do_lower_case=lowerCAmelCase)
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ') , ['hallo', '!', 'how', 'are', 'you', '?'])
self.assertListEqual(tokenizer.tokenize('H\u00E9llo') , ['hello'])
def UpperCAmelCase ( self : Tuple) -> int:
"""simple docstring"""
lowercase__ = BasicTokenizer(do_lower_case=lowerCAmelCase)
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ') , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'])
def UpperCAmelCase ( self : Any) -> Dict:
"""simple docstring"""
lowercase__ = BasicTokenizer(do_lower_case=lowerCAmelCase , strip_accents=lowerCAmelCase)
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ') , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'])
def UpperCAmelCase ( self : List[Any]) -> Any:
"""simple docstring"""
lowercase__ = BasicTokenizer(do_lower_case=lowerCAmelCase , strip_accents=lowerCAmelCase)
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ') , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'])
def UpperCAmelCase ( self : Optional[Any]) -> Tuple:
"""simple docstring"""
lowercase__ = BasicTokenizer(do_lower_case=lowerCAmelCase , never_split=['[UNK]'])
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]') , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'])
def UpperCAmelCase ( self : str) -> int:
"""simple docstring"""
lowercase__ = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
lowercase__ = {}
for i, token in enumerate(lowerCAmelCase):
lowercase__ = i
lowercase__ = WordpieceTokenizer(vocab=lowerCAmelCase , unk_token='[UNK]')
self.assertListEqual(tokenizer.tokenize('') , [])
self.assertListEqual(tokenizer.tokenize('unwanted running') , ['un', '##want', '##ed', 'runn', '##ing'])
self.assertListEqual(tokenizer.tokenize('unwantedX running') , ['[UNK]', 'runn', '##ing'])
@require_torch
def UpperCAmelCase ( self : Dict) -> Dict:
"""simple docstring"""
lowercase__ = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased')
lowercase__ = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
lowercase__ = [10_37, 21_46, 2_04_23, 20_05, 76_80, 78_49, 39_89, 10_12, 1_02]
lowercase__ = tokenizer(lowerCAmelCase , padding=lowerCAmelCase , return_tensors='pt')
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase)
lowercase__ = list(batch.input_ids.numpy()[0])
self.assertListEqual(lowerCAmelCase , lowerCAmelCase)
self.assertEqual((2, 9) , batch.input_ids.shape)
self.assertEqual((2, 9) , batch.attention_mask.shape)
def UpperCAmelCase ( self : str) -> List[Any]:
"""simple docstring"""
self.assertTrue(_is_whitespace(' '))
self.assertTrue(_is_whitespace('\t'))
self.assertTrue(_is_whitespace('\r'))
self.assertTrue(_is_whitespace('\n'))
self.assertTrue(_is_whitespace('\u00A0'))
self.assertFalse(_is_whitespace('A'))
self.assertFalse(_is_whitespace('-'))
def UpperCAmelCase ( self : int) -> Tuple:
"""simple docstring"""
self.assertTrue(_is_control('\u0005'))
self.assertFalse(_is_control('A'))
self.assertFalse(_is_control(' '))
self.assertFalse(_is_control('\t'))
self.assertFalse(_is_control('\r'))
def UpperCAmelCase ( self : Optional[Any]) -> Optional[int]:
"""simple docstring"""
self.assertTrue(_is_punctuation('-'))
self.assertTrue(_is_punctuation('$'))
self.assertTrue(_is_punctuation('`'))
self.assertTrue(_is_punctuation('.'))
self.assertFalse(_is_punctuation('A'))
self.assertFalse(_is_punctuation(' '))
@slow
def UpperCAmelCase ( self : Optional[int]) -> List[str]:
"""simple docstring"""
lowercase__ = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased')
lowercase__ = tokenizer.encode('sequence builders' , add_special_tokens=lowerCAmelCase)
lowercase__ = tokenizer.encode('multi-sequence build' , add_special_tokens=lowerCAmelCase)
lowercase__ = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase)
lowercase__ = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase , lowerCAmelCase)
assert encoded_sentence == text + [1_02]
assert encoded_pair == text + [1_02] + text_a + [1_02]
| 642
|
from __future__ import annotations
def _lowerCAmelCase ( A__ , A__ ):
if b == 0:
return (1, 0)
((lowercase__), (lowercase__)) = extended_euclid(A__ , a % b )
lowercase__ = a // b
return (y, x - k * y)
def _lowerCAmelCase ( A__ , A__ , A__ , A__ ):
((lowercase__), (lowercase__)) = extended_euclid(A__ , A__ )
lowercase__ = na * na
lowercase__ = ra * x * na + ra * y * na
return (n % m + m) % m
def _lowerCAmelCase ( A__ , A__ ):
((lowercase__), (lowercase__)) = extended_euclid(A__ , A__ )
if b < 0:
lowercase__ = (b % n + n) % n
return b
def _lowerCAmelCase ( A__ , A__ , A__ , A__ ):
lowercase__, lowercase__ = invert_modulo(A__ , A__ ), invert_modulo(A__ , A__ )
lowercase__ = na * na
lowercase__ = ra * x * na + ra * y * na
return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name="chinese_remainder_theorem", verbose=True)
testmod(name="chinese_remainder_theorem2", verbose=True)
testmod(name="invert_modulo", verbose=True)
testmod(name="extended_euclid", verbose=True)
| 642
| 1
|
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
a__ : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
def _lowerCAmelCase ( A__ ):
if not path:
return "pipe"
for ext in PipelineDataFormat.SUPPORTED_FORMATS:
if path.endswith(A__ ):
return ext
raise Exception(
F'''Unable to determine file format from file extension {path}. '''
F'''Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}''' )
def _lowerCAmelCase ( A__ ):
lowercase__ = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
lowercase__ = try_infer_format_from_ext(args.input ) if args.format == 'infer' else args.format
lowercase__ = PipelineDataFormat.from_str(
format=A__ , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , )
return RunCommand(A__ , A__ )
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
def __init__( self : Optional[Any] , lowerCAmelCase : Pipeline , lowerCAmelCase : PipelineDataFormat) -> List[str]:
"""simple docstring"""
lowercase__ = nlp
lowercase__ = reader
@staticmethod
def UpperCAmelCase ( lowerCAmelCase : ArgumentParser) -> Optional[int]:
"""simple docstring"""
lowercase__ = parser.add_parser('run' , help='Run a pipeline through the CLI')
run_parser.add_argument('--task' , choices=get_supported_tasks() , help='Task to run')
run_parser.add_argument('--input' , type=lowerCAmelCase , help='Path to the file to use for inference')
run_parser.add_argument('--output' , type=lowerCAmelCase , help='Path to the file that will be used post to write results.')
run_parser.add_argument('--model' , type=lowerCAmelCase , help='Name or path to the model to instantiate.')
run_parser.add_argument('--config' , type=lowerCAmelCase , help='Name or path to the model\'s config to instantiate.')
run_parser.add_argument(
'--tokenizer' , type=lowerCAmelCase , help='Name of the tokenizer to use. (default: same as the model name)')
run_parser.add_argument(
'--column' , type=lowerCAmelCase , help='Name of the column to use as input. (For multi columns input as QA use column1,columns2)' , )
run_parser.add_argument(
'--format' , type=lowerCAmelCase , default='infer' , choices=PipelineDataFormat.SUPPORTED_FORMATS , help='Input format to read from' , )
run_parser.add_argument(
'--device' , type=lowerCAmelCase , default=-1 , help='Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)' , )
run_parser.add_argument('--overwrite' , action='store_true' , help='Allow overwriting the output file.')
run_parser.set_defaults(func=lowerCAmelCase)
def UpperCAmelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
lowercase__, lowercase__ = self._nlp, []
for entry in self._reader:
lowercase__ = nlp(**lowerCAmelCase) if self._reader.is_multi_columns else nlp(lowerCAmelCase)
if isinstance(lowerCAmelCase , lowerCAmelCase):
outputs.append(lowerCAmelCase)
else:
outputs += output
# Saving data
if self._nlp.binary_output:
lowercase__ = self._reader.save_binary(lowerCAmelCase)
logger.warning(f'''Current pipeline requires output to be in binary format, saving at {binary_path}''')
else:
self._reader.save(lowerCAmelCase)
| 642
|
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
a__ : Union[str, Any] = logging.get_logger(__name__)
a__ : Optional[Any] = {
"google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : Union[str, Any] = "umt5"
A : List[str] = ["past_key_values"]
def __init__( self : List[Any] , lowerCAmelCase : Optional[int]=25_01_12 , lowerCAmelCase : str=5_12 , lowerCAmelCase : List[Any]=64 , lowerCAmelCase : Optional[int]=10_24 , lowerCAmelCase : Union[str, Any]=8 , lowerCAmelCase : Tuple=None , lowerCAmelCase : Optional[Any]=6 , lowerCAmelCase : int=32 , lowerCAmelCase : int=1_28 , lowerCAmelCase : List[str]=0.1 , lowerCAmelCase : List[str]=1E-6 , lowerCAmelCase : Optional[int]=1.0 , lowerCAmelCase : Optional[Any]="gated-gelu" , lowerCAmelCase : List[Any]=True , lowerCAmelCase : List[str]=True , lowerCAmelCase : List[Any]="T5Tokenizer" , lowerCAmelCase : str=True , lowerCAmelCase : Optional[int]=0 , lowerCAmelCase : Tuple=1 , lowerCAmelCase : Any=0 , **lowerCAmelCase : int , ) -> str:
"""simple docstring"""
super().__init__(
is_encoder_decoder=lowerCAmelCase , tokenizer_class=lowerCAmelCase , tie_word_embeddings=lowerCAmelCase , pad_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , decoder_start_token_id=lowerCAmelCase , **lowerCAmelCase , )
lowercase__ = vocab_size
lowercase__ = d_model
lowercase__ = d_kv
lowercase__ = d_ff
lowercase__ = num_layers
lowercase__ = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
lowercase__ = num_heads
lowercase__ = relative_attention_num_buckets
lowercase__ = relative_attention_max_distance
lowercase__ = dropout_rate
lowercase__ = layer_norm_epsilon
lowercase__ = initializer_factor
lowercase__ = feed_forward_proj
lowercase__ = use_cache
lowercase__ = self.feed_forward_proj.split('-')
lowercase__ = act_info[-1]
lowercase__ = act_info[0] == 'gated'
if len(lowerCAmelCase) > 1 and act_info[0] != "gated" or len(lowerCAmelCase) > 2:
raise ValueError(
f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
'\'gated-gelu\' or \'relu\'')
if feed_forward_proj == "gated-gelu":
lowercase__ = 'gelu_new'
@property
def UpperCAmelCase ( self : Union[str, Any]) -> Dict:
"""simple docstring"""
return self.d_model
@property
def UpperCAmelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
return self.num_heads
@property
def UpperCAmelCase ( self : Optional[int]) -> Optional[Any]:
"""simple docstring"""
return self.num_layers
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def UpperCAmelCase ( self : Optional[int]) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
lowercase__ = {
'input_ids': {0: 'batch', 1: 'encoder_sequence'},
'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
}
if self.use_past:
lowercase__ = 'past_encoder_sequence + sequence'
lowercase__ = {0: 'batch'}
lowercase__ = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
lowercase__ = {0: 'batch', 1: 'decoder_sequence'}
lowercase__ = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase , direction='inputs')
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def UpperCAmelCase ( self : int) -> int:
"""simple docstring"""
return 13
@property
def UpperCAmelCase ( self : Optional[Any]) -> float:
"""simple docstring"""
return 5E-4
| 642
| 1
|
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def _lowerCAmelCase ( A__ , A__ , A__=None ):
# set parameter of one layer
assert torch_layer.weight.shape == weight.shape, F'''{torch_layer} layer.weight does not match'''
lowercase__ = nn.Parameter(A__ )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, F'''{torch_layer} layer.bias does not match'''
lowercase__ = nn.Parameter(A__ )
def _lowerCAmelCase ( A__ , A__ , A__ ):
# set torch weights for 1-to-1 comparison
lowercase__ = np.asarray(weights[0] )
lowercase__ = np.asarray(weights[1] )
lowercase__ = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key , torch.tensor(A__ ).transpose(1 , 2 ).contiguous().view(-1 , A__ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(A__ ).transpose(1 , 2 ).contiguous().view(-1 , A__ ) , )
set_param(
torch_layer.output.dense , torch.tensor(A__ ).view(-1 , A__ ).contiguous().transpose(0 , 1 ) , )
def _lowerCAmelCase ( A__ , A__ , A__ ):
# set torch weights for 1-to-1 comparison
lowercase__ = np.asarray(weights[0] )
lowercase__ = np.asarray(weights[1] )
lowercase__ = np.asarray(weights[2] )
lowercase__ = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(A__ ).transpose(1 , 2 ).contiguous().view(-1 , A__ ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(A__ ).transpose(1 , 2 ).contiguous().view(-1 , A__ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(A__ ).transpose(1 , 2 ).contiguous().view(-1 , A__ ) , )
set_param(
torch_layer.output.dense , torch.tensor(A__ ).view(-1 , A__ ).contiguous().transpose(0 , 1 ) , )
def _lowerCAmelCase ( A__ , A__ , A__ ):
# layernorm 1
lowercase__ = weights[0][0][0]
lowercase__ = np.asarray(layer_norm_a[0] )
lowercase__ = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm , torch.tensor(A__ ) , torch.tensor(A__ ) , )
# lsh weights + output
lowercase__ = weights[0][1]
if len(A__ ) < 4:
set_layer_weights_in_torch_lsh(A__ , torch_block.attention , A__ )
else:
set_layer_weights_in_torch_local(A__ , torch_block.attention , A__ )
# intermediate weighs
lowercase__ = weights[2][0][1][2]
# Chunked Feed Forward
if len(A__ ) == 4:
lowercase__ = intermediate_weights[2]
# layernorm 2
lowercase__ = np.asarray(intermediate_weights[0][0] )
lowercase__ = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(A__ ) , torch.tensor(A__ ) , )
# intermediate dense
lowercase__ = np.asarray(intermediate_weights[1][0] )
lowercase__ = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(A__ ).transpose(0 , 1 ).contiguous() , torch.tensor(A__ ) , )
# intermediate out
lowercase__ = np.asarray(intermediate_weights[4][0] )
lowercase__ = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense , torch.tensor(A__ ).transpose(0 , 1 ).contiguous() , torch.tensor(A__ ) , )
def _lowerCAmelCase ( A__ , A__ , A__ ):
# reformer model
lowercase__ = torch_model.reformer
# word embeds
lowercase__ = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(A__ ) , )
if isinstance(weights[3] , A__ ):
lowercase__ = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
lowercase__ = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), F'''{position_embeddings[emb_idx]} emb does not match'''
lowercase__ = nn.Parameter(torch.tensor(A__ ) )
lowercase__ = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
A__ ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
lowercase__ = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(A__ , A__ , A__ )
# output layer norm
lowercase__ = np.asarray(weights[7][0] )
lowercase__ = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(A__ ) , torch.tensor(A__ ) , )
# output embeddings
lowercase__ = np.asarray(weights[9][0] )
lowercase__ = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder , torch.tensor(A__ ).transpose(0 , 1 ).contiguous() , torch.tensor(A__ ) , )
def _lowerCAmelCase ( A__ , A__ , A__ ):
# Initialise PyTorch model
lowercase__ = ReformerConfig.from_json_file(A__ )
print(F'''Building PyTorch model from configuration: {config}''' )
lowercase__ = ReformerModelWithLMHead(A__ )
with open(A__ , 'rb' ) as f:
lowercase__ = pickle.load(A__ )['weights']
set_model_weights_in_torch(A__ , A__ , config.hidden_size )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , A__ )
if __name__ == "__main__":
a__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained Reformer model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
a__ : Optional[Any] = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 642
|
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
a__ : Any = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__( lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
A : str = XGLMTokenizer
A : List[Any] = XGLMTokenizerFast
A : int = True
A : Optional[Any] = True
def UpperCAmelCase ( self : Optional[int]) -> Optional[Any]:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
lowercase__ = XGLMTokenizer(lowerCAmelCase , keep_accents=lowerCAmelCase)
tokenizer.save_pretrained(self.tmpdirname)
def UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
lowercase__ = '<pad>'
lowercase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase) , lowerCAmelCase)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase) , lowerCAmelCase)
def UpperCAmelCase ( self : str) -> List[str]:
"""simple docstring"""
lowercase__ = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '<s>')
self.assertEqual(vocab_keys[1] , '<pad>')
self.assertEqual(len(lowerCAmelCase) , 10_08)
def UpperCAmelCase ( self : List[str]) -> str:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 10_08)
def UpperCAmelCase ( self : Optional[Any]) -> List[str]:
"""simple docstring"""
lowercase__ = XGLMTokenizer(lowerCAmelCase , keep_accents=lowerCAmelCase)
lowercase__ = tokenizer.tokenize('This is a test')
self.assertListEqual(lowerCAmelCase , ['▁This', '▁is', '▁a', '▁t', 'est'])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
lowercase__ = tokenizer.tokenize('I was born in 92000, and this is falsé.')
self.assertListEqual(
lowerCAmelCase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
lowercase__ = tokenizer.convert_tokens_to_ids(lowerCAmelCase)
self.assertListEqual(
lowerCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
lowercase__ = tokenizer.convert_ids_to_tokens(lowerCAmelCase)
self.assertListEqual(
lowerCAmelCase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
def UpperCAmelCase ( self : int) -> Dict:
"""simple docstring"""
return XGLMTokenizer.from_pretrained('facebook/xglm-564M')
def UpperCAmelCase ( self : Optional[int]) -> Dict:
"""simple docstring"""
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(lowerCAmelCase , f.name)
lowercase__ = XGLMTokenizer(f.name , keep_accents=lowerCAmelCase)
lowercase__ = pickle.dumps(lowerCAmelCase)
pickle.loads(lowerCAmelCase)
def UpperCAmelCase ( self : Optional[Any]) -> str:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_rust_tokenizer()
lowercase__ = 'I was born in 92000, and this is falsé.'
lowercase__ = tokenizer.tokenize(lowerCAmelCase)
lowercase__ = rust_tokenizer.tokenize(lowerCAmelCase)
self.assertListEqual(lowerCAmelCase , lowerCAmelCase)
lowercase__ = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase)
lowercase__ = rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase)
self.assertListEqual(lowerCAmelCase , lowerCAmelCase)
lowercase__ = self.get_rust_tokenizer()
lowercase__ = tokenizer.encode(lowerCAmelCase)
lowercase__ = rust_tokenizer.encode(lowerCAmelCase)
self.assertListEqual(lowerCAmelCase , lowerCAmelCase)
@slow
def UpperCAmelCase ( self : List[str]) -> List[str]:
"""simple docstring"""
lowercase__ = 'Hello World!'
lowercase__ = [2, 3_12_27, 44_47, 35]
self.assertListEqual(lowerCAmelCase , self.big_tokenizer.encode(lowerCAmelCase))
@slow
def UpperCAmelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'
)
# fmt: off
lowercase__ = [2, 10_18, 67, 11, 19_88, 26_17, 56_31, 2_78, 11, 34_07, 48, 7_16_30, 2_80_85, 4, 32_34, 1_57, 13, 6, 5, 6, 4, 35_26, 7_68, 15, 6_59, 57, 2_98, 39_83, 8_64, 1_29, 21, 6, 5, 1_36_75, 3_77, 6_52, 75_80, 1_03_41, 1_55, 28_17, 4_22, 16_66, 7, 16_74, 53, 1_13, 20_22_77, 1_78_92, 33, 60, 87, 4, 32_34, 1_57, 61, 26_67, 5_23_76, 19, 88, 23, 7_35]
# fmt: on
self.assertListEqual(lowerCAmelCase , self.big_tokenizer.encode(lowerCAmelCase))
@slow
def UpperCAmelCase ( self : str) -> Dict:
"""simple docstring"""
lowercase__ = {
'input_ids': [[2, 10_88_25, 11_63, 15, 8_80_10, 4_73, 1_58_98, 1_57, 1_36_72, 18_57, 3_12, 8, 23_80_21, 11_63, 53, 1_36_72, 18_57, 3_12, 8, 5_32_83, 18_23_96, 8, 1_85_66, 16, 3_67_33, 41_01, 8, 2_30, 24_40_17, 12_25_53, 7, 15, 13_25_97, 4, 2_93, 1_25_11, 76_10, 4, 34_14, 13_25_97, 9, 4, 3_23_61, 3_62, 4, 7_34, 2_85_12, 3_25_69, 18, 4, 3_23_61, 2_60_96, 1_49_82, 73, 1_87_15, 2_14_33, 23_52_61, 15, 4_92, 1_24_27, 16, 53, 1_87_15, 2_14_33, 6_54_54, 15, 2_36_59, 5_63, 16, 2_78, 5_97, 28_43, 5_95, 79_31, 18_23_96, 6_41_86, 22, 8_86, 5_95, 13_29_81, 53, 2_55_40, 34_49, 4_39_82, 3_99_01, 59_51, 8_78, 3_30, 4, 2_76_94, 8_02_69, 3_12, 53, 65_17, 1_17_80, 6_11, 2_04_08, 5], [2, 6, 13_25_97, 67, 4_28_97, 33, 5_92, 8, 16_37_29, 2_55_40, 3_61, 13_69_97, 10_95_14, 17_32_30, 7, 5_01, 60, 10_29_13, 1_96, 56_31, 2_35, 6_32_43, 4_73, 6, 23_17_57, 74, 52_77, 79_05, 53, 30_95, 3_73_17, 22, 4_54, 18_38_74, 5], [2, 2_68, 3_12_98, 4_65_30, 6, 13_29_35, 4_38_31, 7, 5_97, 32, 24, 36_88, 98_65, 5]],
'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase , model_name='facebook/xglm-564M' , padding=lowerCAmelCase , )
| 642
| 1
|
import os
def _lowerCAmelCase ( ):
with open(os.path.dirname(A__ ) + '/p022_names.txt' ) as file:
lowercase__ = str(file.readlines()[0] )
lowercase__ = names.replace('"' , '' ).split(',' )
names.sort()
lowercase__ = 0
lowercase__ = 0
for i, name in enumerate(A__ ):
for letter in name:
name_score += ord(A__ ) - 64
total_score += (i + 1) * name_score
lowercase__ = 0
return total_score
if __name__ == "__main__":
print(solution())
| 642
|
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class UpperCAmelCase__:
'''simple docstring'''
def __init__( self : Optional[Any] , lowerCAmelCase : str) -> Optional[int]:
"""simple docstring"""
lowercase__ = data
lowercase__ = [0X6_7_4_5_2_3_0_1, 0XE_F_C_D_A_B_8_9, 0X9_8_B_A_D_C_F_E, 0X1_0_3_2_5_4_7_6, 0XC_3_D_2_E_1_F_0]
@staticmethod
def UpperCAmelCase ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[int]) -> str:
"""simple docstring"""
return ((n << b) | (n >> (32 - b))) & 0XF_F_F_F_F_F_F_F
def UpperCAmelCase ( self : Dict) -> Dict:
"""simple docstring"""
lowercase__ = B'\x80' + B'\x00' * (63 - (len(self.data) + 8) % 64)
lowercase__ = self.data + padding + struct.pack('>Q' , 8 * len(self.data))
return padded_data
def UpperCAmelCase ( self : int) -> Tuple:
"""simple docstring"""
return [
self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data) , 64)
]
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : int) -> List[Any]:
"""simple docstring"""
lowercase__ = list(struct.unpack('>16L' , lowerCAmelCase)) + [0] * 64
for i in range(16 , 80):
lowercase__ = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1)
return w
def UpperCAmelCase ( self : str) -> Dict:
"""simple docstring"""
lowercase__ = self.padding()
lowercase__ = self.split_blocks()
for block in self.blocks:
lowercase__ = self.expand_block(lowerCAmelCase)
lowercase__, lowercase__, lowercase__, lowercase__, lowercase__ = self.h
for i in range(0 , 80):
if 0 <= i < 20:
lowercase__ = (b & c) | ((~b) & d)
lowercase__ = 0X5_A_8_2_7_9_9_9
elif 20 <= i < 40:
lowercase__ = b ^ c ^ d
lowercase__ = 0X6_E_D_9_E_B_A_1
elif 40 <= i < 60:
lowercase__ = (b & c) | (b & d) | (c & d)
lowercase__ = 0X8_F_1_B_B_C_D_C
elif 60 <= i < 80:
lowercase__ = b ^ c ^ d
lowercase__ = 0XC_A_6_2_C_1_D_6
lowercase__, lowercase__, lowercase__, lowercase__, lowercase__ = (
self.rotate(lowerCAmelCase , 5) + f + e + k + expanded_block[i] & 0XF_F_F_F_F_F_F_F,
a,
self.rotate(lowerCAmelCase , 30),
c,
d,
)
lowercase__ = (
self.h[0] + a & 0XF_F_F_F_F_F_F_F,
self.h[1] + b & 0XF_F_F_F_F_F_F_F,
self.h[2] + c & 0XF_F_F_F_F_F_F_F,
self.h[3] + d & 0XF_F_F_F_F_F_F_F,
self.h[4] + e & 0XF_F_F_F_F_F_F_F,
)
return ("{:08x}" * 5).format(*self.h)
def _lowerCAmelCase ( ):
lowercase__ = B'Test String'
assert SHAaHash(A__ ).final_hash() == hashlib.shaa(A__ ).hexdigest() # noqa: S324
def _lowerCAmelCase ( ):
lowercase__ = argparse.ArgumentParser(description='Process some strings or files' )
parser.add_argument(
'--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
parser.add_argument('--file' , dest='input_file' , help='Hash contents of a file' )
lowercase__ = parser.parse_args()
lowercase__ = args.input_string
# In any case hash input should be a bytestring
if args.input_file:
with open(args.input_file , 'rb' ) as f:
lowercase__ = f.read()
else:
lowercase__ = bytes(A__ , 'utf-8' )
print(SHAaHash(A__ ).final_hash() )
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
| 642
| 1
|
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
a__ : Union[str, Any] = logging.get_logger("transformers.models.speecht5")
a__ : Dict = {
"speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm",
"speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection",
"speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv",
"speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed",
}
a__ : str = {
"text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens",
"text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha",
}
a__ : str = {
"speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0",
"speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1",
"speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer",
"speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha",
"speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer",
}
a__ : Tuple = {
"speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out",
"speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out",
"speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv",
"speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm",
"speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv",
"speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm",
"speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv",
"speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm",
"speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv",
"speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm",
"speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv",
"speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm",
}
a__ : Optional[int] = {
"text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens",
}
a__ : Dict = {
"text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head",
}
a__ : List[Any] = {
"encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj",
"encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj",
"encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj",
"encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj",
"encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm",
"encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense",
"encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense",
"encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm",
"encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k",
}
a__ : str = {
"decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj",
"decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj",
"decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj",
"decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj",
"decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm",
"decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj",
"decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj",
"decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj",
"decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj",
"decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm",
"decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense",
"decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense",
"decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm",
}
a__ : str = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
a__ : List[Any] = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
a__ : Dict = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
a__ : Union[str, Any] = []
a__ : Tuple = [
"encoder.version",
"encoder.layers.*.norm_k.weight",
"encoder.layers.*.norm_k.bias",
"decoder.version",
"decoder.layers.*.norm_k.weight",
"decoder.layers.*.norm_k.bias",
"decoder.pos_emb.pe_k",
"speech_encoder_prenet.embed_positions._float_tensor",
"text_decoder_prenet.embed_positions._float_tensor",
]
a__ : List[Any] = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"speech_decoder_prenet.*",
"speech_decoder_postnet.*",
]
a__ : int = IGNORE_KEYS + [
"encoder.proj",
"speech_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
a__ : Dict = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
def _lowerCAmelCase ( A__ , A__ , A__ , A__ , A__ ):
for attribute in key.split('.' ):
lowercase__ = getattr(A__ , A__ )
if weight_type is not None:
lowercase__ = getattr(A__ , A__ ).shape
else:
lowercase__ = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}''' )
if weight_type == "weight":
lowercase__ = value
elif weight_type == "weight_g":
lowercase__ = value
elif weight_type == "weight_v":
lowercase__ = value
elif weight_type == "bias":
lowercase__ = value
elif weight_type == "running_mean":
lowercase__ = value
elif weight_type == "running_var":
lowercase__ = value
elif weight_type == "num_batches_tracked":
lowercase__ = value
else:
lowercase__ = value
logger.info(F'''{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.''' )
def _lowerCAmelCase ( A__ , A__ ):
for key in ignore_keys:
if key.endswith('.*' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
lowercase__, lowercase__ = key.split('.*.' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def _lowerCAmelCase ( A__ , A__ , A__ ):
lowercase__ = []
if task == "s2t":
lowercase__ = hf_model.speechta.encoder.prenet.feature_encoder
lowercase__ = MAPPING_S2T
lowercase__ = IGNORE_KEYS_S2T
elif task == "t2s":
lowercase__ = None
lowercase__ = MAPPING_T2S
lowercase__ = IGNORE_KEYS_T2S
elif task == "s2s":
lowercase__ = hf_model.speechta.encoder.prenet.feature_encoder
lowercase__ = MAPPING_S2S
lowercase__ = IGNORE_KEYS_S2S
else:
raise ValueError(F'''Unsupported task: {task}''' )
for name, value in fairseq_dict.items():
if should_ignore(A__ , A__ ):
logger.info(F'''{name} was ignored''' )
continue
lowercase__ = False
if "conv_layers" in name:
load_conv_layer(
A__ , A__ , A__ , A__ , hf_model.config.feat_extract_norm == 'group' , )
lowercase__ = True
else:
for key, mapped_key in MAPPING.items():
# mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if "*" in key:
lowercase__, lowercase__ = key.split('.*.' )
if prefix in name and suffix in name:
lowercase__ = suffix
# if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
if key in name:
lowercase__ = True
if "*" in mapped_key:
lowercase__ = name.split(A__ )[0].split('.' )[-2]
lowercase__ = mapped_key.replace('*' , A__ )
if "weight_g" in name:
lowercase__ = 'weight_g'
elif "weight_v" in name:
lowercase__ = 'weight_v'
elif "bias" in name:
lowercase__ = 'bias'
elif "weight" in name:
lowercase__ = 'weight'
elif "running_mean" in name:
lowercase__ = 'running_mean'
elif "running_var" in name:
lowercase__ = 'running_var'
elif "num_batches_tracked" in name:
lowercase__ = 'num_batches_tracked'
else:
lowercase__ = None
set_recursively(A__ , A__ , A__ , A__ , A__ )
continue
if not is_used:
unused_weights.append(A__ )
logger.warning(F'''Unused weights: {unused_weights}''' )
def _lowerCAmelCase ( A__ , A__ , A__ , A__ , A__ ):
lowercase__ = full_name.split('conv_layers.' )[-1]
lowercase__ = name.split('.' )
lowercase__ = int(items[0] )
lowercase__ = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
lowercase__ = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
lowercase__ = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
lowercase__ = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
lowercase__ = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(A__ )
@torch.no_grad()
def _lowerCAmelCase ( A__ , A__ , A__ , A__=None , A__=None , A__=None , ):
if config_path is not None:
lowercase__ = SpeechTaConfig.from_pretrained(A__ )
else:
lowercase__ = SpeechTaConfig()
if task == "s2t":
lowercase__ = config.max_text_positions
lowercase__ = SpeechTaForSpeechToText(A__ )
elif task == "t2s":
lowercase__ = 1_876
lowercase__ = 600
lowercase__ = config.max_speech_positions
lowercase__ = SpeechTaForTextToSpeech(A__ )
elif task == "s2s":
lowercase__ = 1_876
lowercase__ = config.max_speech_positions
lowercase__ = SpeechTaForSpeechToSpeech(A__ )
else:
raise ValueError(F'''Unknown task name: {task}''' )
if vocab_path:
lowercase__ = SpeechTaTokenizer(A__ , model_max_length=config.max_text_positions )
# Mask token behaves like a normal word, i.e. include the space before it
lowercase__ = AddedToken('<mask>' , lstrip=A__ , rstrip=A__ )
lowercase__ = mask_token
tokenizer.add_special_tokens({'mask_token': mask_token} )
tokenizer.add_tokens(['<ctc_blank>'] )
lowercase__ = SpeechTaFeatureExtractor()
lowercase__ = SpeechTaProcessor(tokenizer=A__ , feature_extractor=A__ )
processor.save_pretrained(A__ )
lowercase__ = torch.load(A__ )
recursively_load_weights(fairseq_checkpoint['model'] , A__ , A__ )
model.save_pretrained(A__ )
if repo_id:
print('Pushing to the hub...' )
processor.push_to_hub(A__ )
model.push_to_hub(A__ )
if __name__ == "__main__":
a__ : Any = argparse.ArgumentParser()
parser.add_argument(
"--task",
default="s2t",
type=str,
help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
a__ : str = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
| 642
|
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
a__ : List[Any] = logging.get_logger(__name__)
a__ : Optional[int] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
a__ : List[Any] = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
"tokenizer_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json",
},
}
a__ : int = {
"facebook/bart-base": 10_24,
"facebook/bart-large": 10_24,
"facebook/bart-large-mnli": 10_24,
"facebook/bart-large-cnn": 10_24,
"facebook/bart-large-xsum": 10_24,
"yjernite/bart_eli5": 10_24,
}
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : Optional[Any] = VOCAB_FILES_NAMES
A : Dict = PRETRAINED_VOCAB_FILES_MAP
A : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : int = ["input_ids", "attention_mask"]
A : Any = BartTokenizer
def __init__( self : List[Any] , lowerCAmelCase : Any=None , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : str="replace" , lowerCAmelCase : str="<s>" , lowerCAmelCase : int="</s>" , lowerCAmelCase : Optional[int]="</s>" , lowerCAmelCase : Union[str, Any]="<s>" , lowerCAmelCase : str="<unk>" , lowerCAmelCase : int="<pad>" , lowerCAmelCase : int="<mask>" , lowerCAmelCase : Dict=False , lowerCAmelCase : List[Any]=True , **lowerCAmelCase : Optional[Any] , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(
lowerCAmelCase , lowerCAmelCase , tokenizer_file=lowerCAmelCase , errors=lowerCAmelCase , bos_token=lowerCAmelCase , eos_token=lowerCAmelCase , sep_token=lowerCAmelCase , cls_token=lowerCAmelCase , unk_token=lowerCAmelCase , pad_token=lowerCAmelCase , mask_token=lowerCAmelCase , add_prefix_space=lowerCAmelCase , trim_offsets=lowerCAmelCase , **lowerCAmelCase , )
lowercase__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get('add_prefix_space' , lowerCAmelCase) != add_prefix_space:
lowercase__ = getattr(lowerCAmelCase , pre_tok_state.pop('type'))
lowercase__ = add_prefix_space
lowercase__ = pre_tok_class(**lowerCAmelCase)
lowercase__ = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
lowercase__ = 'post_processor'
lowercase__ = getattr(self.backend_tokenizer , lowerCAmelCase , lowerCAmelCase)
if tokenizer_component_instance:
lowercase__ = json.loads(tokenizer_component_instance.__getstate__())
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
lowercase__ = tuple(state['sep'])
if "cls" in state:
lowercase__ = tuple(state['cls'])
lowercase__ = False
if state.get('add_prefix_space' , lowerCAmelCase) != add_prefix_space:
lowercase__ = add_prefix_space
lowercase__ = True
if state.get('trim_offsets' , lowerCAmelCase) != trim_offsets:
lowercase__ = trim_offsets
lowercase__ = True
if changes_to_apply:
lowercase__ = getattr(lowerCAmelCase , state.pop('type'))
lowercase__ = component_class(**lowerCAmelCase)
setattr(self.backend_tokenizer , lowerCAmelCase , lowerCAmelCase)
@property
def UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.')
return None
return str(self._mask_token)
@mask_token.setter
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : int) -> Optional[int]:
"""simple docstring"""
lowercase__ = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase) if isinstance(lowerCAmelCase , lowerCAmelCase) else value
lowercase__ = value
def UpperCAmelCase ( self : List[str] , *lowerCAmelCase : Tuple , **lowerCAmelCase : Optional[int]) -> BatchEncoding:
"""simple docstring"""
lowercase__ = kwargs.get('is_split_into_words' , lowerCAmelCase)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'to use it with pretokenized inputs.')
return super()._batch_encode_plus(*lowerCAmelCase , **lowerCAmelCase)
def UpperCAmelCase ( self : str , *lowerCAmelCase : Tuple , **lowerCAmelCase : str) -> BatchEncoding:
"""simple docstring"""
lowercase__ = kwargs.get('is_split_into_words' , lowerCAmelCase)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'to use it with pretokenized inputs.')
return super()._encode_plus(*lowerCAmelCase , **lowerCAmelCase)
def UpperCAmelCase ( self : Dict , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None) -> Tuple[str]:
"""simple docstring"""
lowercase__ = self._tokenizer.model.save(lowerCAmelCase , name=lowerCAmelCase)
return tuple(lowerCAmelCase)
def UpperCAmelCase ( self : Any , lowerCAmelCase : str , lowerCAmelCase : Optional[int]=None) -> Tuple:
"""simple docstring"""
lowercase__ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
lowercase__ = [self.sep_token_id]
lowercase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
| 642
| 1
|
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class UpperCAmelCase__:
'''simple docstring'''
def __init__( self : int , lowerCAmelCase : Any , lowerCAmelCase : Dict=13 , lowerCAmelCase : Optional[Any]=7 , lowerCAmelCase : Union[str, Any]=True , lowerCAmelCase : str=True , lowerCAmelCase : int=False , lowerCAmelCase : Dict=True , lowerCAmelCase : Dict=99 , lowerCAmelCase : List[str]=32 , lowerCAmelCase : str=5 , lowerCAmelCase : str=4 , lowerCAmelCase : Tuple=37 , lowerCAmelCase : Any="gelu" , lowerCAmelCase : Union[str, Any]=0.1 , lowerCAmelCase : int=0.1 , lowerCAmelCase : Dict=5_12 , lowerCAmelCase : Optional[int]=16 , lowerCAmelCase : List[Any]=2 , lowerCAmelCase : Dict=0.02 , lowerCAmelCase : Optional[int]=3 , lowerCAmelCase : Dict=4 , lowerCAmelCase : List[Any]=None , ) -> Optional[int]:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = seq_length
lowercase__ = is_training
lowercase__ = use_input_mask
lowercase__ = use_token_type_ids
lowercase__ = use_labels
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = num_labels
lowercase__ = num_choices
lowercase__ = scope
def UpperCAmelCase ( self : List[str]) -> Optional[Any]:
"""simple docstring"""
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
lowercase__ = None
if self.use_input_mask:
lowercase__ = random_attention_mask([self.batch_size, self.seq_length])
lowercase__ = None
if self.use_token_type_ids:
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
lowercase__ = None
lowercase__ = None
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
lowercase__ = ids_tensor([self.batch_size] , self.num_choices)
lowercase__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase ( self : Optional[int]) -> Tuple:
"""simple docstring"""
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase , initializer_range=self.initializer_range , use_stable_embedding=lowerCAmelCase , )
def UpperCAmelCase ( self : int , lowerCAmelCase : Tuple , lowerCAmelCase : Any , lowerCAmelCase : str , lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Dict , lowerCAmelCase : Optional[int]) -> Optional[Any]:
"""simple docstring"""
lowercase__ = OpenLlamaModel(config=lowerCAmelCase)
model.to(lowerCAmelCase)
model.eval()
lowercase__ = model(lowerCAmelCase , attention_mask=lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : str , lowerCAmelCase : List[Any] , lowerCAmelCase : List[str] , lowerCAmelCase : Any , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Any , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[Any] , lowerCAmelCase : List[str] , ) -> List[Any]:
"""simple docstring"""
lowercase__ = True
lowercase__ = OpenLlamaModel(lowerCAmelCase)
model.to(lowerCAmelCase)
model.eval()
lowercase__ = model(
lowerCAmelCase , attention_mask=lowerCAmelCase , encoder_hidden_states=lowerCAmelCase , encoder_attention_mask=lowerCAmelCase , )
lowercase__ = model(
lowerCAmelCase , attention_mask=lowerCAmelCase , encoder_hidden_states=lowerCAmelCase , )
lowercase__ = model(lowerCAmelCase , attention_mask=lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : Tuple , lowerCAmelCase : str , lowerCAmelCase : int , lowerCAmelCase : str , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : int , lowerCAmelCase : int , ) -> List[str]:
"""simple docstring"""
lowercase__ = OpenLlamaForCausalLM(config=lowerCAmelCase)
model.to(lowerCAmelCase)
model.eval()
lowercase__ = model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def UpperCAmelCase ( self : Dict , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[str] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Any , lowerCAmelCase : Optional[int] , lowerCAmelCase : Tuple , lowerCAmelCase : Any , lowerCAmelCase : Union[str, Any] , ) -> Dict:
"""simple docstring"""
lowercase__ = True
lowercase__ = True
lowercase__ = OpenLlamaForCausalLM(config=lowerCAmelCase)
model.to(lowerCAmelCase)
model.eval()
# first forward pass
lowercase__ = model(
lowerCAmelCase , attention_mask=lowerCAmelCase , encoder_hidden_states=lowerCAmelCase , encoder_attention_mask=lowerCAmelCase , use_cache=lowerCAmelCase , )
lowercase__ = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowercase__ = ids_tensor((self.batch_size, 3) , config.vocab_size)
lowercase__ = ids_tensor((self.batch_size, 3) , vocab_size=2)
# append to next input_ids and
lowercase__ = torch.cat([input_ids, next_tokens] , dim=-1)
lowercase__ = torch.cat([input_mask, next_mask] , dim=-1)
lowercase__ = model(
lowerCAmelCase , attention_mask=lowerCAmelCase , encoder_hidden_states=lowerCAmelCase , encoder_attention_mask=lowerCAmelCase , output_hidden_states=lowerCAmelCase , )['hidden_states'][0]
lowercase__ = model(
lowerCAmelCase , attention_mask=lowerCAmelCase , encoder_hidden_states=lowerCAmelCase , encoder_attention_mask=lowerCAmelCase , past_key_values=lowerCAmelCase , output_hidden_states=lowerCAmelCase , )['hidden_states'][0]
# select random slice
lowercase__ = ids_tensor((1,) , output_from_past.shape[-1]).item()
lowercase__ = output_from_no_past[:, -3:, random_slice_idx].detach()
lowercase__ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-3))
def UpperCAmelCase ( self : int) -> int:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
(
(
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
),
) = config_and_inputs
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase__( lowerCamelCase , lowerCamelCase , lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
A : Dict = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
A : int = (OpenLlamaForCausalLM,) if is_torch_available() else ()
A : List[str] = (
{
"feature-extraction": OpenLlamaModel,
"text-classification": OpenLlamaForSequenceClassification,
"text-generation": OpenLlamaForCausalLM,
"zero-shot": OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
A : str = False
A : Tuple = False
def UpperCAmelCase ( self : List[Any]) -> Any:
"""simple docstring"""
lowercase__ = OpenLlamaModelTester(self)
lowercase__ = ConfigTester(self , config_class=lowerCAmelCase , hidden_size=37)
def UpperCAmelCase ( self : Optional[int]) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase ( self : Optional[int]) -> Dict:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase)
def UpperCAmelCase ( self : Any) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowercase__ = type
self.model_tester.create_and_check_model(*lowerCAmelCase)
def UpperCAmelCase ( self : Dict) -> Union[str, Any]:
"""simple docstring"""
lowercase__, lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = 3
lowercase__ = input_dict['input_ids']
lowercase__ = input_ids.ne(1).to(lowerCAmelCase)
lowercase__ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
lowercase__ = OpenLlamaForSequenceClassification(lowerCAmelCase)
model.to(lowerCAmelCase)
model.eval()
lowercase__ = model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def UpperCAmelCase ( self : Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
lowercase__, lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = 3
lowercase__ = 'single_label_classification'
lowercase__ = input_dict['input_ids']
lowercase__ = input_ids.ne(1).to(lowerCAmelCase)
lowercase__ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
lowercase__ = OpenLlamaForSequenceClassification(lowerCAmelCase)
model.to(lowerCAmelCase)
model.eval()
lowercase__ = model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def UpperCAmelCase ( self : Dict) -> int:
"""simple docstring"""
lowercase__, lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = 3
lowercase__ = 'multi_label_classification'
lowercase__ = input_dict['input_ids']
lowercase__ = input_ids.ne(1).to(lowerCAmelCase)
lowercase__ = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float)
lowercase__ = OpenLlamaForSequenceClassification(lowerCAmelCase)
model.to(lowerCAmelCase)
model.eval()
lowercase__ = model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
@unittest.skip('Open-Llama buffers include complex numbers, which breaks this test')
def UpperCAmelCase ( self : int) -> Union[str, Any]:
"""simple docstring"""
pass
@parameterized.expand([('linear',), ('dynamic',)])
def UpperCAmelCase ( self : List[str] , lowerCAmelCase : List[str]) -> List[str]:
"""simple docstring"""
lowercase__, lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = ids_tensor([1, 10] , config.vocab_size)
lowercase__ = ids_tensor([1, int(config.max_position_embeddings * 1.5)] , config.vocab_size)
set_seed(42) # Fixed seed at init time so the two models get the same random weights
lowercase__ = OpenLlamaModel(lowerCAmelCase)
original_model.to(lowerCAmelCase)
original_model.eval()
lowercase__ = original_model(lowerCAmelCase).last_hidden_state
lowercase__ = original_model(lowerCAmelCase).last_hidden_state
set_seed(42) # Fixed seed at init time so the two models get the same random weights
lowercase__ = {'type': scaling_type, 'factor': 10.0}
lowercase__ = OpenLlamaModel(lowerCAmelCase)
scaled_model.to(lowerCAmelCase)
scaled_model.eval()
lowercase__ = scaled_model(lowerCAmelCase).last_hidden_state
lowercase__ = scaled_model(lowerCAmelCase).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-5))
else:
self.assertFalse(torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-5))
# The output should be different for long inputs
self.assertFalse(torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-5))
| 642
|
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : str = (DDIMParallelScheduler,)
A : Any = (("eta", 0.0), ("num_inference_steps", 50))
def UpperCAmelCase ( self : Union[str, Any] , **lowerCAmelCase : Optional[int]) -> Dict:
"""simple docstring"""
lowercase__ = {
'num_train_timesteps': 10_00,
'beta_start': 0.00_01,
'beta_end': 0.02,
'beta_schedule': 'linear',
'clip_sample': True,
}
config.update(**lowerCAmelCase)
return config
def UpperCAmelCase ( self : int , **lowerCAmelCase : str) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config(**lowerCAmelCase)
lowercase__ = scheduler_class(**lowerCAmelCase)
lowercase__, lowercase__ = 10, 0.0
lowercase__ = self.dummy_model()
lowercase__ = self.dummy_sample_deter
scheduler.set_timesteps(lowerCAmelCase)
for t in scheduler.timesteps:
lowercase__ = model(lowerCAmelCase , lowerCAmelCase)
lowercase__ = scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase).prev_sample
return sample
def UpperCAmelCase ( self : Tuple) -> int:
"""simple docstring"""
for timesteps in [1_00, 5_00, 10_00]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase)
def UpperCAmelCase ( self : Tuple) -> Any:
"""simple docstring"""
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=lowerCAmelCase)
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config(steps_offset=1)
lowercase__ = scheduler_class(**lowerCAmelCase)
scheduler.set_timesteps(5)
assert torch.equal(scheduler.timesteps , torch.LongTensor([8_01, 6_01, 4_01, 2_01, 1]))
def UpperCAmelCase ( self : str) -> Tuple:
"""simple docstring"""
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2]):
self.check_over_configs(beta_start=lowerCAmelCase , beta_end=lowerCAmelCase)
def UpperCAmelCase ( self : Optional[int]) -> str:
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowerCAmelCase)
def UpperCAmelCase ( self : List[str]) -> List[str]:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCAmelCase)
def UpperCAmelCase ( self : List[Any]) -> str:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowerCAmelCase)
def UpperCAmelCase ( self : Optional[int]) -> int:
"""simple docstring"""
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=lowerCAmelCase)
def UpperCAmelCase ( self : Any) -> List[str]:
"""simple docstring"""
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=lowerCAmelCase)
def UpperCAmelCase ( self : List[str]) -> Optional[int]:
"""simple docstring"""
self.check_over_configs(thresholding=lowerCAmelCase)
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=lowerCAmelCase , prediction_type=lowerCAmelCase , sample_max_value=lowerCAmelCase , )
def UpperCAmelCase ( self : int) -> Optional[Any]:
"""simple docstring"""
for t in [1, 10, 49]:
self.check_over_forward(time_step=lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> int:
"""simple docstring"""
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 5_00]):
self.check_over_forward(time_step=lowerCAmelCase , num_inference_steps=lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> List[Any]:
"""simple docstring"""
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0]):
self.check_over_forward(time_step=lowerCAmelCase , eta=lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> List[Any]:
"""simple docstring"""
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**lowerCAmelCase)
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0) - 0.0)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_20 , 4_00) - 0.1_47_71)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_80 , 9_60) - 0.3_24_60)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0) - 0.0)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 , 4_86) - 0.0_09_79)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 , 9_98) - 0.02)) < 1E-5
def UpperCAmelCase ( self : Dict) -> Tuple:
"""simple docstring"""
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**lowerCAmelCase)
lowercase__, lowercase__ = 10, 0.0
scheduler.set_timesteps(lowerCAmelCase)
lowercase__ = self.dummy_model()
lowercase__ = self.dummy_sample_deter
lowercase__ = self.dummy_sample_deter + 0.1
lowercase__ = self.dummy_sample_deter - 0.1
lowercase__ = samplea.shape[0]
lowercase__ = torch.stack([samplea, samplea, samplea] , dim=0)
lowercase__ = torch.arange(lowerCAmelCase)[0:3, None].repeat(1 , lowerCAmelCase)
lowercase__ = model(samples.flatten(0 , 1) , timesteps.flatten(0 , 1))
lowercase__ = scheduler.batch_step_no_noise(lowerCAmelCase , timesteps.flatten(0 , 1) , samples.flatten(0 , 1) , lowerCAmelCase)
lowercase__ = torch.sum(torch.abs(lowerCAmelCase))
lowercase__ = torch.mean(torch.abs(lowerCAmelCase))
assert abs(result_sum.item() - 11_47.79_04) < 1E-2
assert abs(result_mean.item() - 0.49_82) < 1E-3
def UpperCAmelCase ( self : Any) -> int:
"""simple docstring"""
lowercase__ = self.full_loop()
lowercase__ = torch.sum(torch.abs(lowerCAmelCase))
lowercase__ = torch.mean(torch.abs(lowerCAmelCase))
assert abs(result_sum.item() - 1_72.00_67) < 1E-2
assert abs(result_mean.item() - 0.22_39_67) < 1E-3
def UpperCAmelCase ( self : int) -> List[Any]:
"""simple docstring"""
lowercase__ = self.full_loop(prediction_type='v_prediction')
lowercase__ = torch.sum(torch.abs(lowerCAmelCase))
lowercase__ = torch.mean(torch.abs(lowerCAmelCase))
assert abs(result_sum.item() - 52.53_02) < 1E-2
assert abs(result_mean.item() - 0.06_84) < 1E-3
def UpperCAmelCase ( self : str) -> Dict:
"""simple docstring"""
lowercase__ = self.full_loop(set_alpha_to_one=lowerCAmelCase , beta_start=0.01)
lowercase__ = torch.sum(torch.abs(lowerCAmelCase))
lowercase__ = torch.mean(torch.abs(lowerCAmelCase))
assert abs(result_sum.item() - 1_49.82_95) < 1E-2
assert abs(result_mean.item() - 0.19_51) < 1E-3
def UpperCAmelCase ( self : str) -> List[Any]:
"""simple docstring"""
lowercase__ = self.full_loop(set_alpha_to_one=lowerCAmelCase , beta_start=0.01)
lowercase__ = torch.sum(torch.abs(lowerCAmelCase))
lowercase__ = torch.mean(torch.abs(lowerCAmelCase))
assert abs(result_sum.item() - 1_49.07_84) < 1E-2
assert abs(result_mean.item() - 0.19_41) < 1E-3
| 642
| 1
|
from __future__ import annotations
def _lowerCAmelCase ( A__ , A__ ):
if partitions <= 0:
raise ValueError('partitions must be a positive number!' )
if partitions > number_of_bytes:
raise ValueError('partitions can not > number_of_bytes!' )
lowercase__ = number_of_bytes // partitions
lowercase__ = []
for i in range(A__ ):
lowercase__ = i * bytes_per_partition + 1
lowercase__ = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(F'''{start_bytes}-{end_bytes}''' )
return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
| 642
|
import cva
import numpy as np
class UpperCAmelCase__:
'''simple docstring'''
def __init__( self : Union[str, Any] , lowerCAmelCase : float , lowerCAmelCase : int) -> Dict:
"""simple docstring"""
if k in (0.04, 0.06):
lowercase__ = k
lowercase__ = window_size
else:
raise ValueError('invalid k value')
def __str__( self : Tuple) -> str:
"""simple docstring"""
return str(self.k)
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : str) -> tuple[cva.Mat, list[list[int]]]:
"""simple docstring"""
lowercase__ = cva.imread(lowerCAmelCase , 0)
lowercase__, lowercase__ = img.shape
lowercase__ = []
lowercase__ = img.copy()
lowercase__ = cva.cvtColor(lowerCAmelCase , cva.COLOR_GRAY2RGB)
lowercase__, lowercase__ = np.gradient(lowerCAmelCase)
lowercase__ = dx**2
lowercase__ = dy**2
lowercase__ = dx * dy
lowercase__ = 0.04
lowercase__ = self.window_size // 2
for y in range(lowerCAmelCase , h - offset):
for x in range(lowerCAmelCase , w - offset):
lowercase__ = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowercase__ = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowercase__ = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowercase__ = (wxx * wyy) - (wxy**2)
lowercase__ = wxx + wyy
lowercase__ = det - k * (trace**2)
# Can change the value
if r > 0.5:
corner_list.append([x, y, r])
color_img.itemset((y, x, 0) , 0)
color_img.itemset((y, x, 1) , 0)
color_img.itemset((y, x, 2) , 2_55)
return color_img, corner_list
if __name__ == "__main__":
a__ : Dict = HarrisCorner(0.0_4, 3)
a__ , a__ : Dict = edge_detect.detect("path_to_image")
cva.imwrite("detect.png", color_img)
| 642
| 1
|
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
a__ : Dict = logging.get_logger(__name__)
set_seed(7_70)
a__ : Optional[int] = {
"c_attn": "att_proj",
"c_proj": "out_proj",
"c_fc": "in_proj",
"transformer.": "",
"h.": "layers.",
"ln_1": "layernorm_1",
"ln_2": "layernorm_2",
"ln_f": "layernorm_final",
"wpe": "position_embeds_layer",
"wte": "input_embeds_layer",
}
a__ : Dict = {
"text_small": {
"repo_id": "suno/bark",
"file_name": "text.pt",
},
"coarse_small": {
"repo_id": "suno/bark",
"file_name": "coarse.pt",
},
"fine_small": {
"repo_id": "suno/bark",
"file_name": "fine.pt",
},
"text": {
"repo_id": "suno/bark",
"file_name": "text_2.pt",
},
"coarse": {
"repo_id": "suno/bark",
"file_name": "coarse_2.pt",
},
"fine": {
"repo_id": "suno/bark",
"file_name": "fine_2.pt",
},
}
a__ : Optional[Any] = os.path.dirname(os.path.abspath(__file__))
a__ : Dict = os.path.join(os.path.expanduser("~"), ".cache")
a__ : str = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def _lowerCAmelCase ( A__ , A__=False ):
lowercase__ = model_type
if use_small:
key += "_small"
return os.path.join(A__ , REMOTE_MODEL_PATHS[key]['file_name'] )
def _lowerCAmelCase ( A__ , A__ ):
os.makedirs(A__ , exist_ok=A__ )
hf_hub_download(repo_id=A__ , filename=A__ , local_dir=A__ )
def _lowerCAmelCase ( A__ , A__ , A__=False , A__="text" ):
if model_type == "text":
lowercase__ = BarkSemanticModel
lowercase__ = BarkSemanticConfig
lowercase__ = BarkSemanticGenerationConfig
elif model_type == "coarse":
lowercase__ = BarkCoarseModel
lowercase__ = BarkCoarseConfig
lowercase__ = BarkCoarseGenerationConfig
elif model_type == "fine":
lowercase__ = BarkFineModel
lowercase__ = BarkFineConfig
lowercase__ = BarkFineGenerationConfig
else:
raise NotImplementedError()
lowercase__ = F'''{model_type}_small''' if use_small else model_type
lowercase__ = REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(A__ ):
logger.info(F'''{model_type} model not found, downloading into `{CACHE_DIR}`.''' )
_download(model_info['repo_id'] , model_info['file_name'] )
lowercase__ = torch.load(A__ , map_location=A__ )
# this is a hack
lowercase__ = checkpoint['model_args']
if "input_vocab_size" not in model_args:
lowercase__ = model_args['vocab_size']
lowercase__ = model_args['vocab_size']
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
lowercase__ = model_args.pop('n_head' )
lowercase__ = model_args.pop('n_embd' )
lowercase__ = model_args.pop('n_layer' )
lowercase__ = ConfigClass(**checkpoint['model_args'] )
lowercase__ = ModelClass(config=A__ )
lowercase__ = GenerationConfigClass()
lowercase__ = model_generation_config
lowercase__ = checkpoint['model']
# fixup checkpoint
lowercase__ = '_orig_mod.'
for k, v in list(state_dict.items() ):
if k.startswith(A__ ):
# replace part of the key with corresponding layer name in HF implementation
lowercase__ = k[len(A__ ) :]
for old_layer_name in new_layer_name_dict:
lowercase__ = new_k.replace(A__ , new_layer_name_dict[old_layer_name] )
lowercase__ = state_dict.pop(A__ )
lowercase__ = set(state_dict.keys() ) - set(model.state_dict().keys() )
lowercase__ = {k for k in extra_keys if not k.endswith('.attn.bias' )}
lowercase__ = set(model.state_dict().keys() ) - set(state_dict.keys() )
lowercase__ = {k for k in missing_keys if not k.endswith('.attn.bias' )}
if len(A__ ) != 0:
raise ValueError(F'''extra keys found: {extra_keys}''' )
if len(A__ ) != 0:
raise ValueError(F'''missing keys: {missing_keys}''' )
model.load_state_dict(A__ , strict=A__ )
lowercase__ = model.num_parameters(exclude_embeddings=A__ )
lowercase__ = checkpoint['best_val_loss'].item()
logger.info(F'''model loaded: {round(n_params/1E6 , 1 )}M params, {round(A__ , 3 )} loss''' )
model.eval()
model.to(A__ )
del checkpoint, state_dict
return model
def _lowerCAmelCase ( A__ , A__=False , A__="text" ):
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
lowercase__ = 'cpu' # do conversion on cpu
lowercase__ = _get_ckpt_path(A__ , use_small=A__ )
lowercase__ = _load_model(A__ , A__ , model_type=A__ , use_small=A__ )
# load bark initial model
lowercase__ = _bark_load_model(A__ , 'cpu' , model_type=A__ , use_small=A__ )
if model_type == "text":
lowercase__ = bark_model['model']
if model.num_parameters(exclude_embeddings=A__ ) != bark_model.get_num_params():
raise ValueError('initial and new models don\'t have the same number of parameters' )
# check if same output as the bark model
lowercase__ = 5
lowercase__ = 10
if model_type in ["text", "coarse"]:
lowercase__ = torch.randint(256 , (batch_size, sequence_length) , dtype=torch.int )
lowercase__ = bark_model(A__ )[0]
lowercase__ = model(A__ )
# take last logits
lowercase__ = output_new_model_total.logits[:, [-1], :]
else:
lowercase__ = 3
lowercase__ = 8
lowercase__ = torch.randint(256 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
lowercase__ = model(A__ , A__ )
lowercase__ = bark_model(A__ , A__ )
lowercase__ = output_new_model_total.logits
# output difference should come from the difference of self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError('initial and new outputs don\'t have the same shape' )
if (output_new_model - output_old_model).abs().max().item() > 1E-3:
raise ValueError('initial and new outputs are not equal' )
Path(A__ ).mkdir(exist_ok=A__ )
model.save_pretrained(A__ )
def _lowerCAmelCase ( A__ , A__ , A__ , A__ , A__ , A__ , ):
lowercase__ = os.path.join(A__ , A__ )
lowercase__ = BarkSemanticConfig.from_pretrained(os.path.join(A__ , 'config.json' ) )
lowercase__ = BarkCoarseConfig.from_pretrained(os.path.join(A__ , 'config.json' ) )
lowercase__ = BarkFineConfig.from_pretrained(os.path.join(A__ , 'config.json' ) )
lowercase__ = EncodecConfig.from_pretrained('facebook/encodec_24khz' )
lowercase__ = BarkSemanticModel.from_pretrained(A__ )
lowercase__ = BarkCoarseModel.from_pretrained(A__ )
lowercase__ = BarkFineModel.from_pretrained(A__ )
lowercase__ = EncodecModel.from_pretrained('facebook/encodec_24khz' )
lowercase__ = BarkConfig.from_sub_model_configs(
A__ , A__ , A__ , A__ )
lowercase__ = BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
lowercase__ = BarkModel(A__ )
lowercase__ = semantic
lowercase__ = coarseAcoustic
lowercase__ = fineAcoustic
lowercase__ = codec
lowercase__ = bark_generation_config
Path(A__ ).mkdir(exist_ok=A__ )
bark.save_pretrained(A__ , repo_id=A__ , push_to_hub=A__ )
if __name__ == "__main__":
a__ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument("model_type", type=str, help="text, coarse or fine.")
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--is_small", action="store_true", help="convert the small version instead of the large.")
a__ : str = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 642
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ : Dict = logging.get_logger(__name__)
a__ : List[Any] = {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : int = "speech_to_text"
A : Optional[Any] = ["past_key_values"]
A : Optional[int] = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : Optional[int] , lowerCAmelCase : Tuple=1_00_00 , lowerCAmelCase : int=12 , lowerCAmelCase : int=20_48 , lowerCAmelCase : Union[str, Any]=4 , lowerCAmelCase : str=6 , lowerCAmelCase : Dict=20_48 , lowerCAmelCase : Dict=4 , lowerCAmelCase : Optional[int]=0.0 , lowerCAmelCase : Union[str, Any]=0.0 , lowerCAmelCase : int=True , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : Dict="relu" , lowerCAmelCase : Tuple=2_56 , lowerCAmelCase : Dict=0.1 , lowerCAmelCase : Optional[Any]=0.0 , lowerCAmelCase : List[Any]=0.0 , lowerCAmelCase : Any=0.02 , lowerCAmelCase : List[Any]=2 , lowerCAmelCase : Tuple=True , lowerCAmelCase : Tuple=1 , lowerCAmelCase : List[str]=0 , lowerCAmelCase : Union[str, Any]=2 , lowerCAmelCase : Any=60_00 , lowerCAmelCase : Optional[int]=10_24 , lowerCAmelCase : Optional[Any]=2 , lowerCAmelCase : Optional[Any]=(5, 5) , lowerCAmelCase : Union[str, Any]=10_24 , lowerCAmelCase : List[Any]=80 , lowerCAmelCase : List[str]=1 , **lowerCAmelCase : List[str] , ) -> Dict:
"""simple docstring"""
lowercase__ = vocab_size
lowercase__ = d_model
lowercase__ = encoder_ffn_dim
lowercase__ = encoder_layers
lowercase__ = encoder_attention_heads
lowercase__ = decoder_ffn_dim
lowercase__ = decoder_layers
lowercase__ = decoder_attention_heads
lowercase__ = dropout
lowercase__ = attention_dropout
lowercase__ = activation_dropout
lowercase__ = activation_function
lowercase__ = init_std
lowercase__ = encoder_layerdrop
lowercase__ = decoder_layerdrop
lowercase__ = use_cache
lowercase__ = encoder_layers
lowercase__ = scale_embedding # scale factor will be sqrt(d_model) if True
lowercase__ = max_source_positions
lowercase__ = max_target_positions
lowercase__ = num_conv_layers
lowercase__ = list(lowerCAmelCase)
lowercase__ = conv_channels
lowercase__ = input_feat_per_channel
lowercase__ = input_channels
if len(self.conv_kernel_sizes) != self.num_conv_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` '
f'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, '''
f'''`config.num_conv_layers = {self.num_conv_layers}`.''')
super().__init__(
pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , is_encoder_decoder=lowerCAmelCase , decoder_start_token_id=lowerCAmelCase , **lowerCAmelCase , )
| 642
| 1
|
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class UpperCAmelCase__:
'''simple docstring'''
def __init__( self : Optional[Any]) -> Optional[Any]:
"""simple docstring"""
lowercase__ = ''
lowercase__ = ''
lowercase__ = []
lowercase__ = 0
lowercase__ = 2_56
lowercase__ = 0
lowercase__ = 0
lowercase__ = 0
lowercase__ = 0
def UpperCAmelCase ( self : str , lowerCAmelCase : Dict) -> Optional[Any]:
"""simple docstring"""
lowercase__ = cva.imread(lowerCAmelCase , 0)
lowercase__ = copy.deepcopy(self.img)
lowercase__, lowercase__, lowercase__ = plt.hist(self.img.ravel() , 2_56 , [0, 2_56] , label='x')
lowercase__ = np.sum(lowerCAmelCase)
for i in range(len(lowerCAmelCase)):
lowercase__ = x[i] / self.k
self.sk += prk
lowercase__ = (self.L - 1) * self.sk
if self.rem != 0:
lowercase__ = int(last % last)
lowercase__ = int(last + 1 if self.rem >= 0.5 else last)
self.last_list.append(lowerCAmelCase)
lowercase__ = int(np.ma.count(self.img) / self.img[1].size)
lowercase__ = self.img[1].size
for i in range(self.number_of_cols):
for j in range(self.number_of_rows):
lowercase__ = self.img[j][i]
if num != self.last_list[num]:
lowercase__ = self.last_list[num]
cva.imwrite('output_data/output.jpg' , self.img)
def UpperCAmelCase ( self : List[str]) -> str:
"""simple docstring"""
plt.hist(self.img.ravel() , 2_56 , [0, 2_56])
def UpperCAmelCase ( self : Union[str, Any]) -> Dict:
"""simple docstring"""
cva.imshow('Output-Image' , self.img)
cva.imshow('Input-Image' , self.original_image)
cva.waitKey(50_00)
cva.destroyAllWindows()
if __name__ == "__main__":
a__ : Dict = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
a__ : int = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 642
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
a__ : Any = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Optional[int] = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Union[str, Any] = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Any = [
"REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ReformerAttention",
"ReformerForMaskedLM",
"ReformerForQuestionAnswering",
"ReformerForSequenceClassification",
"ReformerLayer",
"ReformerModel",
"ReformerModelWithLMHead",
"ReformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
a__ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 642
| 1
|
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a__ : Optional[int] = logging.get_logger(__name__)
a__ : Dict = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
a__ : str = {
"vocab_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
},
"merges_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
},
"tokenizer_config_file": {
"facebook/blenderbot_small-90M": (
"https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
)
},
}
a__ : Any = {"facebook/blenderbot_small-90M": 5_12}
def _lowerCAmelCase ( A__ ):
lowercase__ = set()
lowercase__ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowercase__ = char
lowercase__ = set(A__ )
return pairs
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : List[str] = VOCAB_FILES_NAMES
A : str = PRETRAINED_VOCAB_FILES_MAP
A : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : Tuple = ["input_ids", "attention_mask"]
def __init__( self : Optional[int] , lowerCAmelCase : Dict , lowerCAmelCase : Dict , lowerCAmelCase : int="__start__" , lowerCAmelCase : Dict="__end__" , lowerCAmelCase : Any="__unk__" , lowerCAmelCase : str="__null__" , **lowerCAmelCase : Optional[Any] , ) -> List[str]:
"""simple docstring"""
super().__init__(unk_token=lowerCAmelCase , bos_token=lowerCAmelCase , eos_token=lowerCAmelCase , pad_token=lowerCAmelCase , **lowerCAmelCase)
with open(lowerCAmelCase , encoding='utf-8') as vocab_handle:
lowercase__ = json.load(lowerCAmelCase)
lowercase__ = {v: k for k, v in self.encoder.items()}
with open(lowerCAmelCase , encoding='utf-8') as merges_handle:
lowercase__ = merges_handle.read().split('\n')[1:-1]
lowercase__ = [tuple(merge.split()) for merge in merges]
lowercase__ = dict(zip(lowerCAmelCase , range(len(lowerCAmelCase))))
lowercase__ = {}
@property
def UpperCAmelCase ( self : int) -> int:
"""simple docstring"""
return len(self.encoder)
def UpperCAmelCase ( self : str) -> Dict:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder)
def UpperCAmelCase ( self : str , lowerCAmelCase : str) -> str:
"""simple docstring"""
if token in self.cache:
return self.cache[token]
lowercase__ = re.sub('([.,!?()])' , R' \1' , lowerCAmelCase)
lowercase__ = re.sub('(\')' , R' \1 ' , lowerCAmelCase)
lowercase__ = re.sub(R'\s{2,}' , ' ' , lowerCAmelCase)
if "\n" in token:
lowercase__ = token.replace('\n' , ' __newln__')
lowercase__ = token.split(' ')
lowercase__ = []
for token in tokens:
if not len(lowerCAmelCase):
continue
lowercase__ = token.lower()
lowercase__ = tuple(lowerCAmelCase)
lowercase__ = tuple(list(word[:-1]) + [word[-1] + '</w>'])
lowercase__ = get_pairs(lowerCAmelCase)
if not pairs:
words.append(lowerCAmelCase)
continue
while True:
lowercase__ = min(lowerCAmelCase , key=lambda lowerCAmelCase: self.bpe_ranks.get(lowerCAmelCase , float('inf')))
if bigram not in self.bpe_ranks:
break
lowercase__, lowercase__ = bigram
lowercase__ = []
lowercase__ = 0
while i < len(lowerCAmelCase):
try:
lowercase__ = word.index(lowerCAmelCase , lowerCAmelCase)
new_word.extend(word[i:j])
lowercase__ = j
except ValueError:
new_word.extend(word[i:])
break
if word[i] == first and i < len(lowerCAmelCase) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
lowercase__ = tuple(lowerCAmelCase)
lowercase__ = new_word
if len(lowerCAmelCase) == 1:
break
else:
lowercase__ = get_pairs(lowerCAmelCase)
lowercase__ = '@@ '.join(lowerCAmelCase)
lowercase__ = word[:-4]
lowercase__ = word
words.append(lowerCAmelCase)
return " ".join(lowerCAmelCase)
def UpperCAmelCase ( self : List[str] , lowerCAmelCase : str) -> List[str]:
"""simple docstring"""
lowercase__ = []
lowercase__ = re.findall(R'\S+\n?' , lowerCAmelCase)
for token in words:
split_tokens.extend(list(self.bpe(lowerCAmelCase).split(' ')))
return split_tokens
def UpperCAmelCase ( self : int , lowerCAmelCase : str) -> int:
"""simple docstring"""
lowercase__ = token.lower()
return self.encoder.get(lowerCAmelCase , self.encoder.get(self.unk_token))
def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : int) -> str:
"""simple docstring"""
return self.decoder.get(lowerCAmelCase , self.unk_token)
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : List[str]) -> str:
"""simple docstring"""
lowercase__ = ' '.join(lowerCAmelCase).replace('@@ ' , '').strip()
return out_string
def UpperCAmelCase ( self : str , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(lowerCAmelCase):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
return
lowercase__ = os.path.join(
lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
lowercase__ = os.path.join(
lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
with open(lowerCAmelCase , 'w' , encoding='utf-8') as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase , ensure_ascii=lowerCAmelCase) + '\n')
lowercase__ = 0
with open(lowerCAmelCase , 'w' , encoding='utf-8') as writer:
writer.write('#version: 0.2\n')
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCAmelCase: kv[1]):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
' Please check that the tokenizer is not corrupted!')
lowercase__ = token_index
writer.write(' '.join(lowerCAmelCase) + '\n')
index += 1
return vocab_file, merge_file
| 642
|
# Imports
import numpy as np
class UpperCAmelCase__:
'''simple docstring'''
def __init__( self : Any , lowerCAmelCase : Dict=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : List[str]=None , lowerCAmelCase : List[str]=None) -> Dict:
"""simple docstring"""
self.set_matricies(red=lowerCAmelCase , green=lowerCAmelCase , blue=lowerCAmelCase , red_edge=lowerCAmelCase , nir=lowerCAmelCase)
def UpperCAmelCase ( self : Dict , lowerCAmelCase : Dict=None , lowerCAmelCase : Union[str, Any]=None , lowerCAmelCase : Tuple=None , lowerCAmelCase : str=None , lowerCAmelCase : str=None) -> int:
"""simple docstring"""
if red is not None:
lowercase__ = red
if green is not None:
lowercase__ = green
if blue is not None:
lowercase__ = blue
if red_edge is not None:
lowercase__ = red_edge
if nir is not None:
lowercase__ = nir
return True
def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : Union[str, Any]="" , lowerCAmelCase : Tuple=None , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Dict=None) -> Union[str, Any]:
"""simple docstring"""
self.set_matricies(red=lowerCAmelCase , green=lowerCAmelCase , blue=lowerCAmelCase , red_edge=lowerCAmelCase , nir=lowerCAmelCase)
lowercase__ = {
'ARVI2': self.arvaa,
'CCCI': self.ccci,
'CVI': self.cvi,
'GLI': self.gli,
'NDVI': self.ndvi,
'BNDVI': self.bndvi,
'redEdgeNDVI': self.red_edge_ndvi,
'GNDVI': self.gndvi,
'GBNDVI': self.gbndvi,
'GRNDVI': self.grndvi,
'RBNDVI': self.rbndvi,
'PNDVI': self.pndvi,
'ATSAVI': self.atsavi,
'BWDRVI': self.bwdrvi,
'CIgreen': self.ci_green,
'CIrededge': self.ci_rededge,
'CI': self.ci,
'CTVI': self.ctvi,
'GDVI': self.gdvi,
'EVI': self.evi,
'GEMI': self.gemi,
'GOSAVI': self.gosavi,
'GSAVI': self.gsavi,
'Hue': self.hue,
'IVI': self.ivi,
'IPVI': self.ipvi,
'I': self.i,
'RVI': self.rvi,
'MRVI': self.mrvi,
'MSAVI': self.m_savi,
'NormG': self.norm_g,
'NormNIR': self.norm_nir,
'NormR': self.norm_r,
'NGRDI': self.ngrdi,
'RI': self.ri,
'S': self.s,
'IF': self._if,
'DVI': self.dvi,
'TVI': self.tvi,
'NDRE': self.ndre,
}
try:
return funcs[index]()
except KeyError:
print('Index not in the list!')
return False
def UpperCAmelCase ( self : Optional[int]) -> List[str]:
"""simple docstring"""
return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
def UpperCAmelCase ( self : int) -> Any:
"""simple docstring"""
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def UpperCAmelCase ( self : str) -> Optional[int]:
"""simple docstring"""
return self.nir * (self.red / (self.green**2))
def UpperCAmelCase ( self : List[str]) -> Optional[int]:
"""simple docstring"""
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def UpperCAmelCase ( self : Tuple) -> Any:
"""simple docstring"""
return (self.nir - self.red) / (self.nir + self.red)
def UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
return (self.nir - self.blue) / (self.nir + self.blue)
def UpperCAmelCase ( self : List[Any]) -> Optional[int]:
"""simple docstring"""
return (self.redEdge - self.red) / (self.redEdge + self.red)
def UpperCAmelCase ( self : Optional[Any]) -> Optional[int]:
"""simple docstring"""
return (self.nir - self.green) / (self.nir + self.green)
def UpperCAmelCase ( self : Dict) -> int:
"""simple docstring"""
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def UpperCAmelCase ( self : Union[str, Any]) -> Optional[int]:
"""simple docstring"""
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def UpperCAmelCase ( self : Optional[Any]) -> Dict:
"""simple docstring"""
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def UpperCAmelCase ( self : Any) -> Union[str, Any]:
"""simple docstring"""
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : List[Any]=0.08 , lowerCAmelCase : Optional[int]=1.22 , lowerCAmelCase : int=0.03) -> List[Any]:
"""simple docstring"""
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def UpperCAmelCase ( self : Tuple) -> Any:
"""simple docstring"""
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def UpperCAmelCase ( self : int) -> Tuple:
"""simple docstring"""
return (self.nir / self.green) - 1
def UpperCAmelCase ( self : Any) -> str:
"""simple docstring"""
return (self.nir / self.redEdge) - 1
def UpperCAmelCase ( self : Any) -> List[str]:
"""simple docstring"""
return (self.red - self.blue) / self.red
def UpperCAmelCase ( self : Any) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))
def UpperCAmelCase ( self : List[Any]) -> str:
"""simple docstring"""
return self.nir - self.green
def UpperCAmelCase ( self : Tuple) -> List[Any]:
"""simple docstring"""
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def UpperCAmelCase ( self : Any) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.25 * n) - (self.red - 0.1_25) / (1 - self.red)
def UpperCAmelCase ( self : int , lowerCAmelCase : int=0.16) -> Dict:
"""simple docstring"""
return (self.nir - self.green) / (self.nir + self.green + y)
def UpperCAmelCase ( self : str , lowerCAmelCase : Optional[int]=0.5) -> Union[str, Any]:
"""simple docstring"""
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def UpperCAmelCase ( self : str) -> int:
"""simple docstring"""
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue))
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : int=None , lowerCAmelCase : List[str]=None) -> Tuple:
"""simple docstring"""
return (self.nir - b) / (a * self.red)
def UpperCAmelCase ( self : int) -> Dict:
"""simple docstring"""
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def UpperCAmelCase ( self : Optional[int]) -> Optional[Any]:
"""simple docstring"""
return (self.red + self.green + self.blue) / 30.5
def UpperCAmelCase ( self : int) -> str:
"""simple docstring"""
return self.nir / self.red
def UpperCAmelCase ( self : Optional[int]) -> Optional[Any]:
"""simple docstring"""
return (self.rvi() - 1) / (self.rvi() + 1)
def UpperCAmelCase ( self : Optional[int]) -> Optional[int]:
"""simple docstring"""
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def UpperCAmelCase ( self : Tuple) -> Any:
"""simple docstring"""
return self.green / (self.nir + self.red + self.green)
def UpperCAmelCase ( self : Any) -> Optional[Any]:
"""simple docstring"""
return self.nir / (self.nir + self.red + self.green)
def UpperCAmelCase ( self : List[Any]) -> Dict:
"""simple docstring"""
return self.red / (self.nir + self.red + self.green)
def UpperCAmelCase ( self : Optional[Any]) -> Any:
"""simple docstring"""
return (self.green - self.red) / (self.green + self.red)
def UpperCAmelCase ( self : Dict) -> Tuple:
"""simple docstring"""
return (self.red - self.green) / (self.red + self.green)
def UpperCAmelCase ( self : str) -> int:
"""simple docstring"""
lowercase__ = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
lowercase__ = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
return (max_value - min_value) / max_value
def UpperCAmelCase ( self : Optional[int]) -> Tuple:
"""simple docstring"""
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def UpperCAmelCase ( self : int) -> Optional[Any]:
"""simple docstring"""
return self.nir / self.red
def UpperCAmelCase ( self : Dict) -> Dict:
"""simple docstring"""
return (self.ndvi() + 0.5) ** (1 / 2)
def UpperCAmelCase ( self : str) -> List[Any]:
"""simple docstring"""
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
| 642
| 1
|
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
a__ : Dict = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
a__ : Union[str, Any] = concatenate_datasets
a__ : Union[str, Any] = DownloadConfig
a__ : List[Any] = DownloadManager
a__ : Any = DownloadMode
a__ : Tuple = DownloadConfig
a__ : int = DownloadMode
a__ : Optional[Any] = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 642
|
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class UpperCAmelCase__( unittest.TestCase , lowerCamelCase ):
'''simple docstring'''
def UpperCAmelCase ( self : List[str]) -> Any:
"""simple docstring"""
lowercase__ = load_tool('text-classification')
self.tool.setup()
lowercase__ = load_tool('text-classification' , remote=lowerCAmelCase)
def UpperCAmelCase ( self : Any) -> Tuple:
"""simple docstring"""
lowercase__ = self.tool('That\'s quite cool' , ['positive', 'negative'])
self.assertEqual(lowerCAmelCase , 'positive')
def UpperCAmelCase ( self : int) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.remote_tool('That\'s quite cool' , ['positive', 'negative'])
self.assertEqual(lowerCAmelCase , 'positive')
def UpperCAmelCase ( self : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.tool(text='That\'s quite cool' , labels=['positive', 'negative'])
self.assertEqual(lowerCAmelCase , 'positive')
def UpperCAmelCase ( self : Any) -> Any:
"""simple docstring"""
lowercase__ = self.remote_tool(text='That\'s quite cool' , labels=['positive', 'negative'])
self.assertEqual(lowerCAmelCase , 'positive')
| 642
| 1
|
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
a__ : Optional[int] = 6_3_7_8_1_3_7.0
a__ : Union[str, Any] = 6_3_5_6_7_5_2.3_1_4_2_4_5
a__ : Optional[Any] = 6_37_81_37
def _lowerCAmelCase ( A__ , A__ , A__ , A__ ):
lowercase__ = (AXIS_A - AXIS_B) / AXIS_A
# Parametric latitudes
# https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
lowercase__ = atan((1 - flattening) * tan(radians(A__ ) ) )
lowercase__ = atan((1 - flattening) * tan(radians(A__ ) ) )
# Compute central angle between two points
# using haversine theta. sigma = haversine_distance / equatorial radius
lowercase__ = haversine_distance(A__ , A__ , A__ , A__ ) / EQUATORIAL_RADIUS
# Intermediate P and Q values
lowercase__ = (b_lata + b_lata) / 2
lowercase__ = (b_lata - b_lata) / 2
# Intermediate X value
# X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
lowercase__ = (sin(A__ ) ** 2) * (cos(A__ ) ** 2)
lowercase__ = cos(sigma / 2 ) ** 2
lowercase__ = (sigma - sin(A__ )) * (x_numerator / x_demonimator)
# Intermediate Y value
# Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
lowercase__ = (cos(A__ ) ** 2) * (sin(A__ ) ** 2)
lowercase__ = sin(sigma / 2 ) ** 2
lowercase__ = (sigma + sin(A__ )) * (y_numerator / y_denominator)
return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 642
|
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : List[Any] = None
A : Optional[int] = None
@property
def UpperCAmelCase ( self : str) -> Union[str, Any]:
"""simple docstring"""
return self.feat_extract_tester.prepare_feat_extract_dict()
def UpperCAmelCase ( self : int) -> Any:
"""simple docstring"""
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
self.assertTrue(hasattr(lowerCAmelCase , 'feature_size'))
self.assertTrue(hasattr(lowerCAmelCase , 'sampling_rate'))
self.assertTrue(hasattr(lowerCAmelCase , 'padding_value'))
def UpperCAmelCase ( self : Union[str, Any]) -> Dict:
"""simple docstring"""
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
self.assertTrue(all(len(lowerCAmelCase) == len(lowerCAmelCase) for x, y in zip(lowerCAmelCase , processed_features[input_name])))
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCAmelCase)
lowercase__ = BatchFeature({input_name: speech_inputs} , tensor_type='np')
lowercase__ = processed_features[input_name]
if len(batch_features_input.shape) < 3:
lowercase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
@require_torch
def UpperCAmelCase ( self : Dict) -> int:
"""simple docstring"""
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCAmelCase)
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs} , tensor_type='pt')
lowercase__ = processed_features[input_name]
if len(batch_features_input.shape) < 3:
lowercase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
@require_tf
def UpperCAmelCase ( self : Optional[Any]) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCAmelCase)
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs} , tensor_type='tf')
lowercase__ = processed_features[input_name]
if len(batch_features_input.shape) < 3:
lowercase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
def UpperCAmelCase ( self : str , lowerCAmelCase : str=False) -> Union[str, Any]:
"""simple docstring"""
def _inputs_have_equal_length(lowerCAmelCase : int):
lowercase__ = len(input[0])
for input_slice in input[1:]:
if len(lowerCAmelCase) != length:
return False
return True
def _inputs_are_equal(lowerCAmelCase : Optional[Any] , lowerCAmelCase : Tuple):
if len(lowerCAmelCase) != len(lowerCAmelCase):
return False
for input_slice_a, input_slice_a in zip(lowerCAmelCase , lowerCAmelCase):
if not np.allclose(np.asarray(lowerCAmelCase) , np.asarray(lowerCAmelCase) , atol=1E-3):
return False
return True
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(numpify=lowerCAmelCase)
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
lowercase__ = self.feat_extract_tester.seq_length_diff
lowercase__ = self.feat_extract_tester.max_seq_length + pad_diff
lowercase__ = self.feat_extract_tester.min_seq_length
lowercase__ = self.feat_extract_tester.batch_size
lowercase__ = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
lowercase__ = feat_extract.pad(lowerCAmelCase , padding=lowerCAmelCase)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest')
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[-1]))
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='np')
lowercase__ = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(lowerCAmelCase):
feat_extract.pad(lowerCAmelCase , padding='max_length')[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=lowerCAmelCase , return_tensors='np')
lowercase__ = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(_inputs_are_equal(lowerCAmelCase , lowerCAmelCase))
self.assertTrue(len(input_a[0]) == pad_min_length)
self.assertTrue(len(input_a[1]) == pad_min_length + pad_diff)
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0])))
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length))
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size)
# test padding for `pad_to_multiple_of` for List[int] + numpy
lowercase__ = feat_extract.pad(lowerCAmelCase , pad_to_multiple_of=10)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , pad_to_multiple_of=10)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , pad_to_multiple_of=10 , max_length=lowerCAmelCase)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , pad_to_multiple_of=10 , max_length=lowerCAmelCase , return_tensors='np' , )
lowercase__ = input_a[input_name]
self.assertTrue(all(len(lowerCAmelCase) % 10 == 0 for x in input_a))
self.assertTrue(_inputs_are_equal(lowerCAmelCase , lowerCAmelCase))
lowercase__ = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(lowerCAmelCase) == expected_mult_pad_length for x in input_a))
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length))
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size)
# Check padding value is correct
lowercase__ = (np.ones(self.feat_extract_tester.feature_size) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0])[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length))
< 1E-3)
self.assertTrue(
abs(
np.asarray(input_a[1])[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff))
< 1E-3)
self.assertTrue(
abs(
np.asarray(input_a[2])[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff))
< 1E-3)
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1E-3)
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length))
< 1E-3)
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : Dict=False) -> str:
"""simple docstring"""
def _inputs_have_equal_length(lowerCAmelCase : int):
lowercase__ = len(input[0])
for input_slice in input[1:]:
if len(lowerCAmelCase) != length:
return False
return True
def _inputs_are_equal(lowerCAmelCase : str , lowerCAmelCase : Optional[Any]):
if len(lowerCAmelCase) != len(lowerCAmelCase):
return False
for input_slice_a, input_slice_a in zip(lowerCAmelCase , lowerCAmelCase):
if not np.allclose(np.asarray(lowerCAmelCase) , np.asarray(lowerCAmelCase) , atol=1E-3):
return False
return True
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(numpify=lowerCAmelCase)
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
# truncate to smallest
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , truncation=lowerCAmelCase)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]))
lowercase__ = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase))
# truncate to smallest with np
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , return_tensors='np' , truncation=lowerCAmelCase , )
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , return_tensors='np')
lowercase__ = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(input_a.shape[1] == len(speech_inputs[0]))
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase))
# truncate to middle
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[1]) , truncation=lowerCAmelCase , return_tensors='np' , )
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[1]) , truncation=lowerCAmelCase)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[1]) , return_tensors='np')
lowercase__ = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1]))
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(_inputs_are_equal(lowerCAmelCase , lowerCAmelCase))
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(len(input_a[-1]) == len(speech_inputs[-1]))
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(lowerCAmelCase):
feat_extract.pad(lowerCAmelCase , truncation=lowerCAmelCase)[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(lowerCAmelCase):
feat_extract.pad(lowerCAmelCase , padding='longest' , truncation=lowerCAmelCase)[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(lowerCAmelCase):
feat_extract.pad(lowerCAmelCase , padding='longest' , truncation=lowerCAmelCase)[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(lowerCAmelCase):
feat_extract.pad(lowerCAmelCase , padding='max_length' , truncation=lowerCAmelCase)[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
lowercase__ = 12
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , pad_to_multiple_of=lowerCAmelCase , truncation=lowerCAmelCase , )
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , pad_to_multiple_of=lowerCAmelCase , )
lowercase__ = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
lowercase__ = len(speech_inputs[0])
if expected_length % pad_to_multiple_of != 0:
lowercase__ = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0]) == expected_length)
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase))
def UpperCAmelCase ( self : List[str]) -> List[str]:
"""simple docstring"""
self._check_padding(numpify=lowerCAmelCase)
def UpperCAmelCase ( self : Any) -> Optional[Any]:
"""simple docstring"""
self._check_padding(numpify=lowerCAmelCase)
def UpperCAmelCase ( self : List[Any]) -> int:
"""simple docstring"""
self._check_truncation(numpify=lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> Dict:
"""simple docstring"""
self._check_truncation(numpify=lowerCAmelCase)
@require_torch
def UpperCAmelCase ( self : Dict) -> List[str]:
"""simple docstring"""
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='np')[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='pt')[input_name]
self.assertTrue(abs(input_np.astype(np.floataa).sum() - input_pt.numpy().astype(np.floataa).sum()) < 1E-2)
@require_tf
def UpperCAmelCase ( self : str) -> str:
"""simple docstring"""
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='np')[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='tf')[input_name]
self.assertTrue(abs(input_np.astype(np.floataa).sum() - input_tf.numpy().astype(np.floataa).sum()) < 1E-2)
def UpperCAmelCase ( self : Optional[Any]) -> Tuple:
"""simple docstring"""
lowercase__ = self.feat_extract_dict
lowercase__ = True
lowercase__ = self.feature_extraction_class(**lowerCAmelCase)
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowercase__ = [len(lowerCAmelCase) for x in speech_inputs]
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='np')
self.assertIn('attention_mask' , lowerCAmelCase)
self.assertListEqual(list(processed.attention_mask.shape) , list(processed[input_name].shape[:2]))
self.assertListEqual(processed.attention_mask.sum(-1).tolist() , lowerCAmelCase)
def UpperCAmelCase ( self : str) -> Tuple:
"""simple docstring"""
lowercase__ = self.feat_extract_dict
lowercase__ = True
lowercase__ = self.feature_extraction_class(**lowerCAmelCase)
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowercase__ = [len(lowerCAmelCase) for x in speech_inputs]
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
lowercase__ = min(lowerCAmelCase)
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=lowerCAmelCase , truncation=lowerCAmelCase , return_tensors='np')
self.assertIn('attention_mask' , lowerCAmelCase)
self.assertListEqual(
list(processed_pad.attention_mask.shape) , [processed_pad[input_name].shape[0], max_length])
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1).tolist() , [max_length for x in speech_inputs])
| 642
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
a__ : List[str] = {
"configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
"tokenization_mvp": ["MvpTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Tuple = ["MvpTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Any = [
"MVP_PRETRAINED_MODEL_ARCHIVE_LIST",
"MvpForCausalLM",
"MvpForConditionalGeneration",
"MvpForQuestionAnswering",
"MvpForSequenceClassification",
"MvpModel",
"MvpPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
a__ : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 642
|
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def _lowerCAmelCase ( A__ ):
lowercase__ = prime_factors(A__ )
if is_square_free(A__ ):
return -1 if len(A__ ) % 2 else 1
return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 642
| 1
|
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : List[str] = ["image_processor", "tokenizer"]
A : List[Any] = "AutoImageProcessor"
A : Dict = "AutoTokenizer"
def __init__( self : Tuple , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Union[str, Any]=None , **lowerCAmelCase : Dict) -> List[Any]:
"""simple docstring"""
lowercase__ = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , lowerCAmelCase , )
lowercase__ = kwargs.pop('feature_extractor')
lowercase__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.')
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.')
super().__init__(lowerCAmelCase , lowerCAmelCase)
lowercase__ = self.image_processor
lowercase__ = False
def __call__( self : Any , *lowerCAmelCase : Dict , **lowerCAmelCase : Union[str, Any]) -> Tuple:
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*lowerCAmelCase , **lowerCAmelCase)
lowercase__ = kwargs.pop('images' , lowerCAmelCase)
lowercase__ = kwargs.pop('text' , lowerCAmelCase)
if len(lowerCAmelCase) > 0:
lowercase__ = args[0]
lowercase__ = args[1:]
if images is None and text is None:
raise ValueError('You need to specify either an `images` or `text` input to process.')
if images is not None:
lowercase__ = self.image_processor(lowerCAmelCase , *lowerCAmelCase , **lowerCAmelCase)
if text is not None:
lowercase__ = self.tokenizer(lowerCAmelCase , **lowerCAmelCase)
if text is None:
return inputs
elif images is None:
return encodings
else:
lowercase__ = encodings['input_ids']
return inputs
def UpperCAmelCase ( self : Tuple , *lowerCAmelCase : int , **lowerCAmelCase : Any) -> str:
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCAmelCase , **lowerCAmelCase)
def UpperCAmelCase ( self : str , *lowerCAmelCase : List[str] , **lowerCAmelCase : Any) -> Dict:
"""simple docstring"""
return self.tokenizer.decode(*lowerCAmelCase , **lowerCAmelCase)
@contextmanager
def UpperCAmelCase ( self : List[str]) -> Optional[Any]:
"""simple docstring"""
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your images inputs, or in a separate call.')
lowercase__ = True
lowercase__ = self.tokenizer
yield
lowercase__ = self.image_processor
lowercase__ = False
def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : Dict , lowerCAmelCase : Tuple=False , lowerCAmelCase : int=None) -> Optional[Any]:
"""simple docstring"""
if added_vocab is None:
lowercase__ = self.tokenizer.get_added_vocab()
lowercase__ = {}
while tokens:
lowercase__ = re.search(R'<s_(.*?)>' , lowerCAmelCase , re.IGNORECASE)
if start_token is None:
break
lowercase__ = start_token.group(1)
lowercase__ = re.search(Rf'''</s_{key}>''' , lowerCAmelCase , re.IGNORECASE)
lowercase__ = start_token.group()
if end_token is None:
lowercase__ = tokens.replace(lowerCAmelCase , '')
else:
lowercase__ = end_token.group()
lowercase__ = re.escape(lowerCAmelCase)
lowercase__ = re.escape(lowerCAmelCase)
lowercase__ = re.search(f'''{start_token_escaped}(.*?){end_token_escaped}''' , lowerCAmelCase , re.IGNORECASE)
if content is not None:
lowercase__ = content.group(1).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
lowercase__ = self.tokenajson(lowerCAmelCase , is_inner_value=lowerCAmelCase , added_vocab=lowerCAmelCase)
if value:
if len(lowerCAmelCase) == 1:
lowercase__ = value[0]
lowercase__ = value
else: # leaf nodes
lowercase__ = []
for leaf in content.split(R'<sep/>'):
lowercase__ = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
lowercase__ = leaf[1:-2] # for categorical special tokens
output[key].append(lowerCAmelCase)
if len(output[key]) == 1:
lowercase__ = output[key][0]
lowercase__ = tokens[tokens.find(lowerCAmelCase) + len(lowerCAmelCase) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] , is_inner_value=lowerCAmelCase , added_vocab=lowerCAmelCase)
if len(lowerCAmelCase):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def UpperCAmelCase ( self : Optional[int]) -> List[Any]:
"""simple docstring"""
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , lowerCAmelCase , )
return self.image_processor_class
@property
def UpperCAmelCase ( self : Any) -> List[str]:
"""simple docstring"""
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , lowerCAmelCase , )
return self.image_processor
| 642
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
a__ : List[str] = logging.get_logger(__name__)
a__ : List[Any] = {
"microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}
class UpperCAmelCase__( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
A : List[str] = "focalnet"
def __init__( self : Dict , lowerCAmelCase : Union[str, Any]=2_24 , lowerCAmelCase : List[str]=4 , lowerCAmelCase : int=3 , lowerCAmelCase : Union[str, Any]=96 , lowerCAmelCase : List[Any]=False , lowerCAmelCase : int=[1_92, 3_84, 7_68, 7_68] , lowerCAmelCase : str=[2, 2, 6, 2] , lowerCAmelCase : Tuple=[2, 2, 2, 2] , lowerCAmelCase : Optional[Any]=[3, 3, 3, 3] , lowerCAmelCase : int="gelu" , lowerCAmelCase : Any=4.0 , lowerCAmelCase : List[str]=0.0 , lowerCAmelCase : Union[str, Any]=0.1 , lowerCAmelCase : Union[str, Any]=False , lowerCAmelCase : Tuple=1E-4 , lowerCAmelCase : List[Any]=False , lowerCAmelCase : Optional[int]=False , lowerCAmelCase : List[str]=False , lowerCAmelCase : str=0.02 , lowerCAmelCase : Optional[int]=1E-5 , lowerCAmelCase : List[Any]=32 , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Union[str, Any]=None , **lowerCAmelCase : str , ) -> List[str]:
"""simple docstring"""
super().__init__(**lowerCAmelCase)
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = embed_dim
lowercase__ = use_conv_embed
lowercase__ = hidden_sizes
lowercase__ = depths
lowercase__ = focal_levels
lowercase__ = focal_windows
lowercase__ = hidden_act
lowercase__ = mlp_ratio
lowercase__ = hidden_dropout_prob
lowercase__ = drop_path_rate
lowercase__ = use_layerscale
lowercase__ = layerscale_value
lowercase__ = use_post_layernorm
lowercase__ = use_post_layernorm_in_modulation
lowercase__ = normalize_modulator
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = encoder_stride
lowercase__ = ['stem'] + [f'''stage{idx}''' for idx in range(1 , len(self.depths) + 1)]
lowercase__, lowercase__ = get_aligned_output_features_output_indices(
out_features=lowerCAmelCase , out_indices=lowerCAmelCase , stage_names=self.stage_names)
| 642
| 1
|
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
a__ : Any = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__( lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
A : str = XGLMTokenizer
A : List[Any] = XGLMTokenizerFast
A : int = True
A : Optional[Any] = True
def UpperCAmelCase ( self : Optional[int]) -> Optional[Any]:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
lowercase__ = XGLMTokenizer(lowerCAmelCase , keep_accents=lowerCAmelCase)
tokenizer.save_pretrained(self.tmpdirname)
def UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
lowercase__ = '<pad>'
lowercase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase) , lowerCAmelCase)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase) , lowerCAmelCase)
def UpperCAmelCase ( self : str) -> List[str]:
"""simple docstring"""
lowercase__ = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '<s>')
self.assertEqual(vocab_keys[1] , '<pad>')
self.assertEqual(len(lowerCAmelCase) , 10_08)
def UpperCAmelCase ( self : List[str]) -> str:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 10_08)
def UpperCAmelCase ( self : Optional[Any]) -> List[str]:
"""simple docstring"""
lowercase__ = XGLMTokenizer(lowerCAmelCase , keep_accents=lowerCAmelCase)
lowercase__ = tokenizer.tokenize('This is a test')
self.assertListEqual(lowerCAmelCase , ['▁This', '▁is', '▁a', '▁t', 'est'])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
lowercase__ = tokenizer.tokenize('I was born in 92000, and this is falsé.')
self.assertListEqual(
lowerCAmelCase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
lowercase__ = tokenizer.convert_tokens_to_ids(lowerCAmelCase)
self.assertListEqual(
lowerCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
lowercase__ = tokenizer.convert_ids_to_tokens(lowerCAmelCase)
self.assertListEqual(
lowerCAmelCase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
def UpperCAmelCase ( self : int) -> Dict:
"""simple docstring"""
return XGLMTokenizer.from_pretrained('facebook/xglm-564M')
def UpperCAmelCase ( self : Optional[int]) -> Dict:
"""simple docstring"""
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(lowerCAmelCase , f.name)
lowercase__ = XGLMTokenizer(f.name , keep_accents=lowerCAmelCase)
lowercase__ = pickle.dumps(lowerCAmelCase)
pickle.loads(lowerCAmelCase)
def UpperCAmelCase ( self : Optional[Any]) -> str:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_rust_tokenizer()
lowercase__ = 'I was born in 92000, and this is falsé.'
lowercase__ = tokenizer.tokenize(lowerCAmelCase)
lowercase__ = rust_tokenizer.tokenize(lowerCAmelCase)
self.assertListEqual(lowerCAmelCase , lowerCAmelCase)
lowercase__ = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase)
lowercase__ = rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase)
self.assertListEqual(lowerCAmelCase , lowerCAmelCase)
lowercase__ = self.get_rust_tokenizer()
lowercase__ = tokenizer.encode(lowerCAmelCase)
lowercase__ = rust_tokenizer.encode(lowerCAmelCase)
self.assertListEqual(lowerCAmelCase , lowerCAmelCase)
@slow
def UpperCAmelCase ( self : List[str]) -> List[str]:
"""simple docstring"""
lowercase__ = 'Hello World!'
lowercase__ = [2, 3_12_27, 44_47, 35]
self.assertListEqual(lowerCAmelCase , self.big_tokenizer.encode(lowerCAmelCase))
@slow
def UpperCAmelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'
)
# fmt: off
lowercase__ = [2, 10_18, 67, 11, 19_88, 26_17, 56_31, 2_78, 11, 34_07, 48, 7_16_30, 2_80_85, 4, 32_34, 1_57, 13, 6, 5, 6, 4, 35_26, 7_68, 15, 6_59, 57, 2_98, 39_83, 8_64, 1_29, 21, 6, 5, 1_36_75, 3_77, 6_52, 75_80, 1_03_41, 1_55, 28_17, 4_22, 16_66, 7, 16_74, 53, 1_13, 20_22_77, 1_78_92, 33, 60, 87, 4, 32_34, 1_57, 61, 26_67, 5_23_76, 19, 88, 23, 7_35]
# fmt: on
self.assertListEqual(lowerCAmelCase , self.big_tokenizer.encode(lowerCAmelCase))
@slow
def UpperCAmelCase ( self : str) -> Dict:
"""simple docstring"""
lowercase__ = {
'input_ids': [[2, 10_88_25, 11_63, 15, 8_80_10, 4_73, 1_58_98, 1_57, 1_36_72, 18_57, 3_12, 8, 23_80_21, 11_63, 53, 1_36_72, 18_57, 3_12, 8, 5_32_83, 18_23_96, 8, 1_85_66, 16, 3_67_33, 41_01, 8, 2_30, 24_40_17, 12_25_53, 7, 15, 13_25_97, 4, 2_93, 1_25_11, 76_10, 4, 34_14, 13_25_97, 9, 4, 3_23_61, 3_62, 4, 7_34, 2_85_12, 3_25_69, 18, 4, 3_23_61, 2_60_96, 1_49_82, 73, 1_87_15, 2_14_33, 23_52_61, 15, 4_92, 1_24_27, 16, 53, 1_87_15, 2_14_33, 6_54_54, 15, 2_36_59, 5_63, 16, 2_78, 5_97, 28_43, 5_95, 79_31, 18_23_96, 6_41_86, 22, 8_86, 5_95, 13_29_81, 53, 2_55_40, 34_49, 4_39_82, 3_99_01, 59_51, 8_78, 3_30, 4, 2_76_94, 8_02_69, 3_12, 53, 65_17, 1_17_80, 6_11, 2_04_08, 5], [2, 6, 13_25_97, 67, 4_28_97, 33, 5_92, 8, 16_37_29, 2_55_40, 3_61, 13_69_97, 10_95_14, 17_32_30, 7, 5_01, 60, 10_29_13, 1_96, 56_31, 2_35, 6_32_43, 4_73, 6, 23_17_57, 74, 52_77, 79_05, 53, 30_95, 3_73_17, 22, 4_54, 18_38_74, 5], [2, 2_68, 3_12_98, 4_65_30, 6, 13_29_35, 4_38_31, 7, 5_97, 32, 24, 36_88, 98_65, 5]],
'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase , model_name='facebook/xglm-564M' , padding=lowerCAmelCase , )
| 642
|
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a__ : Optional[int] = logging.get_logger(__name__)
a__ : Dict = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
a__ : str = {
"vocab_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
},
"merges_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
},
"tokenizer_config_file": {
"facebook/blenderbot_small-90M": (
"https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
)
},
}
a__ : Any = {"facebook/blenderbot_small-90M": 5_12}
def _lowerCAmelCase ( A__ ):
lowercase__ = set()
lowercase__ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowercase__ = char
lowercase__ = set(A__ )
return pairs
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : List[str] = VOCAB_FILES_NAMES
A : str = PRETRAINED_VOCAB_FILES_MAP
A : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : Tuple = ["input_ids", "attention_mask"]
def __init__( self : Optional[int] , lowerCAmelCase : Dict , lowerCAmelCase : Dict , lowerCAmelCase : int="__start__" , lowerCAmelCase : Dict="__end__" , lowerCAmelCase : Any="__unk__" , lowerCAmelCase : str="__null__" , **lowerCAmelCase : Optional[Any] , ) -> List[str]:
"""simple docstring"""
super().__init__(unk_token=lowerCAmelCase , bos_token=lowerCAmelCase , eos_token=lowerCAmelCase , pad_token=lowerCAmelCase , **lowerCAmelCase)
with open(lowerCAmelCase , encoding='utf-8') as vocab_handle:
lowercase__ = json.load(lowerCAmelCase)
lowercase__ = {v: k for k, v in self.encoder.items()}
with open(lowerCAmelCase , encoding='utf-8') as merges_handle:
lowercase__ = merges_handle.read().split('\n')[1:-1]
lowercase__ = [tuple(merge.split()) for merge in merges]
lowercase__ = dict(zip(lowerCAmelCase , range(len(lowerCAmelCase))))
lowercase__ = {}
@property
def UpperCAmelCase ( self : int) -> int:
"""simple docstring"""
return len(self.encoder)
def UpperCAmelCase ( self : str) -> Dict:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder)
def UpperCAmelCase ( self : str , lowerCAmelCase : str) -> str:
"""simple docstring"""
if token in self.cache:
return self.cache[token]
lowercase__ = re.sub('([.,!?()])' , R' \1' , lowerCAmelCase)
lowercase__ = re.sub('(\')' , R' \1 ' , lowerCAmelCase)
lowercase__ = re.sub(R'\s{2,}' , ' ' , lowerCAmelCase)
if "\n" in token:
lowercase__ = token.replace('\n' , ' __newln__')
lowercase__ = token.split(' ')
lowercase__ = []
for token in tokens:
if not len(lowerCAmelCase):
continue
lowercase__ = token.lower()
lowercase__ = tuple(lowerCAmelCase)
lowercase__ = tuple(list(word[:-1]) + [word[-1] + '</w>'])
lowercase__ = get_pairs(lowerCAmelCase)
if not pairs:
words.append(lowerCAmelCase)
continue
while True:
lowercase__ = min(lowerCAmelCase , key=lambda lowerCAmelCase: self.bpe_ranks.get(lowerCAmelCase , float('inf')))
if bigram not in self.bpe_ranks:
break
lowercase__, lowercase__ = bigram
lowercase__ = []
lowercase__ = 0
while i < len(lowerCAmelCase):
try:
lowercase__ = word.index(lowerCAmelCase , lowerCAmelCase)
new_word.extend(word[i:j])
lowercase__ = j
except ValueError:
new_word.extend(word[i:])
break
if word[i] == first and i < len(lowerCAmelCase) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
lowercase__ = tuple(lowerCAmelCase)
lowercase__ = new_word
if len(lowerCAmelCase) == 1:
break
else:
lowercase__ = get_pairs(lowerCAmelCase)
lowercase__ = '@@ '.join(lowerCAmelCase)
lowercase__ = word[:-4]
lowercase__ = word
words.append(lowerCAmelCase)
return " ".join(lowerCAmelCase)
def UpperCAmelCase ( self : List[str] , lowerCAmelCase : str) -> List[str]:
"""simple docstring"""
lowercase__ = []
lowercase__ = re.findall(R'\S+\n?' , lowerCAmelCase)
for token in words:
split_tokens.extend(list(self.bpe(lowerCAmelCase).split(' ')))
return split_tokens
def UpperCAmelCase ( self : int , lowerCAmelCase : str) -> int:
"""simple docstring"""
lowercase__ = token.lower()
return self.encoder.get(lowerCAmelCase , self.encoder.get(self.unk_token))
def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : int) -> str:
"""simple docstring"""
return self.decoder.get(lowerCAmelCase , self.unk_token)
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : List[str]) -> str:
"""simple docstring"""
lowercase__ = ' '.join(lowerCAmelCase).replace('@@ ' , '').strip()
return out_string
def UpperCAmelCase ( self : str , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(lowerCAmelCase):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
return
lowercase__ = os.path.join(
lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
lowercase__ = os.path.join(
lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
with open(lowerCAmelCase , 'w' , encoding='utf-8') as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase , ensure_ascii=lowerCAmelCase) + '\n')
lowercase__ = 0
with open(lowerCAmelCase , 'w' , encoding='utf-8') as writer:
writer.write('#version: 0.2\n')
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCAmelCase: kv[1]):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
' Please check that the tokenizer is not corrupted!')
lowercase__ = token_index
writer.write(' '.join(lowerCAmelCase) + '\n')
index += 1
return vocab_file, merge_file
| 642
| 1
|
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
a__ : Tuple = logging.get_logger(__name__)
a__ : int = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
a__ : List[Any] = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def _lowerCAmelCase ( A__ , A__ , A__ , A__ , A__ ):
for attribute in key.split('.' ):
lowercase__ = getattr(A__ , A__ )
if weight_type is not None:
lowercase__ = getattr(A__ , A__ ).shape
else:
lowercase__ = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
lowercase__ = value
elif weight_type == "weight_g":
lowercase__ = value
elif weight_type == "weight_v":
lowercase__ = value
elif weight_type == "bias":
lowercase__ = value
else:
lowercase__ = value
logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def _lowerCAmelCase ( A__ , A__ ):
lowercase__ = []
lowercase__ = fairseq_model.state_dict()
lowercase__ = hf_model.feature_extractor
# if encoder has different dim to decoder -> use proj_weight
lowercase__ = None
for name, value in fairseq_dict.items():
lowercase__ = False
if "conv_layers" in name:
load_conv_layer(
A__ , A__ , A__ , A__ , hf_model.config.feat_extract_norm == 'group' , )
lowercase__ = True
elif name.split('.' )[0] == "proj":
lowercase__ = fairseq_model.proj
lowercase__ = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
lowercase__ = True
if "*" in mapped_key:
lowercase__ = name.split(A__ )[0].split('.' )[-2]
lowercase__ = mapped_key.replace('*' , A__ )
if "weight_g" in name:
lowercase__ = 'weight_g'
elif "weight_v" in name:
lowercase__ = 'weight_v'
elif "bias" in name:
lowercase__ = 'bias'
elif "weight" in name:
lowercase__ = 'weight'
else:
lowercase__ = None
set_recursively(A__ , A__ , A__ , A__ , A__ )
continue
if not is_used:
unused_weights.append(A__ )
logger.warning(F'''Unused weights: {unused_weights}''' )
return proj_weight
def _lowerCAmelCase ( A__ , A__ , A__ , A__ , A__ ):
lowercase__ = full_name.split('conv_layers.' )[-1]
lowercase__ = name.split('.' )
lowercase__ = int(items[0] )
lowercase__ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
lowercase__ = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
lowercase__ = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
lowercase__ = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
lowercase__ = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(A__ )
def _lowerCAmelCase ( A__ ):
lowercase__, lowercase__ = emb.weight.shape
lowercase__ = nn.Linear(A__ , A__ , bias=A__ )
lowercase__ = emb.weight.data
return lin_layer
def _lowerCAmelCase ( A__ ):
with open(A__ , 'r' , encoding='utf-8' ) as f:
lowercase__ = f.readlines()
lowercase__ = [line.split(' ' )[0] for line in lines]
lowercase__ = len(A__ )
lowercase__ = {
'<s>': 0,
'<pad>': 1,
'</s>': 2,
'<unk>': 3,
}
vocab_dict.update(dict(zip(A__ , range(4 , num_words + 4 ) ) ) )
return vocab_dict
@torch.no_grad()
def _lowerCAmelCase ( A__ , A__ , A__ , A__ , A__ , A__ , A__ , ):
lowercase__ = WavaVecaConfig.from_pretrained(A__ )
lowercase__ = SpeechaTextaConfig.from_pretrained(
A__ , vocab_size=A__ , decoder_layers=A__ , do_stable_layer_norm=A__ )
lowercase__ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=A__ , return_attention_mask=A__ , )
lowercase__, lowercase__, lowercase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
lowercase__ = model[0].eval()
# set weights for wav2vec2 encoder
lowercase__ = WavaVecaModel(A__ )
lowercase__ = recursively_load_weights_wavaveca(model.encoder , A__ )
lowercase__ = SpeechaTextaForCausalLM(A__ )
lowercase__, lowercase__ = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=A__ )
# set output linear layer
unexpected_keys.remove('embed_out' )
lowercase__ = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(F'''The following keys are missing when loading the decoder weights: {missing_keys}''' )
logger.warning(F'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' )
lowercase__ = SpeechEncoderDecoderModel(encoder=A__ , decoder=A__ )
lowercase__ = False
# add projection layer
lowercase__ = nn.Parameter(projection_layer.weight )
lowercase__ = nn.Parameter(projection_layer.bias )
lowercase__ = create_vocab_dict(A__ )
with open(os.path.join(A__ , 'vocab.json' ) , 'w' ) as fp:
json.dump(A__ , A__ )
lowercase__ = SpeechaTextaTokenizer(os.path.join(A__ , 'vocab.json' ) )
tokenizer.save_pretrained(A__ )
lowercase__ = hf_wavavec.config.to_dict()
lowercase__ = tokenizer.pad_token_id
lowercase__ = tokenizer.bos_token_id
lowercase__ = tokenizer.eos_token_id
lowercase__ = 'speech_to_text_2'
lowercase__ = 'wav2vec2'
lowercase__ = SpeechEncoderDecoderConfig.from_dict(A__ )
hf_wavavec.save_pretrained(A__ )
feature_extractor.save_pretrained(A__ )
if __name__ == "__main__":
a__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-large-lv60",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/s2t-small-mustc-en-fr-st",
type=str,
help="Path to hf decoder s2t checkpoint config",
)
parser.add_argument("--vocab_size", default=1_02_24, type=int, help="Vocab size of decoder")
parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
a__ : Tuple = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 642
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a__ : Optional[int] = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Any = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Dict = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : List[str] = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : int = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
a__ : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 642
| 1
|
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class UpperCAmelCase__( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase ( self : List[str] , lowerCAmelCase : int , lowerCAmelCase : int) -> List[str]:
"""simple docstring"""
lowercase__ = jnp.ones((batch_size, length)) / length
return scores
def UpperCAmelCase ( self : int) -> Tuple:
"""simple docstring"""
lowercase__ = None
lowercase__ = 20
lowercase__ = self._get_uniform_logits(batch_size=2 , length=lowerCAmelCase)
# tweak scores to not be uniform anymore
lowercase__ = scores.at[1, 5].set((1 / length) + 0.1) # peak, 1st batch
lowercase__ = scores.at[1, 10].set((1 / length) - 0.4) # valley, 1st batch
# compute softmax
lowercase__ = jax.nn.softmax(lowerCAmelCase , axis=-1)
lowercase__ = FlaxTemperatureLogitsWarper(temperature=0.5)
lowercase__ = FlaxTemperatureLogitsWarper(temperature=1.3)
lowercase__ = jax.nn.softmax(temp_dist_warper_sharper(lowerCAmelCase , scores.copy() , cur_len=lowerCAmelCase) , axis=-1)
lowercase__ = jax.nn.softmax(temp_dist_warper_smoother(lowerCAmelCase , scores.copy() , cur_len=lowerCAmelCase) , axis=-1)
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3))
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3))
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max())
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min())
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max())
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min())
def UpperCAmelCase ( self : Dict) -> Dict:
"""simple docstring"""
lowercase__ = None
lowercase__ = 10
lowercase__ = 2
# create ramp distribution
lowercase__ = np.broadcast_to(np.arange(lowerCAmelCase)[None, :] , (batch_size, vocab_size)).copy()
lowercase__ = ramp_logits[1:, : vocab_size // 2] + vocab_size
lowercase__ = FlaxTopKLogitsWarper(3)
lowercase__ = top_k_warp(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase)
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0]).tolist() , 7 * [True] + 3 * [False])
self.assertListEqual(jnp.isinf(scores[1]).tolist() , 2 * [True] + 3 * [False] + 5 * [True])
# check special case
lowercase__ = 5
lowercase__ = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3)
lowercase__ = np.broadcast_to(np.arange(lowerCAmelCase)[None, :] , (batch_size, length)).copy()
lowercase__ = top_k_warp_safety_check(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase)
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1).tolist() , [2, 2])
def UpperCAmelCase ( self : List[Any]) -> Dict:
"""simple docstring"""
lowercase__ = None
lowercase__ = 10
lowercase__ = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
lowercase__ = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))
lowercase__ = FlaxTopPLogitsWarper(0.8)
lowercase__ = np.exp(top_p_warp(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase))
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
lowercase__ = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]])
self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-3))
# check edge cases with negative and extreme logits
lowercase__ = np.broadcast_to(np.arange(lowerCAmelCase)[None, :] , (batch_size, vocab_size)).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
lowercase__ = ramp_logits[1] * 1_00.0
# make sure at least 2 tokens are kept
lowercase__ = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0)
lowercase__ = top_p_warp(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase)
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist() , [3, 2])
def UpperCAmelCase ( self : int) -> Optional[Any]:
"""simple docstring"""
lowercase__ = 20
lowercase__ = 4
lowercase__ = 0
lowercase__ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowerCAmelCase)
# check that min length is applied at length 5
lowercase__ = ids_tensor((batch_size, 20) , vocab_size=20)
lowercase__ = 5
lowercase__ = self._get_uniform_logits(lowerCAmelCase , lowerCAmelCase)
lowercase__ = min_dist_processor(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase)
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('inf')])
# check that min length is not applied anymore at length 15
lowercase__ = self._get_uniform_logits(lowerCAmelCase , lowerCAmelCase)
lowercase__ = 15
lowercase__ = min_dist_processor(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase)
self.assertFalse(jnp.isinf(lowerCAmelCase).any())
def UpperCAmelCase ( self : List[str]) -> int:
"""simple docstring"""
lowercase__ = 20
lowercase__ = 4
lowercase__ = 0
lowercase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCAmelCase)
# check that all scores are -inf except the bos_token_id score
lowercase__ = ids_tensor((batch_size, 1) , vocab_size=20)
lowercase__ = 1
lowercase__ = self._get_uniform_logits(lowerCAmelCase , lowerCAmelCase)
lowercase__ = logits_processor(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase)
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0]) # score for bos_token_id shold be zero
# check that bos_token_id is not forced if current length is greater than 1
lowercase__ = 3
lowercase__ = self._get_uniform_logits(lowerCAmelCase , lowerCAmelCase)
lowercase__ = logits_processor(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase)
self.assertFalse(jnp.isinf(lowerCAmelCase).any())
def UpperCAmelCase ( self : Dict) -> Dict:
"""simple docstring"""
lowercase__ = 20
lowercase__ = 4
lowercase__ = 0
lowercase__ = 5
lowercase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCAmelCase , eos_token_id=lowerCAmelCase)
# check that all scores are -inf except the eos_token_id when max_length is reached
lowercase__ = ids_tensor((batch_size, 4) , vocab_size=20)
lowercase__ = 4
lowercase__ = self._get_uniform_logits(lowerCAmelCase , lowerCAmelCase)
lowercase__ = logits_processor(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase)
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0]) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
lowercase__ = 3
lowercase__ = self._get_uniform_logits(lowerCAmelCase , lowerCAmelCase)
lowercase__ = logits_processor(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase)
self.assertFalse(jnp.isinf(lowerCAmelCase).any())
def UpperCAmelCase ( self : Tuple) -> Tuple:
"""simple docstring"""
lowercase__ = 4
lowercase__ = 10
lowercase__ = 15
lowercase__ = 2
lowercase__ = 1
lowercase__ = 15
# dummy input_ids and scores
lowercase__ = ids_tensor((batch_size, sequence_length) , lowerCAmelCase)
lowercase__ = input_ids.copy()
lowercase__ = self._get_uniform_logits(lowerCAmelCase , lowerCAmelCase)
lowercase__ = scores.copy()
# instantiate all dist processors
lowercase__ = FlaxTemperatureLogitsWarper(temperature=0.5)
lowercase__ = FlaxTopKLogitsWarper(3)
lowercase__ = FlaxTopPLogitsWarper(0.8)
# instantiate all logits processors
lowercase__ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowerCAmelCase)
lowercase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCAmelCase)
lowercase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCAmelCase , eos_token_id=lowerCAmelCase)
lowercase__ = 10
# no processor list
lowercase__ = temp_dist_warp(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase)
lowercase__ = top_k_warp(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase)
lowercase__ = top_p_warp(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase)
lowercase__ = min_dist_proc(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase)
lowercase__ = bos_dist_proc(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase)
lowercase__ = eos_dist_proc(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase)
# with processor list
lowercase__ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc])
lowercase__ = processor(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase)
# scores should be equal
self.assertTrue(jnp.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-3))
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist())
def UpperCAmelCase ( self : int) -> Optional[int]:
"""simple docstring"""
lowercase__ = 4
lowercase__ = 10
lowercase__ = 15
lowercase__ = 2
lowercase__ = 1
lowercase__ = 15
# dummy input_ids and scores
lowercase__ = ids_tensor((batch_size, sequence_length) , lowerCAmelCase)
lowercase__ = input_ids.copy()
lowercase__ = self._get_uniform_logits(lowerCAmelCase , lowerCAmelCase)
lowercase__ = scores.copy()
# instantiate all dist processors
lowercase__ = FlaxTemperatureLogitsWarper(temperature=0.5)
lowercase__ = FlaxTopKLogitsWarper(3)
lowercase__ = FlaxTopPLogitsWarper(0.8)
# instantiate all logits processors
lowercase__ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowerCAmelCase)
lowercase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCAmelCase)
lowercase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCAmelCase , eos_token_id=lowerCAmelCase)
lowercase__ = 10
# no processor list
def run_no_processor_list(lowerCAmelCase : List[Any] , lowerCAmelCase : str , lowerCAmelCase : List[str]):
lowercase__ = temp_dist_warp(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase)
lowercase__ = top_k_warp(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase)
lowercase__ = top_p_warp(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase)
lowercase__ = min_dist_proc(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase)
lowercase__ = bos_dist_proc(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase)
lowercase__ = eos_dist_proc(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase)
return scores
# with processor list
def run_processor_list(lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[Any]):
lowercase__ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc])
lowercase__ = processor(lowerCAmelCase , lowerCAmelCase , cur_len=lowerCAmelCase)
return scores
lowercase__ = jax.jit(lowerCAmelCase)
lowercase__ = jax.jit(lowerCAmelCase)
lowercase__ = jitted_run_no_processor_list(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)
lowercase__ = jitted_run_processor_list(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)
# scores should be equal
self.assertTrue(jnp.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-3))
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist())
| 642
|
import heapq
import sys
import numpy as np
a__ : Dict = tuple[int, int]
class UpperCAmelCase__:
'''simple docstring'''
def __init__( self : List[str]) -> Any:
"""simple docstring"""
lowercase__ = []
lowercase__ = set()
def UpperCAmelCase ( self : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
if not self.empty():
return self.elements[0][0]
else:
return float('inf')
def UpperCAmelCase ( self : int) -> str:
"""simple docstring"""
return len(self.elements) == 0
def UpperCAmelCase ( self : Dict , lowerCAmelCase : List[Any] , lowerCAmelCase : List[str]) -> List[str]:
"""simple docstring"""
if item not in self.set:
heapq.heappush(self.elements , (priority, item))
self.set.add(lowerCAmelCase)
else:
# update
# print("update", item)
lowercase__ = []
((lowercase__), (lowercase__)) = heapq.heappop(self.elements)
while x != item:
temp.append((pri, x))
((lowercase__), (lowercase__)) = heapq.heappop(self.elements)
temp.append((priority, item))
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx))
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : int) -> Tuple:
"""simple docstring"""
if item in self.set:
self.set.remove(lowerCAmelCase)
lowercase__ = []
((lowercase__), (lowercase__)) = heapq.heappop(self.elements)
while x != item:
temp.append((pro, x))
((lowercase__), (lowercase__)) = heapq.heappop(self.elements)
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy))
def UpperCAmelCase ( self : Dict) -> List[Any]:
"""simple docstring"""
return self.elements[0][1]
def UpperCAmelCase ( self : List[str]) -> str:
"""simple docstring"""
((lowercase__), (lowercase__)) = heapq.heappop(self.elements)
self.set.remove(lowerCAmelCase)
return (priority, item)
def _lowerCAmelCase ( A__ , A__ ):
# euclidean distance
lowercase__ = np.array(A__ )
lowercase__ = np.array(A__ )
return np.linalg.norm(a - b )
def _lowerCAmelCase ( A__ , A__ ):
# integer division by time variable
return consistent_heuristic(A__ , A__ ) // t
def _lowerCAmelCase ( A__ , A__ ):
# manhattan distance
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def _lowerCAmelCase ( A__ , A__ , A__ , A__ ):
lowercase__ = g_function[start] + Wa * heuristics[i](A__ , A__ )
return ans
def _lowerCAmelCase ( A__ , A__ , A__ ):
lowercase__ = np.chararray((n, n) )
for i in range(A__ ):
for j in range(A__ ):
lowercase__ = '*'
for i in range(A__ ):
for j in range(A__ ):
if (j, (n - 1) - i) in blocks:
lowercase__ = '#'
lowercase__ = '-'
lowercase__ = back_pointer[goal]
while x != start:
((lowercase__), (lowercase__)) = x
# print(x)
lowercase__ = '-'
lowercase__ = back_pointer[x]
lowercase__ = '-'
for i in range(A__ ):
for j in range(A__ ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=' ' )
print('<-- End position' , end=' ' )
else:
print(grid[i][j] , end=' ' )
print()
print('^' )
print('Start position' )
print()
print('# is an obstacle' )
print('- is the path taken by algorithm' )
print('PATH TAKEN BY THE ALGORITHM IS:-' )
lowercase__ = back_pointer[goal]
while x != start:
print(A__ , end=' ' )
lowercase__ = back_pointer[x]
print(A__ )
sys.exit()
def _lowerCAmelCase ( A__ ):
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def _lowerCAmelCase ( A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , ):
for itera in range(A__ ):
open_list[itera].remove_element(A__ )
# print("s", s)
# print("j", j)
((lowercase__), (lowercase__)) = s
lowercase__ = (x - 1, y)
lowercase__ = (x + 1, y)
lowercase__ = (x, y + 1)
lowercase__ = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(A__ ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(A__ )
lowercase__ = -1
lowercase__ = float('inf' )
if valid(A__ ) and g_function[neighbours] > g_function[s] + 1:
lowercase__ = g_function[s] + 1
lowercase__ = s
if neighbours not in close_list_anchor:
open_list[0].put(A__ , key(A__ , 0 , A__ , A__ ) )
if neighbours not in close_list_inad:
for var in range(1 , A__ ):
if key(A__ , A__ , A__ , A__ ) <= Wa * key(
A__ , 0 , A__ , A__ ):
open_list[j].put(
A__ , key(A__ , A__ , A__ , A__ ) )
def _lowerCAmelCase ( ):
lowercase__ = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
a__ : str = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
a__ : Any = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
a__ : Any = make_common_ground()
a__ : Union[str, Any] = blocks_blk
# hyper parameters
a__ : List[Any] = 1
a__ : List[str] = 1
a__ : Optional[int] = 20
a__ : Optional[Any] = 3 # one consistent and two other inconsistent
# start and end destination
a__ : Tuple = (0, 0)
a__ : str = (n - 1, n - 1)
a__ : Optional[Any] = 1
def _lowerCAmelCase ( A__ , A__ , A__ ):
lowercase__ = {start: 0, goal: float('inf' )}
lowercase__ = {start: -1, goal: -1}
lowercase__ = []
lowercase__ = set()
for i in range(A__ ):
open_list.append(PriorityQueue() )
open_list[i].put(A__ , key(A__ , A__ , A__ , A__ ) )
lowercase__ = []
lowercase__ = []
while open_list[0].minkey() < float('inf' ):
for i in range(1 , A__ ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float('inf' ):
do_something(A__ , A__ , A__ )
else:
lowercase__, lowercase__ = open_list[i].top_show()
visited.add(A__ )
expand_state(
A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , )
close_list_inad.append(A__ )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float('inf' ):
do_something(A__ , A__ , A__ )
else:
lowercase__ = open_list[0].top_show()
visited.add(A__ )
expand_state(
A__ , 0 , A__ , A__ , A__ , A__ , A__ , A__ , )
close_list_anchor.append(A__ )
print('No path found to goal' )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(A__ ):
if (j, i) in blocks:
print('#' , end=' ' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('*' , end=' ' )
else:
print('-' , end=' ' )
else:
print('*' , end=' ' )
if (j, i) == (n - 1, n - 1):
print('<-- End position' , end=' ' )
print()
print('^' )
print('Start position' )
print()
print('# is an obstacle' )
print('- is the path taken by algorithm' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 642
| 1
|
import json
import os
import re
import sys
import urllib.request
import requests
from bsa import BeautifulSoup
a__ : Dict = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
" (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}
def _lowerCAmelCase ( A__ = "dhaka" , A__ = 5 ):
lowercase__ = min(A__ , 50 ) # Prevent abuse!
lowercase__ = {
'q': query,
'tbm': 'isch',
'hl': 'en',
'ijn': '0',
}
lowercase__ = requests.get('https://www.google.com/search' , params=A__ , headers=A__ )
lowercase__ = BeautifulSoup(html.text , 'html.parser' )
lowercase__ = ''.join(
re.findall(r'AF_initDataCallback\(([^<]+)\);' , str(soup.select('script' ) ) ) )
lowercase__ = json.dumps(A__ )
lowercase__ = json.loads(A__ )
lowercase__ = re.findall(
r'\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",' , A__ , )
if not matched_google_image_data:
return 0
lowercase__ = re.sub(
r'\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]' , '' , str(A__ ) , )
lowercase__ = re.findall(
r'(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]' , A__ , )
for index, fixed_full_res_image in enumerate(A__ ):
if index >= max_images:
return index
lowercase__ = bytes(A__ , 'ascii' ).decode(
'unicode-escape' )
lowercase__ = bytes(A__ , 'ascii' ).decode(
'unicode-escape' )
lowercase__ = urllib.request.build_opener()
lowercase__ = [
(
'User-Agent',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582',
)
]
urllib.request.install_opener(A__ )
lowercase__ = F'''query_{query.replace(' ' , '_' )}'''
if not os.path.exists(A__ ):
os.makedirs(A__ )
urllib.request.urlretrieve( # noqa: S310
A__ , F'''{path_name}/original_size_img_{index}.jpg''' )
return index
if __name__ == "__main__":
try:
a__ : List[str] = download_images_from_google_query(sys.argv[1])
print(F'''{image_count} images were downloaded to disk.''')
except IndexError:
print("Please provide a search term.")
raise
| 642
|
import math
import sys
def _lowerCAmelCase ( A__ ):
lowercase__ = ''
try:
with open(A__ , 'rb' ) as binary_file:
lowercase__ = binary_file.read()
for dat in data:
lowercase__ = F'''{dat:08b}'''
result += curr_byte
return result
except OSError:
print('File not accessible' )
sys.exit()
def _lowerCAmelCase ( A__ ):
lowercase__ = {'0': '0', '1': '1'}
lowercase__, lowercase__ = '', ''
lowercase__ = len(A__ )
for i in range(len(A__ ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
lowercase__ = lexicon[curr_string]
result += last_match_id
lowercase__ = last_match_id + '0'
if math.loga(A__ ).is_integer():
lowercase__ = {}
for curr_key in list(A__ ):
lowercase__ = lexicon.pop(A__ )
lowercase__ = new_lex
lowercase__ = last_match_id + '1'
index += 1
lowercase__ = ''
return result
def _lowerCAmelCase ( A__ , A__ ):
lowercase__ = 8
try:
with open(A__ , 'wb' ) as opened_file:
lowercase__ = [
to_write[i : i + byte_length]
for i in range(0 , len(A__ ) , A__ )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('10000000' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(A__ , 2 ).to_bytes(1 , byteorder='big' ) )
except OSError:
print('File not accessible' )
sys.exit()
def _lowerCAmelCase ( A__ ):
lowercase__ = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
lowercase__ = data_bits[counter:]
lowercase__ = data_bits[counter + 1 :]
return data_bits
def _lowerCAmelCase ( A__ , A__ ):
lowercase__ = read_file_binary(A__ )
lowercase__ = remove_prefix(A__ )
lowercase__ = decompress_data(A__ )
write_file_binary(A__ , A__ )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 642
| 1
|
import argparse
import struct
import unittest
class UpperCAmelCase__:
'''simple docstring'''
def __init__( self : Optional[Any] , lowerCAmelCase : bytes) -> None:
"""simple docstring"""
lowercase__ = data
# Initialize hash values
lowercase__ = [
0X6_A_0_9_E_6_6_7,
0XB_B_6_7_A_E_8_5,
0X3_C_6_E_F_3_7_2,
0XA_5_4_F_F_5_3_A,
0X5_1_0_E_5_2_7_F,
0X9_B_0_5_6_8_8_C,
0X1_F_8_3_D_9_A_B,
0X5_B_E_0_C_D_1_9,
]
# Initialize round constants
lowercase__ = [
0X4_2_8_A_2_F_9_8,
0X7_1_3_7_4_4_9_1,
0XB_5_C_0_F_B_C_F,
0XE_9_B_5_D_B_A_5,
0X3_9_5_6_C_2_5_B,
0X5_9_F_1_1_1_F_1,
0X9_2_3_F_8_2_A_4,
0XA_B_1_C_5_E_D_5,
0XD_8_0_7_A_A_9_8,
0X1_2_8_3_5_B_0_1,
0X2_4_3_1_8_5_B_E,
0X5_5_0_C_7_D_C_3,
0X7_2_B_E_5_D_7_4,
0X8_0_D_E_B_1_F_E,
0X9_B_D_C_0_6_A_7,
0XC_1_9_B_F_1_7_4,
0XE_4_9_B_6_9_C_1,
0XE_F_B_E_4_7_8_6,
0X0_F_C_1_9_D_C_6,
0X2_4_0_C_A_1_C_C,
0X2_D_E_9_2_C_6_F,
0X4_A_7_4_8_4_A_A,
0X5_C_B_0_A_9_D_C,
0X7_6_F_9_8_8_D_A,
0X9_8_3_E_5_1_5_2,
0XA_8_3_1_C_6_6_D,
0XB_0_0_3_2_7_C_8,
0XB_F_5_9_7_F_C_7,
0XC_6_E_0_0_B_F_3,
0XD_5_A_7_9_1_4_7,
0X0_6_C_A_6_3_5_1,
0X1_4_2_9_2_9_6_7,
0X2_7_B_7_0_A_8_5,
0X2_E_1_B_2_1_3_8,
0X4_D_2_C_6_D_F_C,
0X5_3_3_8_0_D_1_3,
0X6_5_0_A_7_3_5_4,
0X7_6_6_A_0_A_B_B,
0X8_1_C_2_C_9_2_E,
0X9_2_7_2_2_C_8_5,
0XA_2_B_F_E_8_A_1,
0XA_8_1_A_6_6_4_B,
0XC_2_4_B_8_B_7_0,
0XC_7_6_C_5_1_A_3,
0XD_1_9_2_E_8_1_9,
0XD_6_9_9_0_6_2_4,
0XF_4_0_E_3_5_8_5,
0X1_0_6_A_A_0_7_0,
0X1_9_A_4_C_1_1_6,
0X1_E_3_7_6_C_0_8,
0X2_7_4_8_7_7_4_C,
0X3_4_B_0_B_C_B_5,
0X3_9_1_C_0_C_B_3,
0X4_E_D_8_A_A_4_A,
0X5_B_9_C_C_A_4_F,
0X6_8_2_E_6_F_F_3,
0X7_4_8_F_8_2_E_E,
0X7_8_A_5_6_3_6_F,
0X8_4_C_8_7_8_1_4,
0X8_C_C_7_0_2_0_8,
0X9_0_B_E_F_F_F_A,
0XA_4_5_0_6_C_E_B,
0XB_E_F_9_A_3_F_7,
0XC_6_7_1_7_8_F_2,
]
lowercase__ = self.preprocessing(self.data)
self.final_hash()
@staticmethod
def UpperCAmelCase ( lowerCAmelCase : bytes) -> bytes:
"""simple docstring"""
lowercase__ = B'\x80' + (B'\x00' * (63 - (len(lowerCAmelCase) + 8) % 64))
lowercase__ = struct.pack('>Q' , (len(lowerCAmelCase) * 8))
return data + padding + big_endian_integer
def UpperCAmelCase ( self : Any) -> None:
"""simple docstring"""
lowercase__ = [
self.preprocessed_data[x : x + 64]
for x in range(0 , len(self.preprocessed_data) , 64)
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
lowercase__ = list(struct.unpack('>16L' , lowerCAmelCase))
# add 48 0-ed integers
words += [0] * 48
lowercase__, lowercase__, lowercase__, lowercase__, lowercase__, lowercase__, lowercase__, lowercase__ = self.hashes
for index in range(0 , 64):
if index > 15:
# modify the zero-ed indexes at the end of the array
lowercase__ = (
self.ror(words[index - 15] , 7)
^ self.ror(words[index - 15] , 18)
^ (words[index - 15] >> 3)
)
lowercase__ = (
self.ror(words[index - 2] , 17)
^ self.ror(words[index - 2] , 19)
^ (words[index - 2] >> 10)
)
lowercase__ = (
words[index - 16] + sa + words[index - 7] + sa
) % 0X1_0_0_0_0_0_0_0_0
# Compression
lowercase__ = self.ror(lowerCAmelCase , 6) ^ self.ror(lowerCAmelCase , 11) ^ self.ror(lowerCAmelCase , 25)
lowercase__ = (e & f) ^ ((~e & 0XF_F_F_F_F_F_F_F) & g)
lowercase__ = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0X1_0_0_0_0_0_0_0_0
lowercase__ = self.ror(lowerCAmelCase , 2) ^ self.ror(lowerCAmelCase , 13) ^ self.ror(lowerCAmelCase , 22)
lowercase__ = (a & b) ^ (a & c) ^ (b & c)
lowercase__ = (sa + maj) % 0X1_0_0_0_0_0_0_0_0
lowercase__, lowercase__, lowercase__, lowercase__, lowercase__, lowercase__, lowercase__, lowercase__ = (
g,
f,
e,
((d + tempa) % 0X1_0_0_0_0_0_0_0_0),
c,
b,
a,
((tempa + tempa) % 0X1_0_0_0_0_0_0_0_0),
)
lowercase__ = [a, b, c, d, e, f, g, h]
# Modify final values
lowercase__ = [
((element + mutated_hash_values[index]) % 0X1_0_0_0_0_0_0_0_0)
for index, element in enumerate(self.hashes)
]
lowercase__ = ''.join([hex(lowerCAmelCase)[2:].zfill(8) for value in self.hashes])
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : int , lowerCAmelCase : int) -> int:
"""simple docstring"""
return 0XF_F_F_F_F_F_F_F & (value << (32 - rotations)) | (value >> rotations)
class UpperCAmelCase__( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase ( self : Tuple) -> None:
"""simple docstring"""
import hashlib
lowercase__ = bytes('Test String' , 'utf-8')
self.assertEqual(SHAaaa(lowerCAmelCase).hash , hashlib.shaaaa(lowerCAmelCase).hexdigest())
def _lowerCAmelCase ( ):
import doctest
doctest.testmod()
lowercase__ = argparse.ArgumentParser()
parser.add_argument(
'-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
parser.add_argument(
'-f' , '--file' , dest='input_file' , help='Hash contents of a file' )
lowercase__ = parser.parse_args()
lowercase__ = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , 'rb' ) as f:
lowercase__ = f.read()
else:
lowercase__ = bytes(A__ , 'utf-8' )
print(SHAaaa(A__ ).hash )
if __name__ == "__main__":
main()
| 642
|
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
a__ : int = logging.get_logger(__name__)
a__ : Tuple = {"vocab_file": "vocab.txt"}
a__ : int = {
"vocab_file": {
"facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
"facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
},
}
a__ : Dict = {
"facebook/esm2_t6_8M_UR50D": 10_24,
"facebook/esm2_t12_35M_UR50D": 10_24,
}
def _lowerCAmelCase ( A__ ):
with open(A__ , 'r' ) as f:
lowercase__ = f.read().splitlines()
return [l.strip() for l in lines]
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : Union[str, Any] = VOCAB_FILES_NAMES
A : str = PRETRAINED_VOCAB_FILES_MAP
A : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : List[Any] = ["input_ids", "attention_mask"]
def __init__( self : Union[str, Any] , lowerCAmelCase : Any , lowerCAmelCase : Optional[int]="<unk>" , lowerCAmelCase : Dict="<cls>" , lowerCAmelCase : List[str]="<pad>" , lowerCAmelCase : Union[str, Any]="<mask>" , lowerCAmelCase : Optional[Any]="<eos>" , **lowerCAmelCase : Any , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**lowerCAmelCase)
lowercase__ = load_vocab_file(lowerCAmelCase)
lowercase__ = dict(enumerate(self.all_tokens))
lowercase__ = {tok: ind for ind, tok in enumerate(self.all_tokens)}
lowercase__ = unk_token
lowercase__ = cls_token
lowercase__ = pad_token
lowercase__ = mask_token
lowercase__ = eos_token
lowercase__ = self.all_tokens
self._create_trie(self.unique_no_split_tokens)
def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : int) -> str:
"""simple docstring"""
return self._id_to_token.get(lowerCAmelCase , self.unk_token)
def UpperCAmelCase ( self : Dict , lowerCAmelCase : str) -> int:
"""simple docstring"""
return self._token_to_id.get(lowerCAmelCase , self._token_to_id.get(self.unk_token))
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : str , **lowerCAmelCase : Union[str, Any]) -> Dict:
"""simple docstring"""
return text.split()
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : Any=False) -> Union[str, Any]:
"""simple docstring"""
return len(self._id_to_token)
def UpperCAmelCase ( self : Tuple) -> int:
"""simple docstring"""
return {token: i for i, token in enumerate(self.all_tokens)}
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : str) -> int:
"""simple docstring"""
return self._token_to_id.get(lowerCAmelCase , self._token_to_id.get(self.unk_token))
def UpperCAmelCase ( self : Dict , lowerCAmelCase : int) -> str:
"""simple docstring"""
return self._id_to_token.get(lowerCAmelCase , self.unk_token)
def UpperCAmelCase ( self : Any , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
lowercase__ = [self.cls_token_id]
lowercase__ = [self.eos_token_id] # No sep token in ESM vocabulary
if token_ids_a is None:
if self.eos_token_id is None:
return cls + token_ids_a
else:
return cls + token_ids_a + sep
elif self.eos_token_id is None:
raise ValueError('Cannot tokenize multiple sequences when EOS token is not set!')
return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : List , lowerCAmelCase : Optional[List] = None , lowerCAmelCase : bool = False) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.')
return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
lowercase__ = [1] + ([0] * len(lowerCAmelCase)) + [1]
if token_ids_a is not None:
mask += [0] * len(lowerCAmelCase) + [1]
return mask
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int]) -> Dict:
"""simple docstring"""
lowercase__ = os.path.join(lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + 'vocab.txt')
with open(lowerCAmelCase , 'w') as f:
f.write('\n'.join(self.all_tokens))
return (vocab_file,)
@property
def UpperCAmelCase ( self : Optional[int]) -> int:
"""simple docstring"""
return self.get_vocab_size(with_added_tokens=lowerCAmelCase)
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : Union[List[str], List[AddedToken]] , lowerCAmelCase : bool = False) -> int:
"""simple docstring"""
return super()._add_tokens(lowerCAmelCase , special_tokens=lowerCAmelCase)
| 642
| 1
|
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class UpperCAmelCase__( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase ( self : Tuple) -> Any:
"""simple docstring"""
lowercase__ = {
'task_specific_params': {
'summarization': {'length_penalty': 1.0, 'max_length': 1_28, 'min_length': 12, 'num_beams': 4},
'summarization_cnn': {'length_penalty': 2.0, 'max_length': 1_42, 'min_length': 56, 'num_beams': 4},
'summarization_xsum': {'length_penalty': 1.0, 'max_length': 62, 'min_length': 11, 'num_beams': 6},
}
}
lowercase__ = {
'task_specific_params.summarization.length_penalty': 1.0,
'task_specific_params.summarization.max_length': 1_28,
'task_specific_params.summarization.min_length': 12,
'task_specific_params.summarization.num_beams': 4,
'task_specific_params.summarization_cnn.length_penalty': 2.0,
'task_specific_params.summarization_cnn.max_length': 1_42,
'task_specific_params.summarization_cnn.min_length': 56,
'task_specific_params.summarization_cnn.num_beams': 4,
'task_specific_params.summarization_xsum.length_penalty': 1.0,
'task_specific_params.summarization_xsum.max_length': 62,
'task_specific_params.summarization_xsum.min_length': 11,
'task_specific_params.summarization_xsum.num_beams': 6,
}
self.assertEqual(flatten_dict(lowerCAmelCase) , lowerCAmelCase)
def UpperCAmelCase ( self : List[Any]) -> Optional[int]:
"""simple docstring"""
lowercase__ = np.random.randn(3 , 4)
self.assertTrue(np.allclose(transpose(lowerCAmelCase) , x.transpose()))
lowercase__ = np.random.randn(3 , 4 , 5)
self.assertTrue(np.allclose(transpose(lowerCAmelCase , axes=(1, 2, 0)) , x.transpose((1, 2, 0))))
@require_torch
def UpperCAmelCase ( self : Tuple) -> Optional[int]:
"""simple docstring"""
lowercase__ = np.random.randn(3 , 4)
lowercase__ = torch.tensor(lowerCAmelCase)
self.assertTrue(np.allclose(transpose(lowerCAmelCase) , transpose(lowerCAmelCase).numpy()))
lowercase__ = np.random.randn(3 , 4 , 5)
lowercase__ = torch.tensor(lowerCAmelCase)
self.assertTrue(np.allclose(transpose(lowerCAmelCase , axes=(1, 2, 0)) , transpose(lowerCAmelCase , axes=(1, 2, 0)).numpy()))
@require_tf
def UpperCAmelCase ( self : Optional[Any]) -> Any:
"""simple docstring"""
lowercase__ = np.random.randn(3 , 4)
lowercase__ = tf.constant(lowerCAmelCase)
self.assertTrue(np.allclose(transpose(lowerCAmelCase) , transpose(lowerCAmelCase).numpy()))
lowercase__ = np.random.randn(3 , 4 , 5)
lowercase__ = tf.constant(lowerCAmelCase)
self.assertTrue(np.allclose(transpose(lowerCAmelCase , axes=(1, 2, 0)) , transpose(lowerCAmelCase , axes=(1, 2, 0)).numpy()))
@require_flax
def UpperCAmelCase ( self : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = np.random.randn(3 , 4)
lowercase__ = jnp.array(lowerCAmelCase)
self.assertTrue(np.allclose(transpose(lowerCAmelCase) , np.asarray(transpose(lowerCAmelCase))))
lowercase__ = np.random.randn(3 , 4 , 5)
lowercase__ = jnp.array(lowerCAmelCase)
self.assertTrue(np.allclose(transpose(lowerCAmelCase , axes=(1, 2, 0)) , np.asarray(transpose(lowerCAmelCase , axes=(1, 2, 0)))))
def UpperCAmelCase ( self : Dict) -> Any:
"""simple docstring"""
lowercase__ = np.random.randn(3 , 4)
self.assertTrue(np.allclose(reshape(lowerCAmelCase , (4, 3)) , np.reshape(lowerCAmelCase , (4, 3))))
lowercase__ = np.random.randn(3 , 4 , 5)
self.assertTrue(np.allclose(reshape(lowerCAmelCase , (12, 5)) , np.reshape(lowerCAmelCase , (12, 5))))
@require_torch
def UpperCAmelCase ( self : str) -> Optional[int]:
"""simple docstring"""
lowercase__ = np.random.randn(3 , 4)
lowercase__ = torch.tensor(lowerCAmelCase)
self.assertTrue(np.allclose(reshape(lowerCAmelCase , (4, 3)) , reshape(lowerCAmelCase , (4, 3)).numpy()))
lowercase__ = np.random.randn(3 , 4 , 5)
lowercase__ = torch.tensor(lowerCAmelCase)
self.assertTrue(np.allclose(reshape(lowerCAmelCase , (12, 5)) , reshape(lowerCAmelCase , (12, 5)).numpy()))
@require_tf
def UpperCAmelCase ( self : Tuple) -> List[str]:
"""simple docstring"""
lowercase__ = np.random.randn(3 , 4)
lowercase__ = tf.constant(lowerCAmelCase)
self.assertTrue(np.allclose(reshape(lowerCAmelCase , (4, 3)) , reshape(lowerCAmelCase , (4, 3)).numpy()))
lowercase__ = np.random.randn(3 , 4 , 5)
lowercase__ = tf.constant(lowerCAmelCase)
self.assertTrue(np.allclose(reshape(lowerCAmelCase , (12, 5)) , reshape(lowerCAmelCase , (12, 5)).numpy()))
@require_flax
def UpperCAmelCase ( self : Tuple) -> List[Any]:
"""simple docstring"""
lowercase__ = np.random.randn(3 , 4)
lowercase__ = jnp.array(lowerCAmelCase)
self.assertTrue(np.allclose(reshape(lowerCAmelCase , (4, 3)) , np.asarray(reshape(lowerCAmelCase , (4, 3)))))
lowercase__ = np.random.randn(3 , 4 , 5)
lowercase__ = jnp.array(lowerCAmelCase)
self.assertTrue(np.allclose(reshape(lowerCAmelCase , (12, 5)) , np.asarray(reshape(lowerCAmelCase , (12, 5)))))
def UpperCAmelCase ( self : Any) -> Optional[int]:
"""simple docstring"""
lowercase__ = np.random.randn(1 , 3 , 4)
self.assertTrue(np.allclose(squeeze(lowerCAmelCase) , np.squeeze(lowerCAmelCase)))
lowercase__ = np.random.randn(1 , 4 , 1 , 5)
self.assertTrue(np.allclose(squeeze(lowerCAmelCase , axis=2) , np.squeeze(lowerCAmelCase , axis=2)))
@require_torch
def UpperCAmelCase ( self : List[str]) -> List[Any]:
"""simple docstring"""
lowercase__ = np.random.randn(1 , 3 , 4)
lowercase__ = torch.tensor(lowerCAmelCase)
self.assertTrue(np.allclose(squeeze(lowerCAmelCase) , squeeze(lowerCAmelCase).numpy()))
lowercase__ = np.random.randn(1 , 4 , 1 , 5)
lowercase__ = torch.tensor(lowerCAmelCase)
self.assertTrue(np.allclose(squeeze(lowerCAmelCase , axis=2) , squeeze(lowerCAmelCase , axis=2).numpy()))
@require_tf
def UpperCAmelCase ( self : int) -> Optional[int]:
"""simple docstring"""
lowercase__ = np.random.randn(1 , 3 , 4)
lowercase__ = tf.constant(lowerCAmelCase)
self.assertTrue(np.allclose(squeeze(lowerCAmelCase) , squeeze(lowerCAmelCase).numpy()))
lowercase__ = np.random.randn(1 , 4 , 1 , 5)
lowercase__ = tf.constant(lowerCAmelCase)
self.assertTrue(np.allclose(squeeze(lowerCAmelCase , axis=2) , squeeze(lowerCAmelCase , axis=2).numpy()))
@require_flax
def UpperCAmelCase ( self : Optional[Any]) -> Any:
"""simple docstring"""
lowercase__ = np.random.randn(1 , 3 , 4)
lowercase__ = jnp.array(lowerCAmelCase)
self.assertTrue(np.allclose(squeeze(lowerCAmelCase) , np.asarray(squeeze(lowerCAmelCase))))
lowercase__ = np.random.randn(1 , 4 , 1 , 5)
lowercase__ = jnp.array(lowerCAmelCase)
self.assertTrue(np.allclose(squeeze(lowerCAmelCase , axis=2) , np.asarray(squeeze(lowerCAmelCase , axis=2))))
def UpperCAmelCase ( self : List[str]) -> Dict:
"""simple docstring"""
lowercase__ = np.random.randn(3 , 4)
self.assertTrue(np.allclose(expand_dims(lowerCAmelCase , axis=1) , np.expand_dims(lowerCAmelCase , axis=1)))
@require_torch
def UpperCAmelCase ( self : Dict) -> Dict:
"""simple docstring"""
lowercase__ = np.random.randn(3 , 4)
lowercase__ = torch.tensor(lowerCAmelCase)
self.assertTrue(np.allclose(expand_dims(lowerCAmelCase , axis=1) , expand_dims(lowerCAmelCase , axis=1).numpy()))
@require_tf
def UpperCAmelCase ( self : List[Any]) -> Optional[Any]:
"""simple docstring"""
lowercase__ = np.random.randn(3 , 4)
lowercase__ = tf.constant(lowerCAmelCase)
self.assertTrue(np.allclose(expand_dims(lowerCAmelCase , axis=1) , expand_dims(lowerCAmelCase , axis=1).numpy()))
@require_flax
def UpperCAmelCase ( self : int) -> List[Any]:
"""simple docstring"""
lowercase__ = np.random.randn(3 , 4)
lowercase__ = jnp.array(lowerCAmelCase)
self.assertTrue(np.allclose(expand_dims(lowerCAmelCase , axis=1) , np.asarray(expand_dims(lowerCAmelCase , axis=1))))
| 642
|
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
a__ : int = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
a__ : Optional[Any] = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
a__ : Tuple = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase__( datasets.Metric ):
'''simple docstring'''
def UpperCAmelCase ( self : List[Any]) -> MetricInfo:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token') , id='sequence'),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token') , id='sequence') , id='references'),
}) , )
def UpperCAmelCase ( self : int , lowerCAmelCase : List[List[List[str]]] , lowerCAmelCase : List[List[str]] , lowerCAmelCase : int = 1 , lowerCAmelCase : int = 4 , ) -> Dict[str, float]:
"""simple docstring"""
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=lowerCAmelCase , hypotheses=lowerCAmelCase , min_len=lowerCAmelCase , max_len=lowerCAmelCase)
}
| 642
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a__ : Optional[int] = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Any = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Dict = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : List[str] = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : int = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
a__ : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 642
|
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class UpperCAmelCase__:
'''simple docstring'''
def __init__( self : Tuple , lowerCAmelCase : str , lowerCAmelCase : Dict=13 , lowerCAmelCase : Dict=7 , lowerCAmelCase : int=True , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : str=True , lowerCAmelCase : int=True , lowerCAmelCase : List[Any]=99 , lowerCAmelCase : List[Any]=[1, 1, 2] , lowerCAmelCase : Optional[Any]=1 , lowerCAmelCase : int=32 , lowerCAmelCase : Union[str, Any]=4 , lowerCAmelCase : Tuple=8 , lowerCAmelCase : int=37 , lowerCAmelCase : Any="gelu_new" , lowerCAmelCase : str=0.1 , lowerCAmelCase : List[str]=0.1 , lowerCAmelCase : Dict=0.0 , lowerCAmelCase : str=5_12 , lowerCAmelCase : str=3 , lowerCAmelCase : List[Any]=0.02 , lowerCAmelCase : Union[str, Any]=3 , lowerCAmelCase : Any=4 , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Optional[int]=False , ) -> List[Any]:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = seq_length
lowercase__ = is_training
lowercase__ = use_input_mask
lowercase__ = use_token_type_ids
lowercase__ = use_labels
lowercase__ = vocab_size
lowercase__ = block_sizes
lowercase__ = num_decoder_layers
lowercase__ = d_model
lowercase__ = n_head
lowercase__ = d_head
lowercase__ = d_inner
lowercase__ = hidden_act
lowercase__ = hidden_dropout
lowercase__ = attention_dropout
lowercase__ = activation_dropout
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = 2
lowercase__ = num_labels
lowercase__ = num_choices
lowercase__ = scope
lowercase__ = initializer_std
# Used in the tests to check the size of the first attention layer
lowercase__ = n_head
# Used in the tests to check the size of the first hidden state
lowercase__ = self.d_model
# Used in the tests to check the number of output hidden states/attentions
lowercase__ = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
lowercase__ = self.num_hidden_layers + 2
def UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
lowercase__ = None
if self.use_input_mask:
lowercase__ = random_attention_mask([self.batch_size, self.seq_length])
lowercase__ = None
if self.use_token_type_ids:
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
lowercase__ = None
lowercase__ = None
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
lowercase__ = ids_tensor([self.batch_size] , self.num_choices)
lowercase__ = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def UpperCAmelCase ( self : Dict , lowerCAmelCase : List[Any] , lowerCAmelCase : Dict , lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Any , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Union[str, Any] , ) -> int:
"""simple docstring"""
lowercase__ = TFFunnelModel(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
lowercase__ = [input_ids, input_mask]
lowercase__ = model(lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model))
lowercase__ = False
lowercase__ = TFFunnelModel(config=lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model))
lowercase__ = False
lowercase__ = TFFunnelModel(config=lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model))
def UpperCAmelCase ( self : List[str] , lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int] , lowerCAmelCase : Any , lowerCAmelCase : List[str] , lowerCAmelCase : int , lowerCAmelCase : Optional[Any] , ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = TFFunnelBaseModel(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
lowercase__ = [input_ids, input_mask]
lowercase__ = model(lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model))
lowercase__ = False
lowercase__ = TFFunnelBaseModel(config=lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model))
lowercase__ = False
lowercase__ = TFFunnelBaseModel(config=lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model))
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Dict , lowerCAmelCase : List[str] , lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : Tuple , ) -> str:
"""simple docstring"""
lowercase__ = TFFunnelForPreTraining(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length))
def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : Any , lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[str] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Tuple , ) -> Optional[int]:
"""simple docstring"""
lowercase__ = TFFunnelForMaskedLM(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : int , lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Dict , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[Any] , ) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.num_labels
lowercase__ = TFFunnelForSequenceClassification(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : str , lowerCAmelCase : Optional[int] , lowerCAmelCase : int , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[str] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[Any] , ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.num_choices
lowercase__ = TFFunnelForMultipleChoice(config=lowerCAmelCase)
lowercase__ = tf.tile(tf.expand_dims(lowerCAmelCase , 1) , (1, self.num_choices, 1))
lowercase__ = tf.tile(tf.expand_dims(lowerCAmelCase , 1) , (1, self.num_choices, 1))
lowercase__ = tf.tile(tf.expand_dims(lowerCAmelCase , 1) , (1, self.num_choices, 1))
lowercase__ = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def UpperCAmelCase ( self : List[str] , lowerCAmelCase : str , lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int , lowerCAmelCase : Optional[int] , lowerCAmelCase : Any , ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.num_labels
lowercase__ = TFFunnelForTokenClassification(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def UpperCAmelCase ( self : int , lowerCAmelCase : List[str] , lowerCAmelCase : int , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : Tuple , lowerCAmelCase : Tuple , ) -> Optional[int]:
"""simple docstring"""
lowercase__ = TFFunnelForQuestionAnswering(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
(
(
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
),
) = config_and_inputs
lowercase__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase__( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
A : int = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
A : Dict = (
{
"feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
"fill-mask": TFFunnelForMaskedLM,
"question-answering": TFFunnelForQuestionAnswering,
"text-classification": TFFunnelForSequenceClassification,
"token-classification": TFFunnelForTokenClassification,
"zero-shot": TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
A : Optional[int] = False
A : Optional[int] = False
def UpperCAmelCase ( self : Tuple) -> str:
"""simple docstring"""
lowercase__ = TFFunnelModelTester(self)
lowercase__ = ConfigTester(self , config_class=lowerCAmelCase)
def UpperCAmelCase ( self : int) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase ( self : Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCAmelCase)
def UpperCAmelCase ( self : int) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase)
def UpperCAmelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase)
def UpperCAmelCase ( self : Dict) -> Dict:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase)
@require_tf
class UpperCAmelCase__( lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
A : Tuple = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
A : List[str] = False
A : int = False
def UpperCAmelCase ( self : Any) -> List[Any]:
"""simple docstring"""
lowercase__ = TFFunnelModelTester(self , base=lowerCAmelCase)
lowercase__ = ConfigTester(self , config_class=lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase ( self : Tuple) -> int:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*lowerCAmelCase)
def UpperCAmelCase ( self : int) -> str:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase)
def UpperCAmelCase ( self : List[str]) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowerCAmelCase)
| 642
| 1
|
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UpperCAmelCase__:
'''simple docstring'''
@property
def UpperCAmelCase ( self : Optional[Any]) -> List[str]:
"""simple docstring"""
return self.get_dummy_input()
@property
def UpperCAmelCase ( self : str) -> List[Any]:
"""simple docstring"""
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(f'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''')
def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : Any=True , lowerCAmelCase : int=False , lowerCAmelCase : Optional[int]=False , lowerCAmelCase : str=False , ) -> Any:
"""simple docstring"""
lowercase__ = 4
lowercase__ = 32
lowercase__ = (32, 32)
lowercase__ = torch.manual_seed(0)
lowercase__ = torch.device(lowerCAmelCase)
lowercase__ = (batch_size, num_channels) + sizes
lowercase__ = randn_tensor(lowerCAmelCase , generator=lowerCAmelCase , device=lowerCAmelCase)
lowercase__ = {'hidden_states': hidden_states}
if include_temb:
lowercase__ = 1_28
lowercase__ = randn_tensor((batch_size, temb_channels) , generator=lowerCAmelCase , device=lowerCAmelCase)
if include_res_hidden_states_tuple:
lowercase__ = torch.manual_seed(1)
lowercase__ = (randn_tensor(lowerCAmelCase , generator=lowerCAmelCase , device=lowerCAmelCase),)
if include_encoder_hidden_states:
lowercase__ = floats_tensor((batch_size, 32, 32)).to(lowerCAmelCase)
if include_skip_sample:
lowercase__ = randn_tensor(((batch_size, 3) + sizes) , generator=lowerCAmelCase , device=lowerCAmelCase)
return dummy_input
def UpperCAmelCase ( self : Union[str, Any]) -> List[Any]:
"""simple docstring"""
lowercase__ = {
'in_channels': 32,
'out_channels': 32,
'temb_channels': 1_28,
}
if self.block_type == "up":
lowercase__ = 32
if self.block_type == "mid":
init_dict.pop('out_channels')
lowercase__ = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase ( self : Dict , lowerCAmelCase : Dict) -> Optional[Any]:
"""simple docstring"""
lowercase__, lowercase__ = self.prepare_init_args_and_inputs_for_common()
lowercase__ = self.block_class(**lowerCAmelCase)
unet_block.to(lowerCAmelCase)
unet_block.eval()
with torch.no_grad():
lowercase__ = unet_block(**lowerCAmelCase)
if isinstance(lowerCAmelCase , lowerCAmelCase):
lowercase__ = output[0]
self.assertEqual(output.shape , self.output_shape)
lowercase__ = output[0, -1, -3:, -3:]
lowercase__ = torch.tensor(lowerCAmelCase).to(lowerCAmelCase)
assert torch_all_close(output_slice.flatten() , lowerCAmelCase , atol=5E-3)
@unittest.skipIf(torch_device == 'mps' , 'Training is not supported in mps')
def UpperCAmelCase ( self : Any) -> Tuple:
"""simple docstring"""
lowercase__, lowercase__ = self.prepare_init_args_and_inputs_for_common()
lowercase__ = self.block_class(**lowerCAmelCase)
model.to(lowerCAmelCase)
model.train()
lowercase__ = model(**lowerCAmelCase)
if isinstance(lowerCAmelCase , lowerCAmelCase):
lowercase__ = output[0]
lowercase__ = torch.device(lowerCAmelCase)
lowercase__ = randn_tensor(output.shape , device=lowerCAmelCase)
lowercase__ = torch.nn.functional.mse_loss(lowerCAmelCase , lowerCAmelCase)
loss.backward()
| 642
|
def _lowerCAmelCase ( A__ , A__ , A__ ):
if principal <= 0:
raise Exception('Principal borrowed must be > 0' )
if rate_per_annum < 0:
raise Exception('Rate of interest must be >= 0' )
if years_to_repay <= 0 or not isinstance(A__ , A__ ):
raise Exception('Years to repay must be an integer > 0' )
# Yearly rate is divided by 12 to get monthly rate
lowercase__ = rate_per_annum / 12
# Years to repay is multiplied by 12 to get number of payments as payment is monthly
lowercase__ = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 642
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a__ : List[str] = {
"configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
"convert_funnel_original_tf_checkpoint_to_pytorch": [],
"tokenization_funnel": ["FunnelTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Any = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Tuple = [
"FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"FunnelBaseModel",
"FunnelForMaskedLM",
"FunnelForMultipleChoice",
"FunnelForPreTraining",
"FunnelForQuestionAnswering",
"FunnelForSequenceClassification",
"FunnelForTokenClassification",
"FunnelModel",
"FunnelPreTrainedModel",
"load_tf_weights_in_funnel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Dict = [
"TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFFunnelBaseModel",
"TFFunnelForMaskedLM",
"TFFunnelForMultipleChoice",
"TFFunnelForPreTraining",
"TFFunnelForQuestionAnswering",
"TFFunnelForSequenceClassification",
"TFFunnelForTokenClassification",
"TFFunnelModel",
"TFFunnelPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
a__ : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 642
|
from __future__ import annotations
def _lowerCAmelCase ( A__ , A__ ):
if b == 0:
return (1, 0)
((lowercase__), (lowercase__)) = extended_euclid(A__ , a % b )
lowercase__ = a // b
return (y, x - k * y)
def _lowerCAmelCase ( A__ , A__ , A__ , A__ ):
((lowercase__), (lowercase__)) = extended_euclid(A__ , A__ )
lowercase__ = na * na
lowercase__ = ra * x * na + ra * y * na
return (n % m + m) % m
def _lowerCAmelCase ( A__ , A__ ):
((lowercase__), (lowercase__)) = extended_euclid(A__ , A__ )
if b < 0:
lowercase__ = (b % n + n) % n
return b
def _lowerCAmelCase ( A__ , A__ , A__ , A__ ):
lowercase__, lowercase__ = invert_modulo(A__ , A__ ), invert_modulo(A__ , A__ )
lowercase__ = na * na
lowercase__ = ra * x * na + ra * y * na
return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name="chinese_remainder_theorem", verbose=True)
testmod(name="chinese_remainder_theorem2", verbose=True)
testmod(name="invert_modulo", verbose=True)
testmod(name="extended_euclid", verbose=True)
| 642
| 1
|
from pathlib import Path
import fire
def _lowerCAmelCase ( A__ , A__ , A__ ):
lowercase__ = Path(A__ )
lowercase__ = Path(A__ )
dest_dir.mkdir(exist_ok=A__ )
for path in src_dir.iterdir():
lowercase__ = [x.rstrip() for x in list(path.open().readlines() )][:n]
lowercase__ = dest_dir.joinpath(path.name )
print(A__ )
dest_path.open('w' ).write('\n'.join(A__ ) )
if __name__ == "__main__":
fire.Fire(minify)
| 642
|
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
a__ : Union[str, Any] = logging.get_logger(__name__)
a__ : Optional[Any] = {
"google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : Union[str, Any] = "umt5"
A : List[str] = ["past_key_values"]
def __init__( self : List[Any] , lowerCAmelCase : Optional[int]=25_01_12 , lowerCAmelCase : str=5_12 , lowerCAmelCase : List[Any]=64 , lowerCAmelCase : Optional[int]=10_24 , lowerCAmelCase : Union[str, Any]=8 , lowerCAmelCase : Tuple=None , lowerCAmelCase : Optional[Any]=6 , lowerCAmelCase : int=32 , lowerCAmelCase : int=1_28 , lowerCAmelCase : List[str]=0.1 , lowerCAmelCase : List[str]=1E-6 , lowerCAmelCase : Optional[int]=1.0 , lowerCAmelCase : Optional[Any]="gated-gelu" , lowerCAmelCase : List[Any]=True , lowerCAmelCase : List[str]=True , lowerCAmelCase : List[Any]="T5Tokenizer" , lowerCAmelCase : str=True , lowerCAmelCase : Optional[int]=0 , lowerCAmelCase : Tuple=1 , lowerCAmelCase : Any=0 , **lowerCAmelCase : int , ) -> str:
"""simple docstring"""
super().__init__(
is_encoder_decoder=lowerCAmelCase , tokenizer_class=lowerCAmelCase , tie_word_embeddings=lowerCAmelCase , pad_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , decoder_start_token_id=lowerCAmelCase , **lowerCAmelCase , )
lowercase__ = vocab_size
lowercase__ = d_model
lowercase__ = d_kv
lowercase__ = d_ff
lowercase__ = num_layers
lowercase__ = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
lowercase__ = num_heads
lowercase__ = relative_attention_num_buckets
lowercase__ = relative_attention_max_distance
lowercase__ = dropout_rate
lowercase__ = layer_norm_epsilon
lowercase__ = initializer_factor
lowercase__ = feed_forward_proj
lowercase__ = use_cache
lowercase__ = self.feed_forward_proj.split('-')
lowercase__ = act_info[-1]
lowercase__ = act_info[0] == 'gated'
if len(lowerCAmelCase) > 1 and act_info[0] != "gated" or len(lowerCAmelCase) > 2:
raise ValueError(
f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
'\'gated-gelu\' or \'relu\'')
if feed_forward_proj == "gated-gelu":
lowercase__ = 'gelu_new'
@property
def UpperCAmelCase ( self : Union[str, Any]) -> Dict:
"""simple docstring"""
return self.d_model
@property
def UpperCAmelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
return self.num_heads
@property
def UpperCAmelCase ( self : Optional[int]) -> Optional[Any]:
"""simple docstring"""
return self.num_layers
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def UpperCAmelCase ( self : Optional[int]) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
lowercase__ = {
'input_ids': {0: 'batch', 1: 'encoder_sequence'},
'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
}
if self.use_past:
lowercase__ = 'past_encoder_sequence + sequence'
lowercase__ = {0: 'batch'}
lowercase__ = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
lowercase__ = {0: 'batch', 1: 'decoder_sequence'}
lowercase__ = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase , direction='inputs')
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def UpperCAmelCase ( self : int) -> int:
"""simple docstring"""
return 13
@property
def UpperCAmelCase ( self : Optional[Any]) -> float:
"""simple docstring"""
return 5E-4
| 642
| 1
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCAmelCase__( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
A : Union[str, Any] = StableDiffusionSAGPipeline
A : str = TEXT_TO_IMAGE_PARAMS
A : Tuple = TEXT_TO_IMAGE_BATCH_PARAMS
A : Any = TEXT_TO_IMAGE_IMAGE_PARAMS
A : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
A : Any = False
def UpperCAmelCase ( self : Tuple) -> Optional[int]:
"""simple docstring"""
torch.manual_seed(0)
lowercase__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
lowercase__ = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=lowerCAmelCase , set_alpha_to_one=lowerCAmelCase , )
torch.manual_seed(0)
lowercase__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0)
lowercase__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
lowercase__ = CLIPTextModel(lowerCAmelCase)
lowercase__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
lowercase__ = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def UpperCAmelCase ( self : Any , lowerCAmelCase : str , lowerCAmelCase : Dict=0) -> Optional[int]:
"""simple docstring"""
if str(lowerCAmelCase).startswith('mps'):
lowercase__ = torch.manual_seed(lowerCAmelCase)
else:
lowercase__ = torch.Generator(device=lowerCAmelCase).manual_seed(lowerCAmelCase)
lowercase__ = {
'prompt': '.',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 1.0,
'sag_scale': 1.0,
'output_type': 'numpy',
}
return inputs
def UpperCAmelCase ( self : Optional[int]) -> List[str]:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3)
@slow
@require_torch_gpu
class UpperCAmelCase__( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase ( self : Optional[Any]) -> List[str]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase ( self : Optional[Any]) -> Any:
"""simple docstring"""
lowercase__ = StableDiffusionSAGPipeline.from_pretrained('CompVis/stable-diffusion-v1-4')
lowercase__ = sag_pipe.to(lowerCAmelCase)
sag_pipe.set_progress_bar_config(disable=lowerCAmelCase)
lowercase__ = '.'
lowercase__ = torch.manual_seed(0)
lowercase__ = sag_pipe(
[prompt] , generator=lowerCAmelCase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np')
lowercase__ = output.images
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowercase__ = np.array([0.15_68, 0.17_38, 0.16_95, 0.16_93, 0.15_07, 0.17_05, 0.15_47, 0.17_51, 0.19_49])
assert np.abs(image_slice.flatten() - expected_slice).max() < 5E-2
def UpperCAmelCase ( self : Union[str, Any]) -> int:
"""simple docstring"""
lowercase__ = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base')
lowercase__ = sag_pipe.to(lowerCAmelCase)
sag_pipe.set_progress_bar_config(disable=lowerCAmelCase)
lowercase__ = '.'
lowercase__ = torch.manual_seed(0)
lowercase__ = sag_pipe(
[prompt] , generator=lowerCAmelCase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np')
lowercase__ = output.images
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowercase__ = np.array([0.34_59, 0.28_76, 0.25_37, 0.30_02, 0.26_71, 0.21_60, 0.30_26, 0.22_62, 0.23_71])
assert np.abs(image_slice.flatten() - expected_slice).max() < 5E-2
def UpperCAmelCase ( self : Union[str, Any]) -> int:
"""simple docstring"""
lowercase__ = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base')
lowercase__ = sag_pipe.to(lowerCAmelCase)
sag_pipe.set_progress_bar_config(disable=lowerCAmelCase)
lowercase__ = '.'
lowercase__ = torch.manual_seed(0)
lowercase__ = sag_pipe(
[prompt] , width=7_68 , height=5_12 , generator=lowerCAmelCase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' , )
lowercase__ = output.images
assert image.shape == (1, 5_12, 7_68, 3)
| 642
|
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
a__ : Any = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__( lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
A : str = XGLMTokenizer
A : List[Any] = XGLMTokenizerFast
A : int = True
A : Optional[Any] = True
def UpperCAmelCase ( self : Optional[int]) -> Optional[Any]:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
lowercase__ = XGLMTokenizer(lowerCAmelCase , keep_accents=lowerCAmelCase)
tokenizer.save_pretrained(self.tmpdirname)
def UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
lowercase__ = '<pad>'
lowercase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase) , lowerCAmelCase)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase) , lowerCAmelCase)
def UpperCAmelCase ( self : str) -> List[str]:
"""simple docstring"""
lowercase__ = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '<s>')
self.assertEqual(vocab_keys[1] , '<pad>')
self.assertEqual(len(lowerCAmelCase) , 10_08)
def UpperCAmelCase ( self : List[str]) -> str:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 10_08)
def UpperCAmelCase ( self : Optional[Any]) -> List[str]:
"""simple docstring"""
lowercase__ = XGLMTokenizer(lowerCAmelCase , keep_accents=lowerCAmelCase)
lowercase__ = tokenizer.tokenize('This is a test')
self.assertListEqual(lowerCAmelCase , ['▁This', '▁is', '▁a', '▁t', 'est'])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
lowercase__ = tokenizer.tokenize('I was born in 92000, and this is falsé.')
self.assertListEqual(
lowerCAmelCase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
lowercase__ = tokenizer.convert_tokens_to_ids(lowerCAmelCase)
self.assertListEqual(
lowerCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
lowercase__ = tokenizer.convert_ids_to_tokens(lowerCAmelCase)
self.assertListEqual(
lowerCAmelCase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
def UpperCAmelCase ( self : int) -> Dict:
"""simple docstring"""
return XGLMTokenizer.from_pretrained('facebook/xglm-564M')
def UpperCAmelCase ( self : Optional[int]) -> Dict:
"""simple docstring"""
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(lowerCAmelCase , f.name)
lowercase__ = XGLMTokenizer(f.name , keep_accents=lowerCAmelCase)
lowercase__ = pickle.dumps(lowerCAmelCase)
pickle.loads(lowerCAmelCase)
def UpperCAmelCase ( self : Optional[Any]) -> str:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_rust_tokenizer()
lowercase__ = 'I was born in 92000, and this is falsé.'
lowercase__ = tokenizer.tokenize(lowerCAmelCase)
lowercase__ = rust_tokenizer.tokenize(lowerCAmelCase)
self.assertListEqual(lowerCAmelCase , lowerCAmelCase)
lowercase__ = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase)
lowercase__ = rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase)
self.assertListEqual(lowerCAmelCase , lowerCAmelCase)
lowercase__ = self.get_rust_tokenizer()
lowercase__ = tokenizer.encode(lowerCAmelCase)
lowercase__ = rust_tokenizer.encode(lowerCAmelCase)
self.assertListEqual(lowerCAmelCase , lowerCAmelCase)
@slow
def UpperCAmelCase ( self : List[str]) -> List[str]:
"""simple docstring"""
lowercase__ = 'Hello World!'
lowercase__ = [2, 3_12_27, 44_47, 35]
self.assertListEqual(lowerCAmelCase , self.big_tokenizer.encode(lowerCAmelCase))
@slow
def UpperCAmelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'
)
# fmt: off
lowercase__ = [2, 10_18, 67, 11, 19_88, 26_17, 56_31, 2_78, 11, 34_07, 48, 7_16_30, 2_80_85, 4, 32_34, 1_57, 13, 6, 5, 6, 4, 35_26, 7_68, 15, 6_59, 57, 2_98, 39_83, 8_64, 1_29, 21, 6, 5, 1_36_75, 3_77, 6_52, 75_80, 1_03_41, 1_55, 28_17, 4_22, 16_66, 7, 16_74, 53, 1_13, 20_22_77, 1_78_92, 33, 60, 87, 4, 32_34, 1_57, 61, 26_67, 5_23_76, 19, 88, 23, 7_35]
# fmt: on
self.assertListEqual(lowerCAmelCase , self.big_tokenizer.encode(lowerCAmelCase))
@slow
def UpperCAmelCase ( self : str) -> Dict:
"""simple docstring"""
lowercase__ = {
'input_ids': [[2, 10_88_25, 11_63, 15, 8_80_10, 4_73, 1_58_98, 1_57, 1_36_72, 18_57, 3_12, 8, 23_80_21, 11_63, 53, 1_36_72, 18_57, 3_12, 8, 5_32_83, 18_23_96, 8, 1_85_66, 16, 3_67_33, 41_01, 8, 2_30, 24_40_17, 12_25_53, 7, 15, 13_25_97, 4, 2_93, 1_25_11, 76_10, 4, 34_14, 13_25_97, 9, 4, 3_23_61, 3_62, 4, 7_34, 2_85_12, 3_25_69, 18, 4, 3_23_61, 2_60_96, 1_49_82, 73, 1_87_15, 2_14_33, 23_52_61, 15, 4_92, 1_24_27, 16, 53, 1_87_15, 2_14_33, 6_54_54, 15, 2_36_59, 5_63, 16, 2_78, 5_97, 28_43, 5_95, 79_31, 18_23_96, 6_41_86, 22, 8_86, 5_95, 13_29_81, 53, 2_55_40, 34_49, 4_39_82, 3_99_01, 59_51, 8_78, 3_30, 4, 2_76_94, 8_02_69, 3_12, 53, 65_17, 1_17_80, 6_11, 2_04_08, 5], [2, 6, 13_25_97, 67, 4_28_97, 33, 5_92, 8, 16_37_29, 2_55_40, 3_61, 13_69_97, 10_95_14, 17_32_30, 7, 5_01, 60, 10_29_13, 1_96, 56_31, 2_35, 6_32_43, 4_73, 6, 23_17_57, 74, 52_77, 79_05, 53, 30_95, 3_73_17, 22, 4_54, 18_38_74, 5], [2, 2_68, 3_12_98, 4_65_30, 6, 13_29_35, 4_38_31, 7, 5_97, 32, 24, 36_88, 98_65, 5]],
'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase , model_name='facebook/xglm-564M' , padding=lowerCAmelCase , )
| 642
| 1
|
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def _lowerCAmelCase ( A__ , A__ , A__ ):
# Initialise PyTorch model
lowercase__ = TaConfig.from_json_file(A__ )
print(F'''Building PyTorch model from configuration: {config}''' )
lowercase__ = TaForConditionalGeneration(A__ )
# Load weights from tf checkpoint
load_tf_weights_in_ta(A__ , A__ , A__ )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(A__ )
if __name__ == "__main__":
a__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
a__ : Dict = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 642
|
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class UpperCAmelCase__:
'''simple docstring'''
def __init__( self : Optional[Any] , lowerCAmelCase : str) -> Optional[int]:
"""simple docstring"""
lowercase__ = data
lowercase__ = [0X6_7_4_5_2_3_0_1, 0XE_F_C_D_A_B_8_9, 0X9_8_B_A_D_C_F_E, 0X1_0_3_2_5_4_7_6, 0XC_3_D_2_E_1_F_0]
@staticmethod
def UpperCAmelCase ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[int]) -> str:
"""simple docstring"""
return ((n << b) | (n >> (32 - b))) & 0XF_F_F_F_F_F_F_F
def UpperCAmelCase ( self : Dict) -> Dict:
"""simple docstring"""
lowercase__ = B'\x80' + B'\x00' * (63 - (len(self.data) + 8) % 64)
lowercase__ = self.data + padding + struct.pack('>Q' , 8 * len(self.data))
return padded_data
def UpperCAmelCase ( self : int) -> Tuple:
"""simple docstring"""
return [
self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data) , 64)
]
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : int) -> List[Any]:
"""simple docstring"""
lowercase__ = list(struct.unpack('>16L' , lowerCAmelCase)) + [0] * 64
for i in range(16 , 80):
lowercase__ = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1)
return w
def UpperCAmelCase ( self : str) -> Dict:
"""simple docstring"""
lowercase__ = self.padding()
lowercase__ = self.split_blocks()
for block in self.blocks:
lowercase__ = self.expand_block(lowerCAmelCase)
lowercase__, lowercase__, lowercase__, lowercase__, lowercase__ = self.h
for i in range(0 , 80):
if 0 <= i < 20:
lowercase__ = (b & c) | ((~b) & d)
lowercase__ = 0X5_A_8_2_7_9_9_9
elif 20 <= i < 40:
lowercase__ = b ^ c ^ d
lowercase__ = 0X6_E_D_9_E_B_A_1
elif 40 <= i < 60:
lowercase__ = (b & c) | (b & d) | (c & d)
lowercase__ = 0X8_F_1_B_B_C_D_C
elif 60 <= i < 80:
lowercase__ = b ^ c ^ d
lowercase__ = 0XC_A_6_2_C_1_D_6
lowercase__, lowercase__, lowercase__, lowercase__, lowercase__ = (
self.rotate(lowerCAmelCase , 5) + f + e + k + expanded_block[i] & 0XF_F_F_F_F_F_F_F,
a,
self.rotate(lowerCAmelCase , 30),
c,
d,
)
lowercase__ = (
self.h[0] + a & 0XF_F_F_F_F_F_F_F,
self.h[1] + b & 0XF_F_F_F_F_F_F_F,
self.h[2] + c & 0XF_F_F_F_F_F_F_F,
self.h[3] + d & 0XF_F_F_F_F_F_F_F,
self.h[4] + e & 0XF_F_F_F_F_F_F_F,
)
return ("{:08x}" * 5).format(*self.h)
def _lowerCAmelCase ( ):
lowercase__ = B'Test String'
assert SHAaHash(A__ ).final_hash() == hashlib.shaa(A__ ).hexdigest() # noqa: S324
def _lowerCAmelCase ( ):
lowercase__ = argparse.ArgumentParser(description='Process some strings or files' )
parser.add_argument(
'--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
parser.add_argument('--file' , dest='input_file' , help='Hash contents of a file' )
lowercase__ = parser.parse_args()
lowercase__ = args.input_string
# In any case hash input should be a bytestring
if args.input_file:
with open(args.input_file , 'rb' ) as f:
lowercase__ = f.read()
else:
lowercase__ = bytes(A__ , 'utf-8' )
print(SHAaHash(A__ ).final_hash() )
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
| 642
| 1
|
import numpy
# List of input, output pairs
a__ : int = (
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
a__ : Optional[Any] = (((5_15, 22, 13), 5_55), ((61, 35, 49), 1_50))
a__ : Optional[int] = [2, 4, 1, 5]
a__ : str = len(train_data)
a__ : Optional[int] = 0.0_0_9
def _lowerCAmelCase ( A__ , A__="train" ):
return calculate_hypothesis_value(A__ , A__ ) - output(
A__ , A__ )
def _lowerCAmelCase ( A__ ):
lowercase__ = 0
for i in range(len(A__ ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def _lowerCAmelCase ( A__ , A__ ):
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def _lowerCAmelCase ( A__ , A__ ):
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def _lowerCAmelCase ( A__ , A__=m ):
lowercase__ = 0
for i in range(A__ ):
if index == -1:
summation_value += _error(A__ )
else:
summation_value += _error(A__ ) * train_data[i][0][index]
return summation_value
def _lowerCAmelCase ( A__ ):
lowercase__ = summation_of_cost_derivative(A__ , A__ ) / m
return cost_derivative_value
def _lowerCAmelCase ( ):
global parameter_vector
# Tune these values to set a tolerance value for predicted output
lowercase__ = 0.00_00_02
lowercase__ = 0
lowercase__ = 0
while True:
j += 1
lowercase__ = [0, 0, 0, 0]
for i in range(0 , len(A__ ) ):
lowercase__ = get_cost_derivative(i - 1 )
lowercase__ = (
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
A__ , A__ , atol=A__ , rtol=A__ , ):
break
lowercase__ = temp_parameter_vector
print(('Number of iterations:', j) )
def _lowerCAmelCase ( ):
for i in range(len(A__ ) ):
print(('Actual output value:', output(A__ , 'test' )) )
print(('Hypothesis output:', calculate_hypothesis_value(A__ , 'test' )) )
if __name__ == "__main__":
run_gradient_descent()
print("\nTesting gradient descent for a linear hypothesis function.\n")
test_gradient_descent()
| 642
|
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
a__ : List[Any] = logging.get_logger(__name__)
a__ : Optional[int] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
a__ : List[Any] = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
"tokenizer_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json",
},
}
a__ : int = {
"facebook/bart-base": 10_24,
"facebook/bart-large": 10_24,
"facebook/bart-large-mnli": 10_24,
"facebook/bart-large-cnn": 10_24,
"facebook/bart-large-xsum": 10_24,
"yjernite/bart_eli5": 10_24,
}
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : Optional[Any] = VOCAB_FILES_NAMES
A : Dict = PRETRAINED_VOCAB_FILES_MAP
A : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : int = ["input_ids", "attention_mask"]
A : Any = BartTokenizer
def __init__( self : List[Any] , lowerCAmelCase : Any=None , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : str="replace" , lowerCAmelCase : str="<s>" , lowerCAmelCase : int="</s>" , lowerCAmelCase : Optional[int]="</s>" , lowerCAmelCase : Union[str, Any]="<s>" , lowerCAmelCase : str="<unk>" , lowerCAmelCase : int="<pad>" , lowerCAmelCase : int="<mask>" , lowerCAmelCase : Dict=False , lowerCAmelCase : List[Any]=True , **lowerCAmelCase : Optional[Any] , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(
lowerCAmelCase , lowerCAmelCase , tokenizer_file=lowerCAmelCase , errors=lowerCAmelCase , bos_token=lowerCAmelCase , eos_token=lowerCAmelCase , sep_token=lowerCAmelCase , cls_token=lowerCAmelCase , unk_token=lowerCAmelCase , pad_token=lowerCAmelCase , mask_token=lowerCAmelCase , add_prefix_space=lowerCAmelCase , trim_offsets=lowerCAmelCase , **lowerCAmelCase , )
lowercase__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get('add_prefix_space' , lowerCAmelCase) != add_prefix_space:
lowercase__ = getattr(lowerCAmelCase , pre_tok_state.pop('type'))
lowercase__ = add_prefix_space
lowercase__ = pre_tok_class(**lowerCAmelCase)
lowercase__ = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
lowercase__ = 'post_processor'
lowercase__ = getattr(self.backend_tokenizer , lowerCAmelCase , lowerCAmelCase)
if tokenizer_component_instance:
lowercase__ = json.loads(tokenizer_component_instance.__getstate__())
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
lowercase__ = tuple(state['sep'])
if "cls" in state:
lowercase__ = tuple(state['cls'])
lowercase__ = False
if state.get('add_prefix_space' , lowerCAmelCase) != add_prefix_space:
lowercase__ = add_prefix_space
lowercase__ = True
if state.get('trim_offsets' , lowerCAmelCase) != trim_offsets:
lowercase__ = trim_offsets
lowercase__ = True
if changes_to_apply:
lowercase__ = getattr(lowerCAmelCase , state.pop('type'))
lowercase__ = component_class(**lowerCAmelCase)
setattr(self.backend_tokenizer , lowerCAmelCase , lowerCAmelCase)
@property
def UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.')
return None
return str(self._mask_token)
@mask_token.setter
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : int) -> Optional[int]:
"""simple docstring"""
lowercase__ = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase) if isinstance(lowerCAmelCase , lowerCAmelCase) else value
lowercase__ = value
def UpperCAmelCase ( self : List[str] , *lowerCAmelCase : Tuple , **lowerCAmelCase : Optional[int]) -> BatchEncoding:
"""simple docstring"""
lowercase__ = kwargs.get('is_split_into_words' , lowerCAmelCase)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'to use it with pretokenized inputs.')
return super()._batch_encode_plus(*lowerCAmelCase , **lowerCAmelCase)
def UpperCAmelCase ( self : str , *lowerCAmelCase : Tuple , **lowerCAmelCase : str) -> BatchEncoding:
"""simple docstring"""
lowercase__ = kwargs.get('is_split_into_words' , lowerCAmelCase)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'to use it with pretokenized inputs.')
return super()._encode_plus(*lowerCAmelCase , **lowerCAmelCase)
def UpperCAmelCase ( self : Dict , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None) -> Tuple[str]:
"""simple docstring"""
lowercase__ = self._tokenizer.model.save(lowerCAmelCase , name=lowerCAmelCase)
return tuple(lowerCAmelCase)
def UpperCAmelCase ( self : Any , lowerCAmelCase : str , lowerCAmelCase : Optional[int]=None) -> Tuple:
"""simple docstring"""
lowercase__ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
lowercase__ = [self.sep_token_id]
lowercase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
| 642
| 1
|
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : torch.FloatTensor
A : Optional[torch.FloatTensor] = None
def _lowerCAmelCase ( A__ , A__=0.9_99 , A__="cosine" , ):
if alpha_transform_type == "cosine":
def alpha_bar_fn(A__ ):
return math.cos((t + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(A__ ):
return math.exp(t * -12.0 )
else:
raise ValueError(F'''Unsupported alpha_tranform_type: {alpha_transform_type}''' )
lowercase__ = []
for i in range(A__ ):
lowercase__ = i / num_diffusion_timesteps
lowercase__ = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(A__ ) / alpha_bar_fn(A__ ) , A__ ) )
return torch.tensor(A__ , dtype=torch.floataa )
class UpperCAmelCase__( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
@register_to_config
def __init__( self : List[str] , lowerCAmelCase : int = 10_00 , lowerCAmelCase : str = "fixed_small_log" , lowerCAmelCase : bool = True , lowerCAmelCase : Optional[float] = 1.0 , lowerCAmelCase : str = "epsilon" , lowerCAmelCase : str = "squaredcos_cap_v2" , ) -> List[str]:
"""simple docstring"""
if beta_schedule != "squaredcos_cap_v2":
raise ValueError('UnCLIPScheduler only supports `beta_schedule`: \'squaredcos_cap_v2\'')
lowercase__ = betas_for_alpha_bar(lowerCAmelCase)
lowercase__ = 1.0 - self.betas
lowercase__ = torch.cumprod(self.alphas , dim=0)
lowercase__ = torch.tensor(1.0)
# standard deviation of the initial noise distribution
lowercase__ = 1.0
# setable values
lowercase__ = None
lowercase__ = torch.from_numpy(np.arange(0 , lowerCAmelCase)[::-1].copy())
lowercase__ = variance_type
def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : torch.FloatTensor , lowerCAmelCase : Optional[int] = None) -> torch.FloatTensor:
"""simple docstring"""
return sample
def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : int , lowerCAmelCase : Union[str, torch.device] = None) -> Optional[int]:
"""simple docstring"""
lowercase__ = num_inference_steps
lowercase__ = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
lowercase__ = (np.arange(0 , lowerCAmelCase) * step_ratio).round()[::-1].copy().astype(np.intaa)
lowercase__ = torch.from_numpy(lowerCAmelCase).to(lowerCAmelCase)
def UpperCAmelCase ( self : List[str] , lowerCAmelCase : Dict , lowerCAmelCase : str=None , lowerCAmelCase : Union[str, Any]=None , lowerCAmelCase : Dict=None) -> Tuple:
"""simple docstring"""
if prev_timestep is None:
lowercase__ = t - 1
lowercase__ = self.alphas_cumprod[t]
lowercase__ = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
lowercase__ = 1 - alpha_prod_t
lowercase__ = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
lowercase__ = self.betas[t]
else:
lowercase__ = 1 - alpha_prod_t / alpha_prod_t_prev
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
lowercase__ = beta_prod_t_prev / beta_prod_t * beta
if variance_type is None:
lowercase__ = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small_log":
lowercase__ = torch.log(torch.clamp(lowerCAmelCase , min=1E-2_0))
lowercase__ = torch.exp(0.5 * variance)
elif variance_type == "learned_range":
# NOTE difference with DDPM scheduler
lowercase__ = variance.log()
lowercase__ = beta.log()
lowercase__ = (predicted_variance + 1) / 2
lowercase__ = frac * max_log + (1 - frac) * min_log
return variance
def UpperCAmelCase ( self : List[str] , lowerCAmelCase : torch.FloatTensor , lowerCAmelCase : int , lowerCAmelCase : torch.FloatTensor , lowerCAmelCase : Optional[int] = None , lowerCAmelCase : str=None , lowerCAmelCase : bool = True , ) -> Union[UnCLIPSchedulerOutput, Tuple]:
"""simple docstring"""
lowercase__ = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
lowercase__, lowercase__ = torch.split(lowerCAmelCase , sample.shape[1] , dim=1)
else:
lowercase__ = None
# 1. compute alphas, betas
if prev_timestep is None:
lowercase__ = t - 1
lowercase__ = self.alphas_cumprod[t]
lowercase__ = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
lowercase__ = 1 - alpha_prod_t
lowercase__ = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
lowercase__ = self.betas[t]
lowercase__ = self.alphas[t]
else:
lowercase__ = 1 - alpha_prod_t / alpha_prod_t_prev
lowercase__ = 1 - beta
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
lowercase__ = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
lowercase__ = model_output
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`'''
' for the UnCLIPScheduler.')
# 3. Clip "predicted x_0"
if self.config.clip_sample:
lowercase__ = torch.clamp(
lowerCAmelCase , -self.config.clip_sample_range , self.config.clip_sample_range)
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowercase__ = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
lowercase__ = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowercase__ = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
lowercase__ = 0
if t > 0:
lowercase__ = randn_tensor(
model_output.shape , dtype=model_output.dtype , generator=lowerCAmelCase , device=model_output.device)
lowercase__ = self._get_variance(
lowerCAmelCase , predicted_variance=lowerCAmelCase , prev_timestep=lowerCAmelCase , )
if self.variance_type == "fixed_small_log":
lowercase__ = variance
elif self.variance_type == "learned_range":
lowercase__ = (0.5 * variance).exp()
else:
raise ValueError(
f'''variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`'''
' for the UnCLIPScheduler.')
lowercase__ = variance * variance_noise
lowercase__ = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return UnCLIPSchedulerOutput(prev_sample=lowerCAmelCase , pred_original_sample=lowerCAmelCase)
def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : torch.FloatTensor , lowerCAmelCase : torch.FloatTensor , lowerCAmelCase : torch.IntTensor , ) -> torch.FloatTensor:
"""simple docstring"""
lowercase__ = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype)
lowercase__ = timesteps.to(original_samples.device)
lowercase__ = alphas_cumprod[timesteps] ** 0.5
lowercase__ = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
lowercase__ = sqrt_alpha_prod.unsqueeze(-1)
lowercase__ = (1 - alphas_cumprod[timesteps]) ** 0.5
lowercase__ = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
lowercase__ = sqrt_one_minus_alpha_prod.unsqueeze(-1)
lowercase__ = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
| 642
|
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : str = (DDIMParallelScheduler,)
A : Any = (("eta", 0.0), ("num_inference_steps", 50))
def UpperCAmelCase ( self : Union[str, Any] , **lowerCAmelCase : Optional[int]) -> Dict:
"""simple docstring"""
lowercase__ = {
'num_train_timesteps': 10_00,
'beta_start': 0.00_01,
'beta_end': 0.02,
'beta_schedule': 'linear',
'clip_sample': True,
}
config.update(**lowerCAmelCase)
return config
def UpperCAmelCase ( self : int , **lowerCAmelCase : str) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config(**lowerCAmelCase)
lowercase__ = scheduler_class(**lowerCAmelCase)
lowercase__, lowercase__ = 10, 0.0
lowercase__ = self.dummy_model()
lowercase__ = self.dummy_sample_deter
scheduler.set_timesteps(lowerCAmelCase)
for t in scheduler.timesteps:
lowercase__ = model(lowerCAmelCase , lowerCAmelCase)
lowercase__ = scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase).prev_sample
return sample
def UpperCAmelCase ( self : Tuple) -> int:
"""simple docstring"""
for timesteps in [1_00, 5_00, 10_00]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase)
def UpperCAmelCase ( self : Tuple) -> Any:
"""simple docstring"""
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=lowerCAmelCase)
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config(steps_offset=1)
lowercase__ = scheduler_class(**lowerCAmelCase)
scheduler.set_timesteps(5)
assert torch.equal(scheduler.timesteps , torch.LongTensor([8_01, 6_01, 4_01, 2_01, 1]))
def UpperCAmelCase ( self : str) -> Tuple:
"""simple docstring"""
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2]):
self.check_over_configs(beta_start=lowerCAmelCase , beta_end=lowerCAmelCase)
def UpperCAmelCase ( self : Optional[int]) -> str:
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowerCAmelCase)
def UpperCAmelCase ( self : List[str]) -> List[str]:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCAmelCase)
def UpperCAmelCase ( self : List[Any]) -> str:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowerCAmelCase)
def UpperCAmelCase ( self : Optional[int]) -> int:
"""simple docstring"""
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=lowerCAmelCase)
def UpperCAmelCase ( self : Any) -> List[str]:
"""simple docstring"""
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=lowerCAmelCase)
def UpperCAmelCase ( self : List[str]) -> Optional[int]:
"""simple docstring"""
self.check_over_configs(thresholding=lowerCAmelCase)
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=lowerCAmelCase , prediction_type=lowerCAmelCase , sample_max_value=lowerCAmelCase , )
def UpperCAmelCase ( self : int) -> Optional[Any]:
"""simple docstring"""
for t in [1, 10, 49]:
self.check_over_forward(time_step=lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> int:
"""simple docstring"""
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 5_00]):
self.check_over_forward(time_step=lowerCAmelCase , num_inference_steps=lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> List[Any]:
"""simple docstring"""
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0]):
self.check_over_forward(time_step=lowerCAmelCase , eta=lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> List[Any]:
"""simple docstring"""
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**lowerCAmelCase)
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0) - 0.0)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_20 , 4_00) - 0.1_47_71)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_80 , 9_60) - 0.3_24_60)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0) - 0.0)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 , 4_86) - 0.0_09_79)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 , 9_98) - 0.02)) < 1E-5
def UpperCAmelCase ( self : Dict) -> Tuple:
"""simple docstring"""
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**lowerCAmelCase)
lowercase__, lowercase__ = 10, 0.0
scheduler.set_timesteps(lowerCAmelCase)
lowercase__ = self.dummy_model()
lowercase__ = self.dummy_sample_deter
lowercase__ = self.dummy_sample_deter + 0.1
lowercase__ = self.dummy_sample_deter - 0.1
lowercase__ = samplea.shape[0]
lowercase__ = torch.stack([samplea, samplea, samplea] , dim=0)
lowercase__ = torch.arange(lowerCAmelCase)[0:3, None].repeat(1 , lowerCAmelCase)
lowercase__ = model(samples.flatten(0 , 1) , timesteps.flatten(0 , 1))
lowercase__ = scheduler.batch_step_no_noise(lowerCAmelCase , timesteps.flatten(0 , 1) , samples.flatten(0 , 1) , lowerCAmelCase)
lowercase__ = torch.sum(torch.abs(lowerCAmelCase))
lowercase__ = torch.mean(torch.abs(lowerCAmelCase))
assert abs(result_sum.item() - 11_47.79_04) < 1E-2
assert abs(result_mean.item() - 0.49_82) < 1E-3
def UpperCAmelCase ( self : Any) -> int:
"""simple docstring"""
lowercase__ = self.full_loop()
lowercase__ = torch.sum(torch.abs(lowerCAmelCase))
lowercase__ = torch.mean(torch.abs(lowerCAmelCase))
assert abs(result_sum.item() - 1_72.00_67) < 1E-2
assert abs(result_mean.item() - 0.22_39_67) < 1E-3
def UpperCAmelCase ( self : int) -> List[Any]:
"""simple docstring"""
lowercase__ = self.full_loop(prediction_type='v_prediction')
lowercase__ = torch.sum(torch.abs(lowerCAmelCase))
lowercase__ = torch.mean(torch.abs(lowerCAmelCase))
assert abs(result_sum.item() - 52.53_02) < 1E-2
assert abs(result_mean.item() - 0.06_84) < 1E-3
def UpperCAmelCase ( self : str) -> Dict:
"""simple docstring"""
lowercase__ = self.full_loop(set_alpha_to_one=lowerCAmelCase , beta_start=0.01)
lowercase__ = torch.sum(torch.abs(lowerCAmelCase))
lowercase__ = torch.mean(torch.abs(lowerCAmelCase))
assert abs(result_sum.item() - 1_49.82_95) < 1E-2
assert abs(result_mean.item() - 0.19_51) < 1E-3
def UpperCAmelCase ( self : str) -> List[Any]:
"""simple docstring"""
lowercase__ = self.full_loop(set_alpha_to_one=lowerCAmelCase , beta_start=0.01)
lowercase__ = torch.sum(torch.abs(lowerCAmelCase))
lowercase__ = torch.mean(torch.abs(lowerCAmelCase))
assert abs(result_sum.item() - 1_49.07_84) < 1E-2
assert abs(result_mean.item() - 0.19_41) < 1E-3
| 642
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a__ : Tuple = {
"configuration_table_transformer": [
"TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TableTransformerConfig",
"TableTransformerOnnxConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Optional[int] = [
"TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TableTransformerForObjectDetection",
"TableTransformerModel",
"TableTransformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
a__ : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 642
|
import cva
import numpy as np
class UpperCAmelCase__:
'''simple docstring'''
def __init__( self : Union[str, Any] , lowerCAmelCase : float , lowerCAmelCase : int) -> Dict:
"""simple docstring"""
if k in (0.04, 0.06):
lowercase__ = k
lowercase__ = window_size
else:
raise ValueError('invalid k value')
def __str__( self : Tuple) -> str:
"""simple docstring"""
return str(self.k)
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : str) -> tuple[cva.Mat, list[list[int]]]:
"""simple docstring"""
lowercase__ = cva.imread(lowerCAmelCase , 0)
lowercase__, lowercase__ = img.shape
lowercase__ = []
lowercase__ = img.copy()
lowercase__ = cva.cvtColor(lowerCAmelCase , cva.COLOR_GRAY2RGB)
lowercase__, lowercase__ = np.gradient(lowerCAmelCase)
lowercase__ = dx**2
lowercase__ = dy**2
lowercase__ = dx * dy
lowercase__ = 0.04
lowercase__ = self.window_size // 2
for y in range(lowerCAmelCase , h - offset):
for x in range(lowerCAmelCase , w - offset):
lowercase__ = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowercase__ = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowercase__ = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowercase__ = (wxx * wyy) - (wxy**2)
lowercase__ = wxx + wyy
lowercase__ = det - k * (trace**2)
# Can change the value
if r > 0.5:
corner_list.append([x, y, r])
color_img.itemset((y, x, 0) , 0)
color_img.itemset((y, x, 1) , 0)
color_img.itemset((y, x, 2) , 2_55)
return color_img, corner_list
if __name__ == "__main__":
a__ : Dict = HarrisCorner(0.0_4, 3)
a__ , a__ : Dict = edge_detect.detect("path_to_image")
cva.imwrite("detect.png", color_img)
| 642
| 1
|
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
a__ : List[str] = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def _lowerCAmelCase ( A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__=False , ):
output_path.parent.mkdir(parents=A__ , exist_ok=A__ )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
A__ , A__ , f=output_path.as_posix() , input_names=A__ , output_names=A__ , dynamic_axes=A__ , do_constant_folding=A__ , use_external_data_format=A__ , enable_onnx_checker=A__ , opset_version=A__ , )
else:
export(
A__ , A__ , f=output_path.as_posix() , input_names=A__ , output_names=A__ , dynamic_axes=A__ , do_constant_folding=A__ , opset_version=A__ , )
@torch.no_grad()
def _lowerCAmelCase ( A__ , A__ , A__ , A__ = False ):
lowercase__ = torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
lowercase__ = 'cuda'
elif fpaa and not torch.cuda.is_available():
raise ValueError('`float16` model export is only supported on GPUs with CUDA' )
else:
lowercase__ = 'cpu'
lowercase__ = Path(A__ )
# VAE DECODER
lowercase__ = AutoencoderKL.from_pretrained(model_path + '/vae' )
lowercase__ = vae_decoder.config.latent_channels
# forward only through the decoder part
lowercase__ = vae_decoder.decode
onnx_export(
A__ , model_args=(
torch.randn(1 , A__ , 25 , 25 ).to(device=A__ , dtype=A__ ),
False,
) , output_path=output_path / 'vae_decoder' / 'model.onnx' , ordered_input_names=['latent_sample', 'return_dict'] , output_names=['sample'] , dynamic_axes={
'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
} , opset=A__ , )
del vae_decoder
if __name__ == "__main__":
a__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=14,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
a__ : Any = parser.parse_args()
print(args.output_path)
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
print("SD: Done: ONNX")
| 642
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ : Dict = logging.get_logger(__name__)
a__ : List[Any] = {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : int = "speech_to_text"
A : Optional[Any] = ["past_key_values"]
A : Optional[int] = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : Optional[int] , lowerCAmelCase : Tuple=1_00_00 , lowerCAmelCase : int=12 , lowerCAmelCase : int=20_48 , lowerCAmelCase : Union[str, Any]=4 , lowerCAmelCase : str=6 , lowerCAmelCase : Dict=20_48 , lowerCAmelCase : Dict=4 , lowerCAmelCase : Optional[int]=0.0 , lowerCAmelCase : Union[str, Any]=0.0 , lowerCAmelCase : int=True , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : Dict="relu" , lowerCAmelCase : Tuple=2_56 , lowerCAmelCase : Dict=0.1 , lowerCAmelCase : Optional[Any]=0.0 , lowerCAmelCase : List[Any]=0.0 , lowerCAmelCase : Any=0.02 , lowerCAmelCase : List[Any]=2 , lowerCAmelCase : Tuple=True , lowerCAmelCase : Tuple=1 , lowerCAmelCase : List[str]=0 , lowerCAmelCase : Union[str, Any]=2 , lowerCAmelCase : Any=60_00 , lowerCAmelCase : Optional[int]=10_24 , lowerCAmelCase : Optional[Any]=2 , lowerCAmelCase : Optional[Any]=(5, 5) , lowerCAmelCase : Union[str, Any]=10_24 , lowerCAmelCase : List[Any]=80 , lowerCAmelCase : List[str]=1 , **lowerCAmelCase : List[str] , ) -> Dict:
"""simple docstring"""
lowercase__ = vocab_size
lowercase__ = d_model
lowercase__ = encoder_ffn_dim
lowercase__ = encoder_layers
lowercase__ = encoder_attention_heads
lowercase__ = decoder_ffn_dim
lowercase__ = decoder_layers
lowercase__ = decoder_attention_heads
lowercase__ = dropout
lowercase__ = attention_dropout
lowercase__ = activation_dropout
lowercase__ = activation_function
lowercase__ = init_std
lowercase__ = encoder_layerdrop
lowercase__ = decoder_layerdrop
lowercase__ = use_cache
lowercase__ = encoder_layers
lowercase__ = scale_embedding # scale factor will be sqrt(d_model) if True
lowercase__ = max_source_positions
lowercase__ = max_target_positions
lowercase__ = num_conv_layers
lowercase__ = list(lowerCAmelCase)
lowercase__ = conv_channels
lowercase__ = input_feat_per_channel
lowercase__ = input_channels
if len(self.conv_kernel_sizes) != self.num_conv_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` '
f'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, '''
f'''`config.num_conv_layers = {self.num_conv_layers}`.''')
super().__init__(
pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , is_encoder_decoder=lowerCAmelCase , decoder_start_token_id=lowerCAmelCase , **lowerCAmelCase , )
| 642
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a__ : Any = {
"configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Any = [
"LILT_PRETRAINED_MODEL_ARCHIVE_LIST",
"LiltForQuestionAnswering",
"LiltForSequenceClassification",
"LiltForTokenClassification",
"LiltModel",
"LiltPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
a__ : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 642
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
a__ : Any = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Optional[int] = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Union[str, Any] = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Any = [
"REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ReformerAttention",
"ReformerForMaskedLM",
"ReformerForQuestionAnswering",
"ReformerForSequenceClassification",
"ReformerLayer",
"ReformerModel",
"ReformerModelWithLMHead",
"ReformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
a__ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 642
| 1
|
def _lowerCAmelCase ( A__ ):
lowercase__ = False
while is_sorted is False: # Until all the indices are traversed keep looping
lowercase__ = True
for i in range(0 , len(A__ ) - 1 , 2 ): # iterating over all even indices
if input_list[i] > input_list[i + 1]:
lowercase__, lowercase__ = input_list[i + 1], input_list[i]
# swapping if elements not in order
lowercase__ = False
for i in range(1 , len(A__ ) - 1 , 2 ): # iterating over all odd indices
if input_list[i] > input_list[i + 1]:
lowercase__, lowercase__ = input_list[i + 1], input_list[i]
# swapping if elements not in order
lowercase__ = False
return input_list
if __name__ == "__main__":
print("Enter list to be sorted")
a__ : int = [int(x) for x in input().split()]
# inputing elements of the list in one line
a__ : Any = odd_even_sort(input_list)
print("The sorted list is")
print(sorted_list)
| 642
|
# Imports
import numpy as np
class UpperCAmelCase__:
'''simple docstring'''
def __init__( self : Any , lowerCAmelCase : Dict=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : List[str]=None , lowerCAmelCase : List[str]=None) -> Dict:
"""simple docstring"""
self.set_matricies(red=lowerCAmelCase , green=lowerCAmelCase , blue=lowerCAmelCase , red_edge=lowerCAmelCase , nir=lowerCAmelCase)
def UpperCAmelCase ( self : Dict , lowerCAmelCase : Dict=None , lowerCAmelCase : Union[str, Any]=None , lowerCAmelCase : Tuple=None , lowerCAmelCase : str=None , lowerCAmelCase : str=None) -> int:
"""simple docstring"""
if red is not None:
lowercase__ = red
if green is not None:
lowercase__ = green
if blue is not None:
lowercase__ = blue
if red_edge is not None:
lowercase__ = red_edge
if nir is not None:
lowercase__ = nir
return True
def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : Union[str, Any]="" , lowerCAmelCase : Tuple=None , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Dict=None) -> Union[str, Any]:
"""simple docstring"""
self.set_matricies(red=lowerCAmelCase , green=lowerCAmelCase , blue=lowerCAmelCase , red_edge=lowerCAmelCase , nir=lowerCAmelCase)
lowercase__ = {
'ARVI2': self.arvaa,
'CCCI': self.ccci,
'CVI': self.cvi,
'GLI': self.gli,
'NDVI': self.ndvi,
'BNDVI': self.bndvi,
'redEdgeNDVI': self.red_edge_ndvi,
'GNDVI': self.gndvi,
'GBNDVI': self.gbndvi,
'GRNDVI': self.grndvi,
'RBNDVI': self.rbndvi,
'PNDVI': self.pndvi,
'ATSAVI': self.atsavi,
'BWDRVI': self.bwdrvi,
'CIgreen': self.ci_green,
'CIrededge': self.ci_rededge,
'CI': self.ci,
'CTVI': self.ctvi,
'GDVI': self.gdvi,
'EVI': self.evi,
'GEMI': self.gemi,
'GOSAVI': self.gosavi,
'GSAVI': self.gsavi,
'Hue': self.hue,
'IVI': self.ivi,
'IPVI': self.ipvi,
'I': self.i,
'RVI': self.rvi,
'MRVI': self.mrvi,
'MSAVI': self.m_savi,
'NormG': self.norm_g,
'NormNIR': self.norm_nir,
'NormR': self.norm_r,
'NGRDI': self.ngrdi,
'RI': self.ri,
'S': self.s,
'IF': self._if,
'DVI': self.dvi,
'TVI': self.tvi,
'NDRE': self.ndre,
}
try:
return funcs[index]()
except KeyError:
print('Index not in the list!')
return False
def UpperCAmelCase ( self : Optional[int]) -> List[str]:
"""simple docstring"""
return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
def UpperCAmelCase ( self : int) -> Any:
"""simple docstring"""
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def UpperCAmelCase ( self : str) -> Optional[int]:
"""simple docstring"""
return self.nir * (self.red / (self.green**2))
def UpperCAmelCase ( self : List[str]) -> Optional[int]:
"""simple docstring"""
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def UpperCAmelCase ( self : Tuple) -> Any:
"""simple docstring"""
return (self.nir - self.red) / (self.nir + self.red)
def UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
return (self.nir - self.blue) / (self.nir + self.blue)
def UpperCAmelCase ( self : List[Any]) -> Optional[int]:
"""simple docstring"""
return (self.redEdge - self.red) / (self.redEdge + self.red)
def UpperCAmelCase ( self : Optional[Any]) -> Optional[int]:
"""simple docstring"""
return (self.nir - self.green) / (self.nir + self.green)
def UpperCAmelCase ( self : Dict) -> int:
"""simple docstring"""
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def UpperCAmelCase ( self : Union[str, Any]) -> Optional[int]:
"""simple docstring"""
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def UpperCAmelCase ( self : Optional[Any]) -> Dict:
"""simple docstring"""
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def UpperCAmelCase ( self : Any) -> Union[str, Any]:
"""simple docstring"""
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : List[Any]=0.08 , lowerCAmelCase : Optional[int]=1.22 , lowerCAmelCase : int=0.03) -> List[Any]:
"""simple docstring"""
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def UpperCAmelCase ( self : Tuple) -> Any:
"""simple docstring"""
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def UpperCAmelCase ( self : int) -> Tuple:
"""simple docstring"""
return (self.nir / self.green) - 1
def UpperCAmelCase ( self : Any) -> str:
"""simple docstring"""
return (self.nir / self.redEdge) - 1
def UpperCAmelCase ( self : Any) -> List[str]:
"""simple docstring"""
return (self.red - self.blue) / self.red
def UpperCAmelCase ( self : Any) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))
def UpperCAmelCase ( self : List[Any]) -> str:
"""simple docstring"""
return self.nir - self.green
def UpperCAmelCase ( self : Tuple) -> List[Any]:
"""simple docstring"""
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def UpperCAmelCase ( self : Any) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.25 * n) - (self.red - 0.1_25) / (1 - self.red)
def UpperCAmelCase ( self : int , lowerCAmelCase : int=0.16) -> Dict:
"""simple docstring"""
return (self.nir - self.green) / (self.nir + self.green + y)
def UpperCAmelCase ( self : str , lowerCAmelCase : Optional[int]=0.5) -> Union[str, Any]:
"""simple docstring"""
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def UpperCAmelCase ( self : str) -> int:
"""simple docstring"""
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue))
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : int=None , lowerCAmelCase : List[str]=None) -> Tuple:
"""simple docstring"""
return (self.nir - b) / (a * self.red)
def UpperCAmelCase ( self : int) -> Dict:
"""simple docstring"""
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def UpperCAmelCase ( self : Optional[int]) -> Optional[Any]:
"""simple docstring"""
return (self.red + self.green + self.blue) / 30.5
def UpperCAmelCase ( self : int) -> str:
"""simple docstring"""
return self.nir / self.red
def UpperCAmelCase ( self : Optional[int]) -> Optional[Any]:
"""simple docstring"""
return (self.rvi() - 1) / (self.rvi() + 1)
def UpperCAmelCase ( self : Optional[int]) -> Optional[int]:
"""simple docstring"""
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def UpperCAmelCase ( self : Tuple) -> Any:
"""simple docstring"""
return self.green / (self.nir + self.red + self.green)
def UpperCAmelCase ( self : Any) -> Optional[Any]:
"""simple docstring"""
return self.nir / (self.nir + self.red + self.green)
def UpperCAmelCase ( self : List[Any]) -> Dict:
"""simple docstring"""
return self.red / (self.nir + self.red + self.green)
def UpperCAmelCase ( self : Optional[Any]) -> Any:
"""simple docstring"""
return (self.green - self.red) / (self.green + self.red)
def UpperCAmelCase ( self : Dict) -> Tuple:
"""simple docstring"""
return (self.red - self.green) / (self.red + self.green)
def UpperCAmelCase ( self : str) -> int:
"""simple docstring"""
lowercase__ = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
lowercase__ = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
return (max_value - min_value) / max_value
def UpperCAmelCase ( self : Optional[int]) -> Tuple:
"""simple docstring"""
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def UpperCAmelCase ( self : int) -> Optional[Any]:
"""simple docstring"""
return self.nir / self.red
def UpperCAmelCase ( self : Dict) -> Dict:
"""simple docstring"""
return (self.ndvi() + 0.5) ** (1 / 2)
def UpperCAmelCase ( self : str) -> List[Any]:
"""simple docstring"""
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
| 642
| 1
|
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
a__ : int = logging.get_logger(__name__)
a__ : Tuple = {"vocab_file": "vocab.txt"}
a__ : int = {
"vocab_file": {
"facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
"facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
},
}
a__ : Dict = {
"facebook/esm2_t6_8M_UR50D": 10_24,
"facebook/esm2_t12_35M_UR50D": 10_24,
}
def _lowerCAmelCase ( A__ ):
with open(A__ , 'r' ) as f:
lowercase__ = f.read().splitlines()
return [l.strip() for l in lines]
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : Union[str, Any] = VOCAB_FILES_NAMES
A : str = PRETRAINED_VOCAB_FILES_MAP
A : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : List[Any] = ["input_ids", "attention_mask"]
def __init__( self : Union[str, Any] , lowerCAmelCase : Any , lowerCAmelCase : Optional[int]="<unk>" , lowerCAmelCase : Dict="<cls>" , lowerCAmelCase : List[str]="<pad>" , lowerCAmelCase : Union[str, Any]="<mask>" , lowerCAmelCase : Optional[Any]="<eos>" , **lowerCAmelCase : Any , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**lowerCAmelCase)
lowercase__ = load_vocab_file(lowerCAmelCase)
lowercase__ = dict(enumerate(self.all_tokens))
lowercase__ = {tok: ind for ind, tok in enumerate(self.all_tokens)}
lowercase__ = unk_token
lowercase__ = cls_token
lowercase__ = pad_token
lowercase__ = mask_token
lowercase__ = eos_token
lowercase__ = self.all_tokens
self._create_trie(self.unique_no_split_tokens)
def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : int) -> str:
"""simple docstring"""
return self._id_to_token.get(lowerCAmelCase , self.unk_token)
def UpperCAmelCase ( self : Dict , lowerCAmelCase : str) -> int:
"""simple docstring"""
return self._token_to_id.get(lowerCAmelCase , self._token_to_id.get(self.unk_token))
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : str , **lowerCAmelCase : Union[str, Any]) -> Dict:
"""simple docstring"""
return text.split()
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : Any=False) -> Union[str, Any]:
"""simple docstring"""
return len(self._id_to_token)
def UpperCAmelCase ( self : Tuple) -> int:
"""simple docstring"""
return {token: i for i, token in enumerate(self.all_tokens)}
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : str) -> int:
"""simple docstring"""
return self._token_to_id.get(lowerCAmelCase , self._token_to_id.get(self.unk_token))
def UpperCAmelCase ( self : Dict , lowerCAmelCase : int) -> str:
"""simple docstring"""
return self._id_to_token.get(lowerCAmelCase , self.unk_token)
def UpperCAmelCase ( self : Any , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
lowercase__ = [self.cls_token_id]
lowercase__ = [self.eos_token_id] # No sep token in ESM vocabulary
if token_ids_a is None:
if self.eos_token_id is None:
return cls + token_ids_a
else:
return cls + token_ids_a + sep
elif self.eos_token_id is None:
raise ValueError('Cannot tokenize multiple sequences when EOS token is not set!')
return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : List , lowerCAmelCase : Optional[List] = None , lowerCAmelCase : bool = False) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.')
return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
lowercase__ = [1] + ([0] * len(lowerCAmelCase)) + [1]
if token_ids_a is not None:
mask += [0] * len(lowerCAmelCase) + [1]
return mask
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int]) -> Dict:
"""simple docstring"""
lowercase__ = os.path.join(lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + 'vocab.txt')
with open(lowerCAmelCase , 'w') as f:
f.write('\n'.join(self.all_tokens))
return (vocab_file,)
@property
def UpperCAmelCase ( self : Optional[int]) -> int:
"""simple docstring"""
return self.get_vocab_size(with_added_tokens=lowerCAmelCase)
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : Union[List[str], List[AddedToken]] , lowerCAmelCase : bool = False) -> int:
"""simple docstring"""
return super()._add_tokens(lowerCAmelCase , special_tokens=lowerCAmelCase)
| 642
|
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class UpperCAmelCase__( unittest.TestCase , lowerCamelCase ):
'''simple docstring'''
def UpperCAmelCase ( self : List[str]) -> Any:
"""simple docstring"""
lowercase__ = load_tool('text-classification')
self.tool.setup()
lowercase__ = load_tool('text-classification' , remote=lowerCAmelCase)
def UpperCAmelCase ( self : Any) -> Tuple:
"""simple docstring"""
lowercase__ = self.tool('That\'s quite cool' , ['positive', 'negative'])
self.assertEqual(lowerCAmelCase , 'positive')
def UpperCAmelCase ( self : int) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.remote_tool('That\'s quite cool' , ['positive', 'negative'])
self.assertEqual(lowerCAmelCase , 'positive')
def UpperCAmelCase ( self : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.tool(text='That\'s quite cool' , labels=['positive', 'negative'])
self.assertEqual(lowerCAmelCase , 'positive')
def UpperCAmelCase ( self : Any) -> Any:
"""simple docstring"""
lowercase__ = self.remote_tool(text='That\'s quite cool' , labels=['positive', 'negative'])
self.assertEqual(lowerCAmelCase , 'positive')
| 642
| 1
|
import functools
def _lowerCAmelCase ( A__ , A__ ):
lowercase__ = len(A__ )
lowercase__ = len(A__ )
@functools.cache
def min_distance(A__ , A__ ) -> int:
# if first word index is overflow - delete all from the second word
if indexa >= len_worda:
return len_worda - indexa
# if second word index is overflow - delete all from the first word
if indexa >= len_worda:
return len_worda - indexa
lowercase__ = int(worda[indexa] != worda[indexa] ) # current letters not identical
return min(
1 + min_distance(indexa + 1 , A__ ) , 1 + min_distance(A__ , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , )
return min_distance(0 , 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 642
|
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : List[Any] = None
A : Optional[int] = None
@property
def UpperCAmelCase ( self : str) -> Union[str, Any]:
"""simple docstring"""
return self.feat_extract_tester.prepare_feat_extract_dict()
def UpperCAmelCase ( self : int) -> Any:
"""simple docstring"""
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
self.assertTrue(hasattr(lowerCAmelCase , 'feature_size'))
self.assertTrue(hasattr(lowerCAmelCase , 'sampling_rate'))
self.assertTrue(hasattr(lowerCAmelCase , 'padding_value'))
def UpperCAmelCase ( self : Union[str, Any]) -> Dict:
"""simple docstring"""
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
self.assertTrue(all(len(lowerCAmelCase) == len(lowerCAmelCase) for x, y in zip(lowerCAmelCase , processed_features[input_name])))
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCAmelCase)
lowercase__ = BatchFeature({input_name: speech_inputs} , tensor_type='np')
lowercase__ = processed_features[input_name]
if len(batch_features_input.shape) < 3:
lowercase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
@require_torch
def UpperCAmelCase ( self : Dict) -> int:
"""simple docstring"""
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCAmelCase)
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs} , tensor_type='pt')
lowercase__ = processed_features[input_name]
if len(batch_features_input.shape) < 3:
lowercase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
@require_tf
def UpperCAmelCase ( self : Optional[Any]) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCAmelCase)
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs} , tensor_type='tf')
lowercase__ = processed_features[input_name]
if len(batch_features_input.shape) < 3:
lowercase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
def UpperCAmelCase ( self : str , lowerCAmelCase : str=False) -> Union[str, Any]:
"""simple docstring"""
def _inputs_have_equal_length(lowerCAmelCase : int):
lowercase__ = len(input[0])
for input_slice in input[1:]:
if len(lowerCAmelCase) != length:
return False
return True
def _inputs_are_equal(lowerCAmelCase : Optional[Any] , lowerCAmelCase : Tuple):
if len(lowerCAmelCase) != len(lowerCAmelCase):
return False
for input_slice_a, input_slice_a in zip(lowerCAmelCase , lowerCAmelCase):
if not np.allclose(np.asarray(lowerCAmelCase) , np.asarray(lowerCAmelCase) , atol=1E-3):
return False
return True
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(numpify=lowerCAmelCase)
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
lowercase__ = self.feat_extract_tester.seq_length_diff
lowercase__ = self.feat_extract_tester.max_seq_length + pad_diff
lowercase__ = self.feat_extract_tester.min_seq_length
lowercase__ = self.feat_extract_tester.batch_size
lowercase__ = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
lowercase__ = feat_extract.pad(lowerCAmelCase , padding=lowerCAmelCase)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest')
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[-1]))
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='np')
lowercase__ = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(lowerCAmelCase):
feat_extract.pad(lowerCAmelCase , padding='max_length')[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=lowerCAmelCase , return_tensors='np')
lowercase__ = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(_inputs_are_equal(lowerCAmelCase , lowerCAmelCase))
self.assertTrue(len(input_a[0]) == pad_min_length)
self.assertTrue(len(input_a[1]) == pad_min_length + pad_diff)
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0])))
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length))
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size)
# test padding for `pad_to_multiple_of` for List[int] + numpy
lowercase__ = feat_extract.pad(lowerCAmelCase , pad_to_multiple_of=10)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , pad_to_multiple_of=10)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , pad_to_multiple_of=10 , max_length=lowerCAmelCase)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , pad_to_multiple_of=10 , max_length=lowerCAmelCase , return_tensors='np' , )
lowercase__ = input_a[input_name]
self.assertTrue(all(len(lowerCAmelCase) % 10 == 0 for x in input_a))
self.assertTrue(_inputs_are_equal(lowerCAmelCase , lowerCAmelCase))
lowercase__ = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(lowerCAmelCase) == expected_mult_pad_length for x in input_a))
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length))
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size)
# Check padding value is correct
lowercase__ = (np.ones(self.feat_extract_tester.feature_size) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0])[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length))
< 1E-3)
self.assertTrue(
abs(
np.asarray(input_a[1])[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff))
< 1E-3)
self.assertTrue(
abs(
np.asarray(input_a[2])[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff))
< 1E-3)
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1E-3)
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length))
< 1E-3)
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : Dict=False) -> str:
"""simple docstring"""
def _inputs_have_equal_length(lowerCAmelCase : int):
lowercase__ = len(input[0])
for input_slice in input[1:]:
if len(lowerCAmelCase) != length:
return False
return True
def _inputs_are_equal(lowerCAmelCase : str , lowerCAmelCase : Optional[Any]):
if len(lowerCAmelCase) != len(lowerCAmelCase):
return False
for input_slice_a, input_slice_a in zip(lowerCAmelCase , lowerCAmelCase):
if not np.allclose(np.asarray(lowerCAmelCase) , np.asarray(lowerCAmelCase) , atol=1E-3):
return False
return True
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(numpify=lowerCAmelCase)
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
# truncate to smallest
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , truncation=lowerCAmelCase)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]))
lowercase__ = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase))
# truncate to smallest with np
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , return_tensors='np' , truncation=lowerCAmelCase , )
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , return_tensors='np')
lowercase__ = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(input_a.shape[1] == len(speech_inputs[0]))
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase))
# truncate to middle
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[1]) , truncation=lowerCAmelCase , return_tensors='np' , )
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[1]) , truncation=lowerCAmelCase)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[1]) , return_tensors='np')
lowercase__ = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1]))
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(_inputs_are_equal(lowerCAmelCase , lowerCAmelCase))
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(len(input_a[-1]) == len(speech_inputs[-1]))
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(lowerCAmelCase):
feat_extract.pad(lowerCAmelCase , truncation=lowerCAmelCase)[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(lowerCAmelCase):
feat_extract.pad(lowerCAmelCase , padding='longest' , truncation=lowerCAmelCase)[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(lowerCAmelCase):
feat_extract.pad(lowerCAmelCase , padding='longest' , truncation=lowerCAmelCase)[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(lowerCAmelCase):
feat_extract.pad(lowerCAmelCase , padding='max_length' , truncation=lowerCAmelCase)[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
lowercase__ = 12
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , pad_to_multiple_of=lowerCAmelCase , truncation=lowerCAmelCase , )
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , pad_to_multiple_of=lowerCAmelCase , )
lowercase__ = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
lowercase__ = len(speech_inputs[0])
if expected_length % pad_to_multiple_of != 0:
lowercase__ = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0]) == expected_length)
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase))
def UpperCAmelCase ( self : List[str]) -> List[str]:
"""simple docstring"""
self._check_padding(numpify=lowerCAmelCase)
def UpperCAmelCase ( self : Any) -> Optional[Any]:
"""simple docstring"""
self._check_padding(numpify=lowerCAmelCase)
def UpperCAmelCase ( self : List[Any]) -> int:
"""simple docstring"""
self._check_truncation(numpify=lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> Dict:
"""simple docstring"""
self._check_truncation(numpify=lowerCAmelCase)
@require_torch
def UpperCAmelCase ( self : Dict) -> List[str]:
"""simple docstring"""
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='np')[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='pt')[input_name]
self.assertTrue(abs(input_np.astype(np.floataa).sum() - input_pt.numpy().astype(np.floataa).sum()) < 1E-2)
@require_tf
def UpperCAmelCase ( self : str) -> str:
"""simple docstring"""
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='np')[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='tf')[input_name]
self.assertTrue(abs(input_np.astype(np.floataa).sum() - input_tf.numpy().astype(np.floataa).sum()) < 1E-2)
def UpperCAmelCase ( self : Optional[Any]) -> Tuple:
"""simple docstring"""
lowercase__ = self.feat_extract_dict
lowercase__ = True
lowercase__ = self.feature_extraction_class(**lowerCAmelCase)
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowercase__ = [len(lowerCAmelCase) for x in speech_inputs]
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='np')
self.assertIn('attention_mask' , lowerCAmelCase)
self.assertListEqual(list(processed.attention_mask.shape) , list(processed[input_name].shape[:2]))
self.assertListEqual(processed.attention_mask.sum(-1).tolist() , lowerCAmelCase)
def UpperCAmelCase ( self : str) -> Tuple:
"""simple docstring"""
lowercase__ = self.feat_extract_dict
lowercase__ = True
lowercase__ = self.feature_extraction_class(**lowerCAmelCase)
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowercase__ = [len(lowerCAmelCase) for x in speech_inputs]
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
lowercase__ = min(lowerCAmelCase)
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=lowerCAmelCase , truncation=lowerCAmelCase , return_tensors='np')
self.assertIn('attention_mask' , lowerCAmelCase)
self.assertListEqual(
list(processed_pad.attention_mask.shape) , [processed_pad[input_name].shape[0], max_length])
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1).tolist() , [max_length for x in speech_inputs])
| 642
| 1
|
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class UpperCAmelCase__:
'''simple docstring'''
def UpperCAmelCase ( self : Tuple) -> Dict:
"""simple docstring"""
torch.manual_seed(0)
lowercase__ = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5')
torch.manual_seed(0)
lowercase__ = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5')
torch.manual_seed(0)
lowercase__ = UNetaDConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
'ResnetDownsampleBlock2D',
'SimpleCrossAttnDownBlock2D',
] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , )
unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests
torch.manual_seed(0)
lowercase__ = DDPMScheduler(
num_train_timesteps=10_00 , beta_schedule='squaredcos_cap_v2' , beta_start=0.00_01 , beta_end=0.02 , thresholding=lowerCAmelCase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , )
torch.manual_seed(0)
lowercase__ = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def UpperCAmelCase ( self : int) -> str:
"""simple docstring"""
torch.manual_seed(0)
lowercase__ = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5')
torch.manual_seed(0)
lowercase__ = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5')
torch.manual_seed(0)
lowercase__ = UNetaDConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
'ResnetDownsampleBlock2D',
'SimpleCrossAttnDownBlock2D',
] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , class_embed_type='timestep' , mid_block_scale_factor=1.4_14 , time_embedding_act_fn='gelu' , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests
torch.manual_seed(0)
lowercase__ = DDPMScheduler(
num_train_timesteps=10_00 , beta_schedule='squaredcos_cap_v2' , beta_start=0.00_01 , beta_end=0.02 , thresholding=lowerCAmelCase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , )
torch.manual_seed(0)
lowercase__ = DDPMScheduler(
num_train_timesteps=10_00 , beta_schedule='squaredcos_cap_v2' , beta_start=0.00_01 , beta_end=0.02 , )
torch.manual_seed(0)
lowercase__ = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def UpperCAmelCase ( self : List[Any]) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.get_dummy_components()
lowercase__ = self.pipeline_class(**lowerCAmelCase)
pipe.to(lowerCAmelCase)
pipe.set_progress_bar_config(disable=lowerCAmelCase)
lowercase__ = self.get_dummy_inputs(lowerCAmelCase)
lowercase__ = inputs['prompt']
lowercase__ = inputs['generator']
lowercase__ = inputs['num_inference_steps']
lowercase__ = inputs['output_type']
if "image" in inputs:
lowercase__ = inputs['image']
else:
lowercase__ = None
if "mask_image" in inputs:
lowercase__ = inputs['mask_image']
else:
lowercase__ = None
if "original_image" in inputs:
lowercase__ = inputs['original_image']
else:
lowercase__ = None
lowercase__, lowercase__ = pipe.encode_prompt(lowerCAmelCase)
# inputs with prompt converted to embeddings
lowercase__ = {
'prompt_embeds': prompt_embeds,
'negative_prompt_embeds': negative_prompt_embeds,
'generator': generator,
'num_inference_steps': num_inference_steps,
'output_type': output_type,
}
if image is not None:
lowercase__ = image
if mask_image is not None:
lowercase__ = mask_image
if original_image is not None:
lowercase__ = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase)
lowercase__ = pipe(**lowerCAmelCase)[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(lowerCAmelCase)
lowercase__ = self.pipeline_class.from_pretrained(lowerCAmelCase)
pipe_loaded.to(lowerCAmelCase)
pipe_loaded.set_progress_bar_config(disable=lowerCAmelCase)
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(lowerCAmelCase , lowerCAmelCase) is None , f'''`{optional_component}` did not stay set to None after loading.''' , )
lowercase__ = self.get_dummy_inputs(lowerCAmelCase)
lowercase__ = inputs['generator']
lowercase__ = inputs['num_inference_steps']
lowercase__ = inputs['output_type']
# inputs with prompt converted to embeddings
lowercase__ = {
'prompt_embeds': prompt_embeds,
'negative_prompt_embeds': negative_prompt_embeds,
'generator': generator,
'num_inference_steps': num_inference_steps,
'output_type': output_type,
}
if image is not None:
lowercase__ = image
if mask_image is not None:
lowercase__ = mask_image
if original_image is not None:
lowercase__ = original_image
lowercase__ = pipe_loaded(**lowerCAmelCase)[0]
lowercase__ = np.abs(to_np(lowerCAmelCase) - to_np(lowerCAmelCase)).max()
self.assertLess(lowerCAmelCase , 1E-4)
def UpperCAmelCase ( self : Dict) -> str:
"""simple docstring"""
lowercase__ = self.get_dummy_components()
lowercase__ = self.pipeline_class(**lowerCAmelCase)
pipe.to(lowerCAmelCase)
pipe.set_progress_bar_config(disable=lowerCAmelCase)
lowercase__ = self.get_dummy_inputs(lowerCAmelCase)
lowercase__ = pipe(**lowerCAmelCase)[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(lowerCAmelCase)
lowercase__ = self.pipeline_class.from_pretrained(lowerCAmelCase)
pipe_loaded.to(lowerCAmelCase)
pipe_loaded.set_progress_bar_config(disable=lowerCAmelCase)
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests
lowercase__ = self.get_dummy_inputs(lowerCAmelCase)
lowercase__ = pipe_loaded(**lowerCAmelCase)[0]
lowercase__ = np.abs(to_np(lowerCAmelCase) - to_np(lowerCAmelCase)).max()
self.assertLess(lowerCAmelCase , 1E-4)
| 642
|
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def _lowerCAmelCase ( A__ ):
lowercase__ = prime_factors(A__ )
if is_square_free(A__ ):
return -1 if len(A__ ) % 2 else 1
return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 642
| 1
|
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class UpperCAmelCase__( unittest.TestCase , lowerCamelCase ):
'''simple docstring'''
def UpperCAmelCase ( self : List[str]) -> Any:
"""simple docstring"""
lowercase__ = load_tool('text-classification')
self.tool.setup()
lowercase__ = load_tool('text-classification' , remote=lowerCAmelCase)
def UpperCAmelCase ( self : Any) -> Tuple:
"""simple docstring"""
lowercase__ = self.tool('That\'s quite cool' , ['positive', 'negative'])
self.assertEqual(lowerCAmelCase , 'positive')
def UpperCAmelCase ( self : int) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.remote_tool('That\'s quite cool' , ['positive', 'negative'])
self.assertEqual(lowerCAmelCase , 'positive')
def UpperCAmelCase ( self : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.tool(text='That\'s quite cool' , labels=['positive', 'negative'])
self.assertEqual(lowerCAmelCase , 'positive')
def UpperCAmelCase ( self : Any) -> Any:
"""simple docstring"""
lowercase__ = self.remote_tool(text='That\'s quite cool' , labels=['positive', 'negative'])
self.assertEqual(lowerCAmelCase , 'positive')
| 642
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
a__ : List[str] = logging.get_logger(__name__)
a__ : List[Any] = {
"microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}
class UpperCAmelCase__( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
A : List[str] = "focalnet"
def __init__( self : Dict , lowerCAmelCase : Union[str, Any]=2_24 , lowerCAmelCase : List[str]=4 , lowerCAmelCase : int=3 , lowerCAmelCase : Union[str, Any]=96 , lowerCAmelCase : List[Any]=False , lowerCAmelCase : int=[1_92, 3_84, 7_68, 7_68] , lowerCAmelCase : str=[2, 2, 6, 2] , lowerCAmelCase : Tuple=[2, 2, 2, 2] , lowerCAmelCase : Optional[Any]=[3, 3, 3, 3] , lowerCAmelCase : int="gelu" , lowerCAmelCase : Any=4.0 , lowerCAmelCase : List[str]=0.0 , lowerCAmelCase : Union[str, Any]=0.1 , lowerCAmelCase : Union[str, Any]=False , lowerCAmelCase : Tuple=1E-4 , lowerCAmelCase : List[Any]=False , lowerCAmelCase : Optional[int]=False , lowerCAmelCase : List[str]=False , lowerCAmelCase : str=0.02 , lowerCAmelCase : Optional[int]=1E-5 , lowerCAmelCase : List[Any]=32 , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Union[str, Any]=None , **lowerCAmelCase : str , ) -> List[str]:
"""simple docstring"""
super().__init__(**lowerCAmelCase)
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = embed_dim
lowercase__ = use_conv_embed
lowercase__ = hidden_sizes
lowercase__ = depths
lowercase__ = focal_levels
lowercase__ = focal_windows
lowercase__ = hidden_act
lowercase__ = mlp_ratio
lowercase__ = hidden_dropout_prob
lowercase__ = drop_path_rate
lowercase__ = use_layerscale
lowercase__ = layerscale_value
lowercase__ = use_post_layernorm
lowercase__ = use_post_layernorm_in_modulation
lowercase__ = normalize_modulator
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = encoder_stride
lowercase__ = ['stem'] + [f'''stage{idx}''' for idx in range(1 , len(self.depths) + 1)]
lowercase__, lowercase__ = get_aligned_output_features_output_indices(
out_features=lowerCAmelCase , out_indices=lowerCAmelCase , stage_names=self.stage_names)
| 642
| 1
|
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : str = (DDIMParallelScheduler,)
A : Any = (("eta", 0.0), ("num_inference_steps", 50))
def UpperCAmelCase ( self : Union[str, Any] , **lowerCAmelCase : Optional[int]) -> Dict:
"""simple docstring"""
lowercase__ = {
'num_train_timesteps': 10_00,
'beta_start': 0.00_01,
'beta_end': 0.02,
'beta_schedule': 'linear',
'clip_sample': True,
}
config.update(**lowerCAmelCase)
return config
def UpperCAmelCase ( self : int , **lowerCAmelCase : str) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config(**lowerCAmelCase)
lowercase__ = scheduler_class(**lowerCAmelCase)
lowercase__, lowercase__ = 10, 0.0
lowercase__ = self.dummy_model()
lowercase__ = self.dummy_sample_deter
scheduler.set_timesteps(lowerCAmelCase)
for t in scheduler.timesteps:
lowercase__ = model(lowerCAmelCase , lowerCAmelCase)
lowercase__ = scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase).prev_sample
return sample
def UpperCAmelCase ( self : Tuple) -> int:
"""simple docstring"""
for timesteps in [1_00, 5_00, 10_00]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase)
def UpperCAmelCase ( self : Tuple) -> Any:
"""simple docstring"""
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=lowerCAmelCase)
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config(steps_offset=1)
lowercase__ = scheduler_class(**lowerCAmelCase)
scheduler.set_timesteps(5)
assert torch.equal(scheduler.timesteps , torch.LongTensor([8_01, 6_01, 4_01, 2_01, 1]))
def UpperCAmelCase ( self : str) -> Tuple:
"""simple docstring"""
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2]):
self.check_over_configs(beta_start=lowerCAmelCase , beta_end=lowerCAmelCase)
def UpperCAmelCase ( self : Optional[int]) -> str:
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowerCAmelCase)
def UpperCAmelCase ( self : List[str]) -> List[str]:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCAmelCase)
def UpperCAmelCase ( self : List[Any]) -> str:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowerCAmelCase)
def UpperCAmelCase ( self : Optional[int]) -> int:
"""simple docstring"""
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=lowerCAmelCase)
def UpperCAmelCase ( self : Any) -> List[str]:
"""simple docstring"""
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=lowerCAmelCase)
def UpperCAmelCase ( self : List[str]) -> Optional[int]:
"""simple docstring"""
self.check_over_configs(thresholding=lowerCAmelCase)
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=lowerCAmelCase , prediction_type=lowerCAmelCase , sample_max_value=lowerCAmelCase , )
def UpperCAmelCase ( self : int) -> Optional[Any]:
"""simple docstring"""
for t in [1, 10, 49]:
self.check_over_forward(time_step=lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> int:
"""simple docstring"""
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 5_00]):
self.check_over_forward(time_step=lowerCAmelCase , num_inference_steps=lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> List[Any]:
"""simple docstring"""
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0]):
self.check_over_forward(time_step=lowerCAmelCase , eta=lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> List[Any]:
"""simple docstring"""
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**lowerCAmelCase)
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0) - 0.0)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_20 , 4_00) - 0.1_47_71)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_80 , 9_60) - 0.3_24_60)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0) - 0.0)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 , 4_86) - 0.0_09_79)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 , 9_98) - 0.02)) < 1E-5
def UpperCAmelCase ( self : Dict) -> Tuple:
"""simple docstring"""
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**lowerCAmelCase)
lowercase__, lowercase__ = 10, 0.0
scheduler.set_timesteps(lowerCAmelCase)
lowercase__ = self.dummy_model()
lowercase__ = self.dummy_sample_deter
lowercase__ = self.dummy_sample_deter + 0.1
lowercase__ = self.dummy_sample_deter - 0.1
lowercase__ = samplea.shape[0]
lowercase__ = torch.stack([samplea, samplea, samplea] , dim=0)
lowercase__ = torch.arange(lowerCAmelCase)[0:3, None].repeat(1 , lowerCAmelCase)
lowercase__ = model(samples.flatten(0 , 1) , timesteps.flatten(0 , 1))
lowercase__ = scheduler.batch_step_no_noise(lowerCAmelCase , timesteps.flatten(0 , 1) , samples.flatten(0 , 1) , lowerCAmelCase)
lowercase__ = torch.sum(torch.abs(lowerCAmelCase))
lowercase__ = torch.mean(torch.abs(lowerCAmelCase))
assert abs(result_sum.item() - 11_47.79_04) < 1E-2
assert abs(result_mean.item() - 0.49_82) < 1E-3
def UpperCAmelCase ( self : Any) -> int:
"""simple docstring"""
lowercase__ = self.full_loop()
lowercase__ = torch.sum(torch.abs(lowerCAmelCase))
lowercase__ = torch.mean(torch.abs(lowerCAmelCase))
assert abs(result_sum.item() - 1_72.00_67) < 1E-2
assert abs(result_mean.item() - 0.22_39_67) < 1E-3
def UpperCAmelCase ( self : int) -> List[Any]:
"""simple docstring"""
lowercase__ = self.full_loop(prediction_type='v_prediction')
lowercase__ = torch.sum(torch.abs(lowerCAmelCase))
lowercase__ = torch.mean(torch.abs(lowerCAmelCase))
assert abs(result_sum.item() - 52.53_02) < 1E-2
assert abs(result_mean.item() - 0.06_84) < 1E-3
def UpperCAmelCase ( self : str) -> Dict:
"""simple docstring"""
lowercase__ = self.full_loop(set_alpha_to_one=lowerCAmelCase , beta_start=0.01)
lowercase__ = torch.sum(torch.abs(lowerCAmelCase))
lowercase__ = torch.mean(torch.abs(lowerCAmelCase))
assert abs(result_sum.item() - 1_49.82_95) < 1E-2
assert abs(result_mean.item() - 0.19_51) < 1E-3
def UpperCAmelCase ( self : str) -> List[Any]:
"""simple docstring"""
lowercase__ = self.full_loop(set_alpha_to_one=lowerCAmelCase , beta_start=0.01)
lowercase__ = torch.sum(torch.abs(lowerCAmelCase))
lowercase__ = torch.mean(torch.abs(lowerCAmelCase))
assert abs(result_sum.item() - 1_49.07_84) < 1E-2
assert abs(result_mean.item() - 0.19_41) < 1E-3
| 642
|
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a__ : Optional[int] = logging.get_logger(__name__)
a__ : Dict = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
a__ : str = {
"vocab_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
},
"merges_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
},
"tokenizer_config_file": {
"facebook/blenderbot_small-90M": (
"https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
)
},
}
a__ : Any = {"facebook/blenderbot_small-90M": 5_12}
def _lowerCAmelCase ( A__ ):
lowercase__ = set()
lowercase__ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowercase__ = char
lowercase__ = set(A__ )
return pairs
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : List[str] = VOCAB_FILES_NAMES
A : str = PRETRAINED_VOCAB_FILES_MAP
A : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : Tuple = ["input_ids", "attention_mask"]
def __init__( self : Optional[int] , lowerCAmelCase : Dict , lowerCAmelCase : Dict , lowerCAmelCase : int="__start__" , lowerCAmelCase : Dict="__end__" , lowerCAmelCase : Any="__unk__" , lowerCAmelCase : str="__null__" , **lowerCAmelCase : Optional[Any] , ) -> List[str]:
"""simple docstring"""
super().__init__(unk_token=lowerCAmelCase , bos_token=lowerCAmelCase , eos_token=lowerCAmelCase , pad_token=lowerCAmelCase , **lowerCAmelCase)
with open(lowerCAmelCase , encoding='utf-8') as vocab_handle:
lowercase__ = json.load(lowerCAmelCase)
lowercase__ = {v: k for k, v in self.encoder.items()}
with open(lowerCAmelCase , encoding='utf-8') as merges_handle:
lowercase__ = merges_handle.read().split('\n')[1:-1]
lowercase__ = [tuple(merge.split()) for merge in merges]
lowercase__ = dict(zip(lowerCAmelCase , range(len(lowerCAmelCase))))
lowercase__ = {}
@property
def UpperCAmelCase ( self : int) -> int:
"""simple docstring"""
return len(self.encoder)
def UpperCAmelCase ( self : str) -> Dict:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder)
def UpperCAmelCase ( self : str , lowerCAmelCase : str) -> str:
"""simple docstring"""
if token in self.cache:
return self.cache[token]
lowercase__ = re.sub('([.,!?()])' , R' \1' , lowerCAmelCase)
lowercase__ = re.sub('(\')' , R' \1 ' , lowerCAmelCase)
lowercase__ = re.sub(R'\s{2,}' , ' ' , lowerCAmelCase)
if "\n" in token:
lowercase__ = token.replace('\n' , ' __newln__')
lowercase__ = token.split(' ')
lowercase__ = []
for token in tokens:
if not len(lowerCAmelCase):
continue
lowercase__ = token.lower()
lowercase__ = tuple(lowerCAmelCase)
lowercase__ = tuple(list(word[:-1]) + [word[-1] + '</w>'])
lowercase__ = get_pairs(lowerCAmelCase)
if not pairs:
words.append(lowerCAmelCase)
continue
while True:
lowercase__ = min(lowerCAmelCase , key=lambda lowerCAmelCase: self.bpe_ranks.get(lowerCAmelCase , float('inf')))
if bigram not in self.bpe_ranks:
break
lowercase__, lowercase__ = bigram
lowercase__ = []
lowercase__ = 0
while i < len(lowerCAmelCase):
try:
lowercase__ = word.index(lowerCAmelCase , lowerCAmelCase)
new_word.extend(word[i:j])
lowercase__ = j
except ValueError:
new_word.extend(word[i:])
break
if word[i] == first and i < len(lowerCAmelCase) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
lowercase__ = tuple(lowerCAmelCase)
lowercase__ = new_word
if len(lowerCAmelCase) == 1:
break
else:
lowercase__ = get_pairs(lowerCAmelCase)
lowercase__ = '@@ '.join(lowerCAmelCase)
lowercase__ = word[:-4]
lowercase__ = word
words.append(lowerCAmelCase)
return " ".join(lowerCAmelCase)
def UpperCAmelCase ( self : List[str] , lowerCAmelCase : str) -> List[str]:
"""simple docstring"""
lowercase__ = []
lowercase__ = re.findall(R'\S+\n?' , lowerCAmelCase)
for token in words:
split_tokens.extend(list(self.bpe(lowerCAmelCase).split(' ')))
return split_tokens
def UpperCAmelCase ( self : int , lowerCAmelCase : str) -> int:
"""simple docstring"""
lowercase__ = token.lower()
return self.encoder.get(lowerCAmelCase , self.encoder.get(self.unk_token))
def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : int) -> str:
"""simple docstring"""
return self.decoder.get(lowerCAmelCase , self.unk_token)
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : List[str]) -> str:
"""simple docstring"""
lowercase__ = ' '.join(lowerCAmelCase).replace('@@ ' , '').strip()
return out_string
def UpperCAmelCase ( self : str , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(lowerCAmelCase):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
return
lowercase__ = os.path.join(
lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
lowercase__ = os.path.join(
lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
with open(lowerCAmelCase , 'w' , encoding='utf-8') as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase , ensure_ascii=lowerCAmelCase) + '\n')
lowercase__ = 0
with open(lowerCAmelCase , 'w' , encoding='utf-8') as writer:
writer.write('#version: 0.2\n')
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCAmelCase: kv[1]):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
' Please check that the tokenizer is not corrupted!')
lowercase__ = token_index
writer.write(' '.join(lowerCAmelCase) + '\n')
index += 1
return vocab_file, merge_file
| 642
| 1
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a__ : int = logging.get_logger(__name__)
a__ : Tuple = {
"google/mobilenet_v2_1.4_224": "https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json",
"google/mobilenet_v2_1.0_224": "https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json",
"google/mobilenet_v2_0.75_160": "https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json",
"google/mobilenet_v2_0.35_96": "https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json",
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : Optional[Any] = "mobilenet_v2"
def __init__( self : Dict , lowerCAmelCase : Optional[int]=3 , lowerCAmelCase : Union[str, Any]=2_24 , lowerCAmelCase : Optional[Any]=1.0 , lowerCAmelCase : Optional[int]=8 , lowerCAmelCase : Dict=8 , lowerCAmelCase : int=6 , lowerCAmelCase : Union[str, Any]=32 , lowerCAmelCase : int=True , lowerCAmelCase : int=True , lowerCAmelCase : int="relu6" , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : List[Any]=0.8 , lowerCAmelCase : Any=0.02 , lowerCAmelCase : str=0.0_01 , lowerCAmelCase : List[Any]=2_55 , **lowerCAmelCase : List[str] , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**lowerCAmelCase)
if depth_multiplier <= 0:
raise ValueError('depth_multiplier must be greater than zero.')
lowercase__ = num_channels
lowercase__ = image_size
lowercase__ = depth_multiplier
lowercase__ = depth_divisible_by
lowercase__ = min_depth
lowercase__ = expand_ratio
lowercase__ = output_stride
lowercase__ = first_layer_is_expansion
lowercase__ = finegrained_output
lowercase__ = hidden_act
lowercase__ = tf_padding
lowercase__ = classifier_dropout_prob
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = semantic_loss_ignore_index
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : List[Any] = version.parse("1.11" )
@property
def UpperCAmelCase ( self : Tuple) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict([('pixel_values', {0: 'batch'})])
@property
def UpperCAmelCase ( self : Optional[Any]) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "image-classification":
return OrderedDict([('logits', {0: 'batch'})])
else:
return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})])
@property
def UpperCAmelCase ( self : Any) -> float:
"""simple docstring"""
return 1E-4
| 642
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a__ : Optional[int] = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Any = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Dict = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : List[str] = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : int = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
a__ : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 642
| 1
|
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
| 642
|
import heapq
import sys
import numpy as np
a__ : Dict = tuple[int, int]
class UpperCAmelCase__:
'''simple docstring'''
def __init__( self : List[str]) -> Any:
"""simple docstring"""
lowercase__ = []
lowercase__ = set()
def UpperCAmelCase ( self : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
if not self.empty():
return self.elements[0][0]
else:
return float('inf')
def UpperCAmelCase ( self : int) -> str:
"""simple docstring"""
return len(self.elements) == 0
def UpperCAmelCase ( self : Dict , lowerCAmelCase : List[Any] , lowerCAmelCase : List[str]) -> List[str]:
"""simple docstring"""
if item not in self.set:
heapq.heappush(self.elements , (priority, item))
self.set.add(lowerCAmelCase)
else:
# update
# print("update", item)
lowercase__ = []
((lowercase__), (lowercase__)) = heapq.heappop(self.elements)
while x != item:
temp.append((pri, x))
((lowercase__), (lowercase__)) = heapq.heappop(self.elements)
temp.append((priority, item))
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx))
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : int) -> Tuple:
"""simple docstring"""
if item in self.set:
self.set.remove(lowerCAmelCase)
lowercase__ = []
((lowercase__), (lowercase__)) = heapq.heappop(self.elements)
while x != item:
temp.append((pro, x))
((lowercase__), (lowercase__)) = heapq.heappop(self.elements)
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy))
def UpperCAmelCase ( self : Dict) -> List[Any]:
"""simple docstring"""
return self.elements[0][1]
def UpperCAmelCase ( self : List[str]) -> str:
"""simple docstring"""
((lowercase__), (lowercase__)) = heapq.heappop(self.elements)
self.set.remove(lowerCAmelCase)
return (priority, item)
def _lowerCAmelCase ( A__ , A__ ):
# euclidean distance
lowercase__ = np.array(A__ )
lowercase__ = np.array(A__ )
return np.linalg.norm(a - b )
def _lowerCAmelCase ( A__ , A__ ):
# integer division by time variable
return consistent_heuristic(A__ , A__ ) // t
def _lowerCAmelCase ( A__ , A__ ):
# manhattan distance
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def _lowerCAmelCase ( A__ , A__ , A__ , A__ ):
lowercase__ = g_function[start] + Wa * heuristics[i](A__ , A__ )
return ans
def _lowerCAmelCase ( A__ , A__ , A__ ):
lowercase__ = np.chararray((n, n) )
for i in range(A__ ):
for j in range(A__ ):
lowercase__ = '*'
for i in range(A__ ):
for j in range(A__ ):
if (j, (n - 1) - i) in blocks:
lowercase__ = '#'
lowercase__ = '-'
lowercase__ = back_pointer[goal]
while x != start:
((lowercase__), (lowercase__)) = x
# print(x)
lowercase__ = '-'
lowercase__ = back_pointer[x]
lowercase__ = '-'
for i in range(A__ ):
for j in range(A__ ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=' ' )
print('<-- End position' , end=' ' )
else:
print(grid[i][j] , end=' ' )
print()
print('^' )
print('Start position' )
print()
print('# is an obstacle' )
print('- is the path taken by algorithm' )
print('PATH TAKEN BY THE ALGORITHM IS:-' )
lowercase__ = back_pointer[goal]
while x != start:
print(A__ , end=' ' )
lowercase__ = back_pointer[x]
print(A__ )
sys.exit()
def _lowerCAmelCase ( A__ ):
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def _lowerCAmelCase ( A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , ):
for itera in range(A__ ):
open_list[itera].remove_element(A__ )
# print("s", s)
# print("j", j)
((lowercase__), (lowercase__)) = s
lowercase__ = (x - 1, y)
lowercase__ = (x + 1, y)
lowercase__ = (x, y + 1)
lowercase__ = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(A__ ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(A__ )
lowercase__ = -1
lowercase__ = float('inf' )
if valid(A__ ) and g_function[neighbours] > g_function[s] + 1:
lowercase__ = g_function[s] + 1
lowercase__ = s
if neighbours not in close_list_anchor:
open_list[0].put(A__ , key(A__ , 0 , A__ , A__ ) )
if neighbours not in close_list_inad:
for var in range(1 , A__ ):
if key(A__ , A__ , A__ , A__ ) <= Wa * key(
A__ , 0 , A__ , A__ ):
open_list[j].put(
A__ , key(A__ , A__ , A__ , A__ ) )
def _lowerCAmelCase ( ):
lowercase__ = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
a__ : str = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
a__ : Any = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
a__ : Any = make_common_ground()
a__ : Union[str, Any] = blocks_blk
# hyper parameters
a__ : List[Any] = 1
a__ : List[str] = 1
a__ : Optional[int] = 20
a__ : Optional[Any] = 3 # one consistent and two other inconsistent
# start and end destination
a__ : Tuple = (0, 0)
a__ : str = (n - 1, n - 1)
a__ : Optional[Any] = 1
def _lowerCAmelCase ( A__ , A__ , A__ ):
lowercase__ = {start: 0, goal: float('inf' )}
lowercase__ = {start: -1, goal: -1}
lowercase__ = []
lowercase__ = set()
for i in range(A__ ):
open_list.append(PriorityQueue() )
open_list[i].put(A__ , key(A__ , A__ , A__ , A__ ) )
lowercase__ = []
lowercase__ = []
while open_list[0].minkey() < float('inf' ):
for i in range(1 , A__ ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float('inf' ):
do_something(A__ , A__ , A__ )
else:
lowercase__, lowercase__ = open_list[i].top_show()
visited.add(A__ )
expand_state(
A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , )
close_list_inad.append(A__ )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float('inf' ):
do_something(A__ , A__ , A__ )
else:
lowercase__ = open_list[0].top_show()
visited.add(A__ )
expand_state(
A__ , 0 , A__ , A__ , A__ , A__ , A__ , A__ , )
close_list_anchor.append(A__ )
print('No path found to goal' )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(A__ ):
if (j, i) in blocks:
print('#' , end=' ' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('*' , end=' ' )
else:
print('-' , end=' ' )
else:
print('*' , end=' ' )
if (j, i) == (n - 1, n - 1):
print('<-- End position' , end=' ' )
print()
print('^' )
print('Start position' )
print()
print('# is an obstacle' )
print('- is the path taken by algorithm' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 642
| 1
|
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
a__ : Optional[int] = False
class UpperCAmelCase__( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : Optional[Any]=32) -> str:
"""simple docstring"""
set_seed(0)
lowercase__ = UNetaDModel(sample_size=lowerCAmelCase , in_channels=3 , out_channels=3)
lowercase__ = torch.optim.SGD(model.parameters() , lr=0.00_01)
return model, optimizer
@slow
def UpperCAmelCase ( self : Optional[int]) -> List[str]:
"""simple docstring"""
lowercase__ = 'cpu' # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
lowercase__ = DDPMScheduler(
num_train_timesteps=10_00 , beta_start=0.00_01 , beta_end=0.02 , beta_schedule='linear' , clip_sample=lowerCAmelCase , )
lowercase__ = DDIMScheduler(
num_train_timesteps=10_00 , beta_start=0.00_01 , beta_end=0.02 , beta_schedule='linear' , clip_sample=lowerCAmelCase , )
assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
# shared batches for DDPM and DDIM
set_seed(0)
lowercase__ = [torch.randn((4, 3, 32, 32)).clip(-1 , 1).to(lowerCAmelCase) for _ in range(4)]
lowercase__ = [torch.randn((4, 3, 32, 32)).to(lowerCAmelCase) for _ in range(4)]
lowercase__ = [torch.randint(0 , 10_00 , (4,)).long().to(lowerCAmelCase) for _ in range(4)]
# train with a DDPM scheduler
lowercase__, lowercase__ = self.get_model_optimizer(resolution=32)
model.train().to(lowerCAmelCase)
for i in range(4):
optimizer.zero_grad()
lowercase__ = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i])
lowercase__ = model(lowerCAmelCase , timesteps[i]).sample
lowercase__ = torch.nn.functional.mse_loss(lowerCAmelCase , noise[i])
loss.backward()
optimizer.step()
del model, optimizer
# recreate the model and optimizer, and retry with DDIM
lowercase__, lowercase__ = self.get_model_optimizer(resolution=32)
model.train().to(lowerCAmelCase)
for i in range(4):
optimizer.zero_grad()
lowercase__ = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i])
lowercase__ = model(lowerCAmelCase , timesteps[i]).sample
lowercase__ = torch.nn.functional.mse_loss(lowerCAmelCase , noise[i])
loss.backward()
optimizer.step()
del model, optimizer
self.assertTrue(torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-5))
self.assertTrue(torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-5))
| 642
|
import math
import sys
def _lowerCAmelCase ( A__ ):
lowercase__ = ''
try:
with open(A__ , 'rb' ) as binary_file:
lowercase__ = binary_file.read()
for dat in data:
lowercase__ = F'''{dat:08b}'''
result += curr_byte
return result
except OSError:
print('File not accessible' )
sys.exit()
def _lowerCAmelCase ( A__ ):
lowercase__ = {'0': '0', '1': '1'}
lowercase__, lowercase__ = '', ''
lowercase__ = len(A__ )
for i in range(len(A__ ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
lowercase__ = lexicon[curr_string]
result += last_match_id
lowercase__ = last_match_id + '0'
if math.loga(A__ ).is_integer():
lowercase__ = {}
for curr_key in list(A__ ):
lowercase__ = lexicon.pop(A__ )
lowercase__ = new_lex
lowercase__ = last_match_id + '1'
index += 1
lowercase__ = ''
return result
def _lowerCAmelCase ( A__ , A__ ):
lowercase__ = 8
try:
with open(A__ , 'wb' ) as opened_file:
lowercase__ = [
to_write[i : i + byte_length]
for i in range(0 , len(A__ ) , A__ )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('10000000' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(A__ , 2 ).to_bytes(1 , byteorder='big' ) )
except OSError:
print('File not accessible' )
sys.exit()
def _lowerCAmelCase ( A__ ):
lowercase__ = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
lowercase__ = data_bits[counter:]
lowercase__ = data_bits[counter + 1 :]
return data_bits
def _lowerCAmelCase ( A__ , A__ ):
lowercase__ = read_file_binary(A__ )
lowercase__ = remove_prefix(A__ )
lowercase__ = decompress_data(A__ )
write_file_binary(A__ , A__ )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 642
| 1
|
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : str = ""
A : List[str] = "hf-legacy" # "hf://"" is reserved for hffs
def __init__( self : Optional[int] , lowerCAmelCase : Optional[DatasetInfo] = None , lowerCAmelCase : Optional[str] = None , **lowerCAmelCase : Any , ) -> str:
"""simple docstring"""
super().__init__(self , **lowerCAmelCase)
lowercase__ = repo_info
lowercase__ = token
lowercase__ = None
def UpperCAmelCase ( self : Any) -> int:
"""simple docstring"""
if self.dir_cache is None:
lowercase__ = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
lowercase__ = {
'name': hf_file.rfilename,
'size': None,
'type': 'file',
}
self.dir_cache.update(
{
str(lowerCAmelCase): {'name': str(lowerCAmelCase), 'size': None, 'type': 'directory'}
for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
})
def UpperCAmelCase ( self : Dict , lowerCAmelCase : str , lowerCAmelCase : str = "rb" , **lowerCAmelCase : Tuple , ) -> Any:
"""simple docstring"""
if not isinstance(self.repo_info , lowerCAmelCase):
raise NotImplementedError(f'''Open is only implemented for dataset repositories, but got {self.repo_info}''')
lowercase__ = hf_hub_url(self.repo_info.id , lowerCAmelCase , revision=self.repo_info.sha)
return fsspec.open(
lowerCAmelCase , mode=lowerCAmelCase , headers=get_authentication_headers_for_url(lowerCAmelCase , use_auth_token=self.token) , client_kwargs={'trust_env': True} , ).open()
def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : List[str] , **lowerCAmelCase : Any) -> Dict:
"""simple docstring"""
self._get_dirs()
lowercase__ = self._strip_protocol(lowerCAmelCase)
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(lowerCAmelCase)
def UpperCAmelCase ( self : Any , lowerCAmelCase : int , lowerCAmelCase : Optional[int]=False , **lowerCAmelCase : Union[str, Any]) -> str:
"""simple docstring"""
self._get_dirs()
lowercase__ = PurePosixPath(path.strip('/'))
lowercase__ = {}
for p, f in self.dir_cache.items():
lowercase__ = PurePosixPath(p.strip('/'))
lowercase__ = p.parent
if root == path:
lowercase__ = f
lowercase__ = list(paths.values())
if detail:
return out
else:
return sorted(f['name'] for f in out)
| 642
|
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
a__ : int = logging.get_logger(__name__)
a__ : Tuple = {"vocab_file": "vocab.txt"}
a__ : int = {
"vocab_file": {
"facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
"facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
},
}
a__ : Dict = {
"facebook/esm2_t6_8M_UR50D": 10_24,
"facebook/esm2_t12_35M_UR50D": 10_24,
}
def _lowerCAmelCase ( A__ ):
with open(A__ , 'r' ) as f:
lowercase__ = f.read().splitlines()
return [l.strip() for l in lines]
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : Union[str, Any] = VOCAB_FILES_NAMES
A : str = PRETRAINED_VOCAB_FILES_MAP
A : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : List[Any] = ["input_ids", "attention_mask"]
def __init__( self : Union[str, Any] , lowerCAmelCase : Any , lowerCAmelCase : Optional[int]="<unk>" , lowerCAmelCase : Dict="<cls>" , lowerCAmelCase : List[str]="<pad>" , lowerCAmelCase : Union[str, Any]="<mask>" , lowerCAmelCase : Optional[Any]="<eos>" , **lowerCAmelCase : Any , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**lowerCAmelCase)
lowercase__ = load_vocab_file(lowerCAmelCase)
lowercase__ = dict(enumerate(self.all_tokens))
lowercase__ = {tok: ind for ind, tok in enumerate(self.all_tokens)}
lowercase__ = unk_token
lowercase__ = cls_token
lowercase__ = pad_token
lowercase__ = mask_token
lowercase__ = eos_token
lowercase__ = self.all_tokens
self._create_trie(self.unique_no_split_tokens)
def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : int) -> str:
"""simple docstring"""
return self._id_to_token.get(lowerCAmelCase , self.unk_token)
def UpperCAmelCase ( self : Dict , lowerCAmelCase : str) -> int:
"""simple docstring"""
return self._token_to_id.get(lowerCAmelCase , self._token_to_id.get(self.unk_token))
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : str , **lowerCAmelCase : Union[str, Any]) -> Dict:
"""simple docstring"""
return text.split()
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : Any=False) -> Union[str, Any]:
"""simple docstring"""
return len(self._id_to_token)
def UpperCAmelCase ( self : Tuple) -> int:
"""simple docstring"""
return {token: i for i, token in enumerate(self.all_tokens)}
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : str) -> int:
"""simple docstring"""
return self._token_to_id.get(lowerCAmelCase , self._token_to_id.get(self.unk_token))
def UpperCAmelCase ( self : Dict , lowerCAmelCase : int) -> str:
"""simple docstring"""
return self._id_to_token.get(lowerCAmelCase , self.unk_token)
def UpperCAmelCase ( self : Any , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
lowercase__ = [self.cls_token_id]
lowercase__ = [self.eos_token_id] # No sep token in ESM vocabulary
if token_ids_a is None:
if self.eos_token_id is None:
return cls + token_ids_a
else:
return cls + token_ids_a + sep
elif self.eos_token_id is None:
raise ValueError('Cannot tokenize multiple sequences when EOS token is not set!')
return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : List , lowerCAmelCase : Optional[List] = None , lowerCAmelCase : bool = False) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.')
return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
lowercase__ = [1] + ([0] * len(lowerCAmelCase)) + [1]
if token_ids_a is not None:
mask += [0] * len(lowerCAmelCase) + [1]
return mask
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int]) -> Dict:
"""simple docstring"""
lowercase__ = os.path.join(lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + 'vocab.txt')
with open(lowerCAmelCase , 'w') as f:
f.write('\n'.join(self.all_tokens))
return (vocab_file,)
@property
def UpperCAmelCase ( self : Optional[int]) -> int:
"""simple docstring"""
return self.get_vocab_size(with_added_tokens=lowerCAmelCase)
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : Union[List[str], List[AddedToken]] , lowerCAmelCase : bool = False) -> int:
"""simple docstring"""
return super()._add_tokens(lowerCAmelCase , special_tokens=lowerCAmelCase)
| 642
| 1
|
from itertools import product
def _lowerCAmelCase ( A__ , A__ ):
lowercase__ = sides_number
lowercase__ = max_face_number * dice_number
lowercase__ = [0] * (max_total + 1)
lowercase__ = 1
lowercase__ = range(A__ , max_face_number + 1 )
for dice_numbers in product(A__ , repeat=A__ ):
lowercase__ = sum(A__ )
totals_frequencies[total] += 1
return totals_frequencies
def _lowerCAmelCase ( ):
lowercase__ = total_frequency_distribution(
sides_number=4 , dice_number=9 )
lowercase__ = total_frequency_distribution(
sides_number=6 , dice_number=6 )
lowercase__ = 0
lowercase__ = 9
lowercase__ = 4 * 9
lowercase__ = 6
for peter_total in range(A__ , max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
lowercase__ = (4**9) * (6**6)
lowercase__ = peter_wins_count / total_games_number
lowercase__ = round(A__ , ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(F'''{solution() = }''')
| 642
|
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
a__ : int = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
a__ : Optional[Any] = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
a__ : Tuple = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase__( datasets.Metric ):
'''simple docstring'''
def UpperCAmelCase ( self : List[Any]) -> MetricInfo:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token') , id='sequence'),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token') , id='sequence') , id='references'),
}) , )
def UpperCAmelCase ( self : int , lowerCAmelCase : List[List[List[str]]] , lowerCAmelCase : List[List[str]] , lowerCAmelCase : int = 1 , lowerCAmelCase : int = 4 , ) -> Dict[str, float]:
"""simple docstring"""
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=lowerCAmelCase , hypotheses=lowerCAmelCase , min_len=lowerCAmelCase , max_len=lowerCAmelCase)
}
| 642
| 1
|
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
a__ : List[str] = TypeVar("T")
def _lowerCAmelCase ( A__ ):
return (position - 1) // 2
def _lowerCAmelCase ( A__ ):
return (2 * position) + 1
def _lowerCAmelCase ( A__ ):
return (2 * position) + 2
class UpperCAmelCase__( Generic[T] ):
'''simple docstring'''
def __init__( self : List[Any]) -> None:
"""simple docstring"""
lowercase__ = []
lowercase__ = {}
lowercase__ = 0
def __len__( self : str) -> int:
"""simple docstring"""
return self.elements
def __repr__( self : int) -> str:
"""simple docstring"""
return str(self.heap)
def UpperCAmelCase ( self : Union[str, Any]) -> bool:
"""simple docstring"""
return self.elements == 0
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : T , lowerCAmelCase : int) -> None:
"""simple docstring"""
self.heap.append((elem, weight))
lowercase__ = self.elements
self.elements += 1
self._bubble_up(lowerCAmelCase)
def UpperCAmelCase ( self : Optional[Any]) -> T:
"""simple docstring"""
if self.elements > 1:
self._swap_nodes(0 , self.elements - 1)
lowercase__, lowercase__ = self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
lowercase__, lowercase__ = self.heap[0]
self._bubble_down(lowerCAmelCase)
return elem
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : T , lowerCAmelCase : int) -> None:
"""simple docstring"""
lowercase__ = self.position_map[elem]
lowercase__ = (elem, weight)
if position > 0:
lowercase__ = get_parent_position(lowerCAmelCase)
lowercase__, lowercase__ = self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(lowerCAmelCase)
else:
self._bubble_down(lowerCAmelCase)
else:
self._bubble_down(lowerCAmelCase)
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : T) -> None:
"""simple docstring"""
lowercase__ = self.position_map[elem]
if curr_pos == 0:
return None
lowercase__ = get_parent_position(lowerCAmelCase)
lowercase__, lowercase__ = self.heap[curr_pos]
lowercase__, lowercase__ = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(lowerCAmelCase , lowerCAmelCase)
return self._bubble_up(lowerCAmelCase)
return None
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : T) -> None:
"""simple docstring"""
lowercase__ = self.position_map[elem]
lowercase__, lowercase__ = self.heap[curr_pos]
lowercase__ = get_child_left_position(lowerCAmelCase)
lowercase__ = get_child_right_position(lowerCAmelCase)
if child_left_position < self.elements and child_right_position < self.elements:
lowercase__, lowercase__ = self.heap[child_left_position]
lowercase__, lowercase__ = self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(lowerCAmelCase , lowerCAmelCase)
return self._bubble_down(lowerCAmelCase)
if child_left_position < self.elements:
lowercase__, lowercase__ = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(lowerCAmelCase , lowerCAmelCase)
return self._bubble_down(lowerCAmelCase)
else:
return None
if child_right_position < self.elements:
lowercase__, lowercase__ = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(lowerCAmelCase , lowerCAmelCase)
return self._bubble_down(lowerCAmelCase)
return None
def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : int , lowerCAmelCase : int) -> None:
"""simple docstring"""
lowercase__ = self.heap[nodea_pos][0]
lowercase__ = self.heap[nodea_pos][0]
lowercase__, lowercase__ = (
self.heap[nodea_pos],
self.heap[nodea_pos],
)
lowercase__ = nodea_pos
lowercase__ = nodea_pos
class UpperCAmelCase__( Generic[T] ):
'''simple docstring'''
def __init__( self : Dict) -> None:
"""simple docstring"""
lowercase__ = {}
lowercase__ = 0
def __repr__( self : Optional[int]) -> str:
"""simple docstring"""
return str(self.connections)
def __len__( self : Tuple) -> int:
"""simple docstring"""
return self.nodes
def UpperCAmelCase ( self : str , lowerCAmelCase : T) -> None:
"""simple docstring"""
if node not in self.connections:
lowercase__ = {}
self.nodes += 1
def UpperCAmelCase ( self : List[str] , lowerCAmelCase : T , lowerCAmelCase : T , lowerCAmelCase : int) -> None:
"""simple docstring"""
self.add_node(lowerCAmelCase)
self.add_node(lowerCAmelCase)
lowercase__ = weight
lowercase__ = weight
def _lowerCAmelCase ( A__ , ):
lowercase__ = {node: maxsize for node in graph.connections}
lowercase__ = {node: None for node in graph.connections}
lowercase__ = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(A__ , A__ )
if priority_queue.is_empty():
return dist, parent
# initialization
lowercase__ = priority_queue.extract_min()
lowercase__ = 0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
lowercase__ = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(A__ , dist[neighbour] )
lowercase__ = node
# running prim's algorithm
while not priority_queue.is_empty():
lowercase__ = priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
lowercase__ = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(A__ , dist[neighbour] )
lowercase__ = node
return dist, parent
| 642
|
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class UpperCAmelCase__:
'''simple docstring'''
def __init__( self : Tuple , lowerCAmelCase : str , lowerCAmelCase : Dict=13 , lowerCAmelCase : Dict=7 , lowerCAmelCase : int=True , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : str=True , lowerCAmelCase : int=True , lowerCAmelCase : List[Any]=99 , lowerCAmelCase : List[Any]=[1, 1, 2] , lowerCAmelCase : Optional[Any]=1 , lowerCAmelCase : int=32 , lowerCAmelCase : Union[str, Any]=4 , lowerCAmelCase : Tuple=8 , lowerCAmelCase : int=37 , lowerCAmelCase : Any="gelu_new" , lowerCAmelCase : str=0.1 , lowerCAmelCase : List[str]=0.1 , lowerCAmelCase : Dict=0.0 , lowerCAmelCase : str=5_12 , lowerCAmelCase : str=3 , lowerCAmelCase : List[Any]=0.02 , lowerCAmelCase : Union[str, Any]=3 , lowerCAmelCase : Any=4 , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Optional[int]=False , ) -> List[Any]:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = seq_length
lowercase__ = is_training
lowercase__ = use_input_mask
lowercase__ = use_token_type_ids
lowercase__ = use_labels
lowercase__ = vocab_size
lowercase__ = block_sizes
lowercase__ = num_decoder_layers
lowercase__ = d_model
lowercase__ = n_head
lowercase__ = d_head
lowercase__ = d_inner
lowercase__ = hidden_act
lowercase__ = hidden_dropout
lowercase__ = attention_dropout
lowercase__ = activation_dropout
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = 2
lowercase__ = num_labels
lowercase__ = num_choices
lowercase__ = scope
lowercase__ = initializer_std
# Used in the tests to check the size of the first attention layer
lowercase__ = n_head
# Used in the tests to check the size of the first hidden state
lowercase__ = self.d_model
# Used in the tests to check the number of output hidden states/attentions
lowercase__ = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
lowercase__ = self.num_hidden_layers + 2
def UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
lowercase__ = None
if self.use_input_mask:
lowercase__ = random_attention_mask([self.batch_size, self.seq_length])
lowercase__ = None
if self.use_token_type_ids:
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
lowercase__ = None
lowercase__ = None
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
lowercase__ = ids_tensor([self.batch_size] , self.num_choices)
lowercase__ = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def UpperCAmelCase ( self : Dict , lowerCAmelCase : List[Any] , lowerCAmelCase : Dict , lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Any , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Union[str, Any] , ) -> int:
"""simple docstring"""
lowercase__ = TFFunnelModel(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
lowercase__ = [input_ids, input_mask]
lowercase__ = model(lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model))
lowercase__ = False
lowercase__ = TFFunnelModel(config=lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model))
lowercase__ = False
lowercase__ = TFFunnelModel(config=lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model))
def UpperCAmelCase ( self : List[str] , lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int] , lowerCAmelCase : Any , lowerCAmelCase : List[str] , lowerCAmelCase : int , lowerCAmelCase : Optional[Any] , ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = TFFunnelBaseModel(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
lowercase__ = [input_ids, input_mask]
lowercase__ = model(lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model))
lowercase__ = False
lowercase__ = TFFunnelBaseModel(config=lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model))
lowercase__ = False
lowercase__ = TFFunnelBaseModel(config=lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model))
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Dict , lowerCAmelCase : List[str] , lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : Tuple , ) -> str:
"""simple docstring"""
lowercase__ = TFFunnelForPreTraining(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length))
def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : Any , lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[str] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Tuple , ) -> Optional[int]:
"""simple docstring"""
lowercase__ = TFFunnelForMaskedLM(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : int , lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Dict , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[Any] , ) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.num_labels
lowercase__ = TFFunnelForSequenceClassification(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : str , lowerCAmelCase : Optional[int] , lowerCAmelCase : int , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[str] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[Any] , ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.num_choices
lowercase__ = TFFunnelForMultipleChoice(config=lowerCAmelCase)
lowercase__ = tf.tile(tf.expand_dims(lowerCAmelCase , 1) , (1, self.num_choices, 1))
lowercase__ = tf.tile(tf.expand_dims(lowerCAmelCase , 1) , (1, self.num_choices, 1))
lowercase__ = tf.tile(tf.expand_dims(lowerCAmelCase , 1) , (1, self.num_choices, 1))
lowercase__ = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def UpperCAmelCase ( self : List[str] , lowerCAmelCase : str , lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int , lowerCAmelCase : Optional[int] , lowerCAmelCase : Any , ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.num_labels
lowercase__ = TFFunnelForTokenClassification(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def UpperCAmelCase ( self : int , lowerCAmelCase : List[str] , lowerCAmelCase : int , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : Tuple , lowerCAmelCase : Tuple , ) -> Optional[int]:
"""simple docstring"""
lowercase__ = TFFunnelForQuestionAnswering(config=lowerCAmelCase)
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowercase__ = model(lowerCAmelCase)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
(
(
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
),
) = config_and_inputs
lowercase__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase__( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
A : int = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
A : Dict = (
{
"feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
"fill-mask": TFFunnelForMaskedLM,
"question-answering": TFFunnelForQuestionAnswering,
"text-classification": TFFunnelForSequenceClassification,
"token-classification": TFFunnelForTokenClassification,
"zero-shot": TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
A : Optional[int] = False
A : Optional[int] = False
def UpperCAmelCase ( self : Tuple) -> str:
"""simple docstring"""
lowercase__ = TFFunnelModelTester(self)
lowercase__ = ConfigTester(self , config_class=lowerCAmelCase)
def UpperCAmelCase ( self : int) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase ( self : Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCAmelCase)
def UpperCAmelCase ( self : int) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase)
def UpperCAmelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase)
def UpperCAmelCase ( self : Dict) -> Dict:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase)
@require_tf
class UpperCAmelCase__( lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
A : Tuple = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
A : List[str] = False
A : int = False
def UpperCAmelCase ( self : Any) -> List[Any]:
"""simple docstring"""
lowercase__ = TFFunnelModelTester(self , base=lowerCAmelCase)
lowercase__ = ConfigTester(self , config_class=lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase ( self : Tuple) -> int:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*lowerCAmelCase)
def UpperCAmelCase ( self : int) -> str:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase)
def UpperCAmelCase ( self : List[str]) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowerCAmelCase)
| 642
| 1
|
from __future__ import annotations
a__ : Tuple = [True] * 1_00_00_01
a__ : Tuple = 2
while i * i <= 1_00_00_00:
if seive[i]:
for j in range(i * i, 1_00_00_01, i):
a__ : Optional[int] = False
i += 1
def _lowerCAmelCase ( A__ ):
return seive[n]
def _lowerCAmelCase ( A__ ):
return any(digit in '02468' for digit in str(A__ ) )
def _lowerCAmelCase ( A__ = 1_000_000 ):
lowercase__ = [2] # result already includes the number 2.
for num in range(3 , limit + 1 , 2 ):
if is_prime(A__ ) and not contains_an_even_digit(A__ ):
lowercase__ = str(A__ )
lowercase__ = [int(str_num[j:] + str_num[:j] ) for j in range(len(A__ ) )]
if all(is_prime(A__ ) for i in list_nums ):
result.append(A__ )
return result
def _lowerCAmelCase ( ):
return len(find_circular_primes() )
if __name__ == "__main__":
print(F'''{len(find_circular_primes()) = }''')
| 642
|
def _lowerCAmelCase ( A__ , A__ , A__ ):
if principal <= 0:
raise Exception('Principal borrowed must be > 0' )
if rate_per_annum < 0:
raise Exception('Rate of interest must be >= 0' )
if years_to_repay <= 0 or not isinstance(A__ , A__ ):
raise Exception('Years to repay must be an integer > 0' )
# Yearly rate is divided by 12 to get monthly rate
lowercase__ = rate_per_annum / 12
# Years to repay is multiplied by 12 to get number of payments as payment is monthly
lowercase__ = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 642
| 1
|
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
a__ : List[Any] = {
"facebook/maskformer-swin-base-ade": (
"https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
a__ : List[Any] = logging.get_logger(__name__)
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : Any = "maskformer"
A : str = {"hidden_size": "mask_feature_size"}
A : Optional[int] = ["resnet", "swin"]
A : Dict = ["detr"]
def __init__( self : Tuple , lowerCAmelCase : int = 2_56 , lowerCAmelCase : int = 2_56 , lowerCAmelCase : float = 0.1 , lowerCAmelCase : bool = False , lowerCAmelCase : Optional[Dict] = None , lowerCAmelCase : Optional[Dict] = None , lowerCAmelCase : float = 0.02 , lowerCAmelCase : float = 1.0 , lowerCAmelCase : float = 1.0 , lowerCAmelCase : float = 1.0 , lowerCAmelCase : float = 20.0 , lowerCAmelCase : Optional[bool] = None , **lowerCAmelCase : List[Any] , ) -> Tuple:
"""simple docstring"""
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
lowercase__ = SwinConfig(
image_size=3_84 , in_channels=3 , patch_size=4 , embed_dim=1_28 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , )
if isinstance(lowerCAmelCase , lowerCAmelCase):
lowercase__ = backbone_config.pop('model_type')
lowercase__ = CONFIG_MAPPING[backbone_model_type]
lowercase__ = config_class.from_dict(lowerCAmelCase)
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. '''
f'''Supported model types: {','.join(self.backbones_supported)}''')
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
lowercase__ = DetrConfig()
else:
# verify that the decoder is supported
lowercase__ = (
decoder_config.pop('model_type') if isinstance(lowerCAmelCase , lowerCAmelCase) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
f'''Transformer Decoder {decoder_type} not supported, please use one of'''
f''' {','.join(self.decoders_supported)}''')
if isinstance(lowerCAmelCase , lowerCAmelCase):
lowercase__ = CONFIG_MAPPING[decoder_type]
lowercase__ = config_class.from_dict(lowerCAmelCase)
lowercase__ = backbone_config
lowercase__ = decoder_config
# main feature dimension for the model
lowercase__ = fpn_feature_size
lowercase__ = mask_feature_size
# initializer
lowercase__ = init_std
lowercase__ = init_xavier_std
# Hungarian matcher && loss
lowercase__ = cross_entropy_weight
lowercase__ = dice_weight
lowercase__ = mask_weight
lowercase__ = use_auxiliary_loss
lowercase__ = no_object_weight
lowercase__ = output_auxiliary_logits
lowercase__ = self.decoder_config.encoder_attention_heads
lowercase__ = self.decoder_config.num_hidden_layers
super().__init__(**lowerCAmelCase)
@classmethod
def UpperCAmelCase ( cls : Optional[Any] , lowerCAmelCase : PretrainedConfig , lowerCAmelCase : PretrainedConfig , **lowerCAmelCase : List[Any]) -> Dict:
"""simple docstring"""
return cls(
backbone_config=lowerCAmelCase , decoder_config=lowerCAmelCase , **lowerCAmelCase , )
def UpperCAmelCase ( self : int) -> Dict[str, any]:
"""simple docstring"""
lowercase__ = copy.deepcopy(self.__dict__)
lowercase__ = self.backbone_config.to_dict()
lowercase__ = self.decoder_config.to_dict()
lowercase__ = self.__class__.model_type
return output
| 642
|
from __future__ import annotations
def _lowerCAmelCase ( A__ , A__ ):
if b == 0:
return (1, 0)
((lowercase__), (lowercase__)) = extended_euclid(A__ , a % b )
lowercase__ = a // b
return (y, x - k * y)
def _lowerCAmelCase ( A__ , A__ , A__ , A__ ):
((lowercase__), (lowercase__)) = extended_euclid(A__ , A__ )
lowercase__ = na * na
lowercase__ = ra * x * na + ra * y * na
return (n % m + m) % m
def _lowerCAmelCase ( A__ , A__ ):
((lowercase__), (lowercase__)) = extended_euclid(A__ , A__ )
if b < 0:
lowercase__ = (b % n + n) % n
return b
def _lowerCAmelCase ( A__ , A__ , A__ , A__ ):
lowercase__, lowercase__ = invert_modulo(A__ , A__ ), invert_modulo(A__ , A__ )
lowercase__ = na * na
lowercase__ = ra * x * na + ra * y * na
return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name="chinese_remainder_theorem", verbose=True)
testmod(name="chinese_remainder_theorem2", verbose=True)
testmod(name="invert_modulo", verbose=True)
testmod(name="extended_euclid", verbose=True)
| 642
| 1
|
import math
import sys
def _lowerCAmelCase ( A__ ):
lowercase__ = ''
try:
with open(A__ , 'rb' ) as binary_file:
lowercase__ = binary_file.read()
for dat in data:
lowercase__ = F'''{dat:08b}'''
result += curr_byte
return result
except OSError:
print('File not accessible' )
sys.exit()
def _lowerCAmelCase ( A__ ):
lowercase__ = {'0': '0', '1': '1'}
lowercase__, lowercase__ = '', ''
lowercase__ = len(A__ )
for i in range(len(A__ ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
lowercase__ = lexicon[curr_string]
result += last_match_id
lowercase__ = last_match_id + '0'
if math.loga(A__ ).is_integer():
lowercase__ = {}
for curr_key in list(A__ ):
lowercase__ = lexicon.pop(A__ )
lowercase__ = new_lex
lowercase__ = last_match_id + '1'
index += 1
lowercase__ = ''
return result
def _lowerCAmelCase ( A__ , A__ ):
lowercase__ = 8
try:
with open(A__ , 'wb' ) as opened_file:
lowercase__ = [
to_write[i : i + byte_length]
for i in range(0 , len(A__ ) , A__ )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('10000000' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(A__ , 2 ).to_bytes(1 , byteorder='big' ) )
except OSError:
print('File not accessible' )
sys.exit()
def _lowerCAmelCase ( A__ ):
lowercase__ = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
lowercase__ = data_bits[counter:]
lowercase__ = data_bits[counter + 1 :]
return data_bits
def _lowerCAmelCase ( A__ , A__ ):
lowercase__ = read_file_binary(A__ )
lowercase__ = remove_prefix(A__ )
lowercase__ = decompress_data(A__ )
write_file_binary(A__ , A__ )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 642
|
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
a__ : Union[str, Any] = logging.get_logger(__name__)
a__ : Optional[Any] = {
"google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : Union[str, Any] = "umt5"
A : List[str] = ["past_key_values"]
def __init__( self : List[Any] , lowerCAmelCase : Optional[int]=25_01_12 , lowerCAmelCase : str=5_12 , lowerCAmelCase : List[Any]=64 , lowerCAmelCase : Optional[int]=10_24 , lowerCAmelCase : Union[str, Any]=8 , lowerCAmelCase : Tuple=None , lowerCAmelCase : Optional[Any]=6 , lowerCAmelCase : int=32 , lowerCAmelCase : int=1_28 , lowerCAmelCase : List[str]=0.1 , lowerCAmelCase : List[str]=1E-6 , lowerCAmelCase : Optional[int]=1.0 , lowerCAmelCase : Optional[Any]="gated-gelu" , lowerCAmelCase : List[Any]=True , lowerCAmelCase : List[str]=True , lowerCAmelCase : List[Any]="T5Tokenizer" , lowerCAmelCase : str=True , lowerCAmelCase : Optional[int]=0 , lowerCAmelCase : Tuple=1 , lowerCAmelCase : Any=0 , **lowerCAmelCase : int , ) -> str:
"""simple docstring"""
super().__init__(
is_encoder_decoder=lowerCAmelCase , tokenizer_class=lowerCAmelCase , tie_word_embeddings=lowerCAmelCase , pad_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , decoder_start_token_id=lowerCAmelCase , **lowerCAmelCase , )
lowercase__ = vocab_size
lowercase__ = d_model
lowercase__ = d_kv
lowercase__ = d_ff
lowercase__ = num_layers
lowercase__ = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
lowercase__ = num_heads
lowercase__ = relative_attention_num_buckets
lowercase__ = relative_attention_max_distance
lowercase__ = dropout_rate
lowercase__ = layer_norm_epsilon
lowercase__ = initializer_factor
lowercase__ = feed_forward_proj
lowercase__ = use_cache
lowercase__ = self.feed_forward_proj.split('-')
lowercase__ = act_info[-1]
lowercase__ = act_info[0] == 'gated'
if len(lowerCAmelCase) > 1 and act_info[0] != "gated" or len(lowerCAmelCase) > 2:
raise ValueError(
f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
'\'gated-gelu\' or \'relu\'')
if feed_forward_proj == "gated-gelu":
lowercase__ = 'gelu_new'
@property
def UpperCAmelCase ( self : Union[str, Any]) -> Dict:
"""simple docstring"""
return self.d_model
@property
def UpperCAmelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
return self.num_heads
@property
def UpperCAmelCase ( self : Optional[int]) -> Optional[Any]:
"""simple docstring"""
return self.num_layers
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def UpperCAmelCase ( self : Optional[int]) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
lowercase__ = {
'input_ids': {0: 'batch', 1: 'encoder_sequence'},
'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
}
if self.use_past:
lowercase__ = 'past_encoder_sequence + sequence'
lowercase__ = {0: 'batch'}
lowercase__ = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
lowercase__ = {0: 'batch', 1: 'decoder_sequence'}
lowercase__ = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase , direction='inputs')
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def UpperCAmelCase ( self : int) -> int:
"""simple docstring"""
return 13
@property
def UpperCAmelCase ( self : Optional[Any]) -> float:
"""simple docstring"""
return 5E-4
| 642
| 1
|
import heapq
def _lowerCAmelCase ( A__ ):
lowercase__ = []
# for each node and his adjacency list add them and the rank of the node to queue
# using heapq module the queue will be filled like a Priority Queue
# heapq works with a min priority queue, so I used -1*len(v) to build it
for key, value in graph.items():
# O(log(n))
heapq.heappush(A__ , [-1 * len(A__ ), (key, value)] )
# chosen_vertices = set of chosen vertices
lowercase__ = set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
lowercase__ = heapq.heappop(A__ )[1][0]
chosen_vertices.add(A__ )
# Remove all arcs adjacent to argmax
for elem in queue:
# if v haven't adjacent node, skip
if elem[0] == 0:
continue
# if argmax is reachable from elem
# remove argmax from elem's adjacent list and update his rank
if argmax in elem[1][1]:
lowercase__ = elem[1][1].index(A__ )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(A__ )
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
a__ : List[str] = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F'''Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}''')
| 642
|
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
a__ : Any = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__( lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
A : str = XGLMTokenizer
A : List[Any] = XGLMTokenizerFast
A : int = True
A : Optional[Any] = True
def UpperCAmelCase ( self : Optional[int]) -> Optional[Any]:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
lowercase__ = XGLMTokenizer(lowerCAmelCase , keep_accents=lowerCAmelCase)
tokenizer.save_pretrained(self.tmpdirname)
def UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
lowercase__ = '<pad>'
lowercase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase) , lowerCAmelCase)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase) , lowerCAmelCase)
def UpperCAmelCase ( self : str) -> List[str]:
"""simple docstring"""
lowercase__ = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '<s>')
self.assertEqual(vocab_keys[1] , '<pad>')
self.assertEqual(len(lowerCAmelCase) , 10_08)
def UpperCAmelCase ( self : List[str]) -> str:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 10_08)
def UpperCAmelCase ( self : Optional[Any]) -> List[str]:
"""simple docstring"""
lowercase__ = XGLMTokenizer(lowerCAmelCase , keep_accents=lowerCAmelCase)
lowercase__ = tokenizer.tokenize('This is a test')
self.assertListEqual(lowerCAmelCase , ['▁This', '▁is', '▁a', '▁t', 'est'])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
lowercase__ = tokenizer.tokenize('I was born in 92000, and this is falsé.')
self.assertListEqual(
lowerCAmelCase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
lowercase__ = tokenizer.convert_tokens_to_ids(lowerCAmelCase)
self.assertListEqual(
lowerCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
lowercase__ = tokenizer.convert_ids_to_tokens(lowerCAmelCase)
self.assertListEqual(
lowerCAmelCase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
def UpperCAmelCase ( self : int) -> Dict:
"""simple docstring"""
return XGLMTokenizer.from_pretrained('facebook/xglm-564M')
def UpperCAmelCase ( self : Optional[int]) -> Dict:
"""simple docstring"""
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(lowerCAmelCase , f.name)
lowercase__ = XGLMTokenizer(f.name , keep_accents=lowerCAmelCase)
lowercase__ = pickle.dumps(lowerCAmelCase)
pickle.loads(lowerCAmelCase)
def UpperCAmelCase ( self : Optional[Any]) -> str:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_rust_tokenizer()
lowercase__ = 'I was born in 92000, and this is falsé.'
lowercase__ = tokenizer.tokenize(lowerCAmelCase)
lowercase__ = rust_tokenizer.tokenize(lowerCAmelCase)
self.assertListEqual(lowerCAmelCase , lowerCAmelCase)
lowercase__ = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase)
lowercase__ = rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase)
self.assertListEqual(lowerCAmelCase , lowerCAmelCase)
lowercase__ = self.get_rust_tokenizer()
lowercase__ = tokenizer.encode(lowerCAmelCase)
lowercase__ = rust_tokenizer.encode(lowerCAmelCase)
self.assertListEqual(lowerCAmelCase , lowerCAmelCase)
@slow
def UpperCAmelCase ( self : List[str]) -> List[str]:
"""simple docstring"""
lowercase__ = 'Hello World!'
lowercase__ = [2, 3_12_27, 44_47, 35]
self.assertListEqual(lowerCAmelCase , self.big_tokenizer.encode(lowerCAmelCase))
@slow
def UpperCAmelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'
)
# fmt: off
lowercase__ = [2, 10_18, 67, 11, 19_88, 26_17, 56_31, 2_78, 11, 34_07, 48, 7_16_30, 2_80_85, 4, 32_34, 1_57, 13, 6, 5, 6, 4, 35_26, 7_68, 15, 6_59, 57, 2_98, 39_83, 8_64, 1_29, 21, 6, 5, 1_36_75, 3_77, 6_52, 75_80, 1_03_41, 1_55, 28_17, 4_22, 16_66, 7, 16_74, 53, 1_13, 20_22_77, 1_78_92, 33, 60, 87, 4, 32_34, 1_57, 61, 26_67, 5_23_76, 19, 88, 23, 7_35]
# fmt: on
self.assertListEqual(lowerCAmelCase , self.big_tokenizer.encode(lowerCAmelCase))
@slow
def UpperCAmelCase ( self : str) -> Dict:
"""simple docstring"""
lowercase__ = {
'input_ids': [[2, 10_88_25, 11_63, 15, 8_80_10, 4_73, 1_58_98, 1_57, 1_36_72, 18_57, 3_12, 8, 23_80_21, 11_63, 53, 1_36_72, 18_57, 3_12, 8, 5_32_83, 18_23_96, 8, 1_85_66, 16, 3_67_33, 41_01, 8, 2_30, 24_40_17, 12_25_53, 7, 15, 13_25_97, 4, 2_93, 1_25_11, 76_10, 4, 34_14, 13_25_97, 9, 4, 3_23_61, 3_62, 4, 7_34, 2_85_12, 3_25_69, 18, 4, 3_23_61, 2_60_96, 1_49_82, 73, 1_87_15, 2_14_33, 23_52_61, 15, 4_92, 1_24_27, 16, 53, 1_87_15, 2_14_33, 6_54_54, 15, 2_36_59, 5_63, 16, 2_78, 5_97, 28_43, 5_95, 79_31, 18_23_96, 6_41_86, 22, 8_86, 5_95, 13_29_81, 53, 2_55_40, 34_49, 4_39_82, 3_99_01, 59_51, 8_78, 3_30, 4, 2_76_94, 8_02_69, 3_12, 53, 65_17, 1_17_80, 6_11, 2_04_08, 5], [2, 6, 13_25_97, 67, 4_28_97, 33, 5_92, 8, 16_37_29, 2_55_40, 3_61, 13_69_97, 10_95_14, 17_32_30, 7, 5_01, 60, 10_29_13, 1_96, 56_31, 2_35, 6_32_43, 4_73, 6, 23_17_57, 74, 52_77, 79_05, 53, 30_95, 3_73_17, 22, 4_54, 18_38_74, 5], [2, 2_68, 3_12_98, 4_65_30, 6, 13_29_35, 4_38_31, 7, 5_97, 32, 24, 36_88, 98_65, 5]],
'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase , model_name='facebook/xglm-564M' , padding=lowerCAmelCase , )
| 642
| 1
|
from __future__ import annotations
from math import gcd
def _lowerCAmelCase ( A__ , A__ = 2 , A__ = 1 , A__ = 3 , ):
# A value less than 2 can cause an infinite loop in the algorithm.
if num < 2:
raise ValueError('The input value cannot be less than 2' )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
def rand_fn(A__ , A__ , A__ ) -> int:
return (pow(A__ , 2 ) + step) % modulus
for _ in range(A__ ):
# These track the position within the cycle detection logic.
lowercase__ = seed
lowercase__ = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
lowercase__ = rand_fn(A__ , A__ , A__ )
lowercase__ = rand_fn(A__ , A__ , A__ )
lowercase__ = rand_fn(A__ , A__ , A__ )
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
lowercase__ = gcd(hare - tortoise , A__ )
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's the "optimized" variant does.
lowercase__ = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
if __name__ == "__main__":
import argparse
a__ : Any = argparse.ArgumentParser()
parser.add_argument(
"num",
type=int,
help="The value to find a divisor of",
)
parser.add_argument(
"--attempts",
type=int,
default=3,
help="The number of attempts before giving up",
)
a__ : Union[str, Any] = parser.parse_args()
a__ : Union[str, Any] = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(F'''{args.num} is probably prime''')
else:
a__ : Any = args.num // divisor
print(F'''{args.num} = {divisor} * {quotient}''')
| 642
|
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class UpperCAmelCase__:
'''simple docstring'''
def __init__( self : Optional[Any] , lowerCAmelCase : str) -> Optional[int]:
"""simple docstring"""
lowercase__ = data
lowercase__ = [0X6_7_4_5_2_3_0_1, 0XE_F_C_D_A_B_8_9, 0X9_8_B_A_D_C_F_E, 0X1_0_3_2_5_4_7_6, 0XC_3_D_2_E_1_F_0]
@staticmethod
def UpperCAmelCase ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[int]) -> str:
"""simple docstring"""
return ((n << b) | (n >> (32 - b))) & 0XF_F_F_F_F_F_F_F
def UpperCAmelCase ( self : Dict) -> Dict:
"""simple docstring"""
lowercase__ = B'\x80' + B'\x00' * (63 - (len(self.data) + 8) % 64)
lowercase__ = self.data + padding + struct.pack('>Q' , 8 * len(self.data))
return padded_data
def UpperCAmelCase ( self : int) -> Tuple:
"""simple docstring"""
return [
self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data) , 64)
]
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : int) -> List[Any]:
"""simple docstring"""
lowercase__ = list(struct.unpack('>16L' , lowerCAmelCase)) + [0] * 64
for i in range(16 , 80):
lowercase__ = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1)
return w
def UpperCAmelCase ( self : str) -> Dict:
"""simple docstring"""
lowercase__ = self.padding()
lowercase__ = self.split_blocks()
for block in self.blocks:
lowercase__ = self.expand_block(lowerCAmelCase)
lowercase__, lowercase__, lowercase__, lowercase__, lowercase__ = self.h
for i in range(0 , 80):
if 0 <= i < 20:
lowercase__ = (b & c) | ((~b) & d)
lowercase__ = 0X5_A_8_2_7_9_9_9
elif 20 <= i < 40:
lowercase__ = b ^ c ^ d
lowercase__ = 0X6_E_D_9_E_B_A_1
elif 40 <= i < 60:
lowercase__ = (b & c) | (b & d) | (c & d)
lowercase__ = 0X8_F_1_B_B_C_D_C
elif 60 <= i < 80:
lowercase__ = b ^ c ^ d
lowercase__ = 0XC_A_6_2_C_1_D_6
lowercase__, lowercase__, lowercase__, lowercase__, lowercase__ = (
self.rotate(lowerCAmelCase , 5) + f + e + k + expanded_block[i] & 0XF_F_F_F_F_F_F_F,
a,
self.rotate(lowerCAmelCase , 30),
c,
d,
)
lowercase__ = (
self.h[0] + a & 0XF_F_F_F_F_F_F_F,
self.h[1] + b & 0XF_F_F_F_F_F_F_F,
self.h[2] + c & 0XF_F_F_F_F_F_F_F,
self.h[3] + d & 0XF_F_F_F_F_F_F_F,
self.h[4] + e & 0XF_F_F_F_F_F_F_F,
)
return ("{:08x}" * 5).format(*self.h)
def _lowerCAmelCase ( ):
lowercase__ = B'Test String'
assert SHAaHash(A__ ).final_hash() == hashlib.shaa(A__ ).hexdigest() # noqa: S324
def _lowerCAmelCase ( ):
lowercase__ = argparse.ArgumentParser(description='Process some strings or files' )
parser.add_argument(
'--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
parser.add_argument('--file' , dest='input_file' , help='Hash contents of a file' )
lowercase__ = parser.parse_args()
lowercase__ = args.input_string
# In any case hash input should be a bytestring
if args.input_file:
with open(args.input_file , 'rb' ) as f:
lowercase__ = f.read()
else:
lowercase__ = bytes(A__ , 'utf-8' )
print(SHAaHash(A__ ).final_hash() )
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
| 642
| 1
|
import socket
def _lowerCAmelCase ( ):
lowercase__ = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
lowercase__ = socket.gethostname()
lowercase__ = 12_312
sock.connect((host, port) )
sock.send(B'Hello server!' )
with open('Received_file' , 'wb' ) as out_file:
print('File opened' )
print('Receiving data...' )
while True:
lowercase__ = sock.recv(1_024 )
if not data:
break
out_file.write(A__ )
print('Successfully received the file' )
sock.close()
print('Connection closed' )
if __name__ == "__main__":
main()
| 642
|
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
a__ : List[Any] = logging.get_logger(__name__)
a__ : Optional[int] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
a__ : List[Any] = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
"tokenizer_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json",
},
}
a__ : int = {
"facebook/bart-base": 10_24,
"facebook/bart-large": 10_24,
"facebook/bart-large-mnli": 10_24,
"facebook/bart-large-cnn": 10_24,
"facebook/bart-large-xsum": 10_24,
"yjernite/bart_eli5": 10_24,
}
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : Optional[Any] = VOCAB_FILES_NAMES
A : Dict = PRETRAINED_VOCAB_FILES_MAP
A : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : int = ["input_ids", "attention_mask"]
A : Any = BartTokenizer
def __init__( self : List[Any] , lowerCAmelCase : Any=None , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : str="replace" , lowerCAmelCase : str="<s>" , lowerCAmelCase : int="</s>" , lowerCAmelCase : Optional[int]="</s>" , lowerCAmelCase : Union[str, Any]="<s>" , lowerCAmelCase : str="<unk>" , lowerCAmelCase : int="<pad>" , lowerCAmelCase : int="<mask>" , lowerCAmelCase : Dict=False , lowerCAmelCase : List[Any]=True , **lowerCAmelCase : Optional[Any] , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(
lowerCAmelCase , lowerCAmelCase , tokenizer_file=lowerCAmelCase , errors=lowerCAmelCase , bos_token=lowerCAmelCase , eos_token=lowerCAmelCase , sep_token=lowerCAmelCase , cls_token=lowerCAmelCase , unk_token=lowerCAmelCase , pad_token=lowerCAmelCase , mask_token=lowerCAmelCase , add_prefix_space=lowerCAmelCase , trim_offsets=lowerCAmelCase , **lowerCAmelCase , )
lowercase__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get('add_prefix_space' , lowerCAmelCase) != add_prefix_space:
lowercase__ = getattr(lowerCAmelCase , pre_tok_state.pop('type'))
lowercase__ = add_prefix_space
lowercase__ = pre_tok_class(**lowerCAmelCase)
lowercase__ = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
lowercase__ = 'post_processor'
lowercase__ = getattr(self.backend_tokenizer , lowerCAmelCase , lowerCAmelCase)
if tokenizer_component_instance:
lowercase__ = json.loads(tokenizer_component_instance.__getstate__())
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
lowercase__ = tuple(state['sep'])
if "cls" in state:
lowercase__ = tuple(state['cls'])
lowercase__ = False
if state.get('add_prefix_space' , lowerCAmelCase) != add_prefix_space:
lowercase__ = add_prefix_space
lowercase__ = True
if state.get('trim_offsets' , lowerCAmelCase) != trim_offsets:
lowercase__ = trim_offsets
lowercase__ = True
if changes_to_apply:
lowercase__ = getattr(lowerCAmelCase , state.pop('type'))
lowercase__ = component_class(**lowerCAmelCase)
setattr(self.backend_tokenizer , lowerCAmelCase , lowerCAmelCase)
@property
def UpperCAmelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.')
return None
return str(self._mask_token)
@mask_token.setter
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : int) -> Optional[int]:
"""simple docstring"""
lowercase__ = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase) if isinstance(lowerCAmelCase , lowerCAmelCase) else value
lowercase__ = value
def UpperCAmelCase ( self : List[str] , *lowerCAmelCase : Tuple , **lowerCAmelCase : Optional[int]) -> BatchEncoding:
"""simple docstring"""
lowercase__ = kwargs.get('is_split_into_words' , lowerCAmelCase)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'to use it with pretokenized inputs.')
return super()._batch_encode_plus(*lowerCAmelCase , **lowerCAmelCase)
def UpperCAmelCase ( self : str , *lowerCAmelCase : Tuple , **lowerCAmelCase : str) -> BatchEncoding:
"""simple docstring"""
lowercase__ = kwargs.get('is_split_into_words' , lowerCAmelCase)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'to use it with pretokenized inputs.')
return super()._encode_plus(*lowerCAmelCase , **lowerCAmelCase)
def UpperCAmelCase ( self : Dict , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None) -> Tuple[str]:
"""simple docstring"""
lowercase__ = self._tokenizer.model.save(lowerCAmelCase , name=lowerCAmelCase)
return tuple(lowerCAmelCase)
def UpperCAmelCase ( self : Any , lowerCAmelCase : str , lowerCAmelCase : Optional[int]=None) -> Tuple:
"""simple docstring"""
lowercase__ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
lowercase__ = [self.sep_token_id]
lowercase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
| 642
| 1
|
def _lowerCAmelCase ( A__ ):
lowercase__ = [[0 for _ in range(A__ )] for _ in range(m + 1 )]
for i in range(m + 1 ):
lowercase__ = 1
for n in range(m + 1 ):
for k in range(1 , A__ ):
memo[n][k] += memo[n][k - 1]
if n - k > 0:
memo[n][k] += memo[n - k - 1][k]
return memo[m][m - 1]
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
a__ : str = int(input("Enter a number: ").strip())
print(partition(n))
except ValueError:
print("Please enter a number.")
else:
try:
a__ : List[str] = int(sys.argv[1])
print(partition(n))
except ValueError:
print("Please pass a number.")
| 642
|
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : str = (DDIMParallelScheduler,)
A : Any = (("eta", 0.0), ("num_inference_steps", 50))
def UpperCAmelCase ( self : Union[str, Any] , **lowerCAmelCase : Optional[int]) -> Dict:
"""simple docstring"""
lowercase__ = {
'num_train_timesteps': 10_00,
'beta_start': 0.00_01,
'beta_end': 0.02,
'beta_schedule': 'linear',
'clip_sample': True,
}
config.update(**lowerCAmelCase)
return config
def UpperCAmelCase ( self : int , **lowerCAmelCase : str) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config(**lowerCAmelCase)
lowercase__ = scheduler_class(**lowerCAmelCase)
lowercase__, lowercase__ = 10, 0.0
lowercase__ = self.dummy_model()
lowercase__ = self.dummy_sample_deter
scheduler.set_timesteps(lowerCAmelCase)
for t in scheduler.timesteps:
lowercase__ = model(lowerCAmelCase , lowerCAmelCase)
lowercase__ = scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase).prev_sample
return sample
def UpperCAmelCase ( self : Tuple) -> int:
"""simple docstring"""
for timesteps in [1_00, 5_00, 10_00]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase)
def UpperCAmelCase ( self : Tuple) -> Any:
"""simple docstring"""
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=lowerCAmelCase)
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config(steps_offset=1)
lowercase__ = scheduler_class(**lowerCAmelCase)
scheduler.set_timesteps(5)
assert torch.equal(scheduler.timesteps , torch.LongTensor([8_01, 6_01, 4_01, 2_01, 1]))
def UpperCAmelCase ( self : str) -> Tuple:
"""simple docstring"""
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2]):
self.check_over_configs(beta_start=lowerCAmelCase , beta_end=lowerCAmelCase)
def UpperCAmelCase ( self : Optional[int]) -> str:
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowerCAmelCase)
def UpperCAmelCase ( self : List[str]) -> List[str]:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCAmelCase)
def UpperCAmelCase ( self : List[Any]) -> str:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowerCAmelCase)
def UpperCAmelCase ( self : Optional[int]) -> int:
"""simple docstring"""
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=lowerCAmelCase)
def UpperCAmelCase ( self : Any) -> List[str]:
"""simple docstring"""
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=lowerCAmelCase)
def UpperCAmelCase ( self : List[str]) -> Optional[int]:
"""simple docstring"""
self.check_over_configs(thresholding=lowerCAmelCase)
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=lowerCAmelCase , prediction_type=lowerCAmelCase , sample_max_value=lowerCAmelCase , )
def UpperCAmelCase ( self : int) -> Optional[Any]:
"""simple docstring"""
for t in [1, 10, 49]:
self.check_over_forward(time_step=lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> int:
"""simple docstring"""
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 5_00]):
self.check_over_forward(time_step=lowerCAmelCase , num_inference_steps=lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> List[Any]:
"""simple docstring"""
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0]):
self.check_over_forward(time_step=lowerCAmelCase , eta=lowerCAmelCase)
def UpperCAmelCase ( self : Union[str, Any]) -> List[Any]:
"""simple docstring"""
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**lowerCAmelCase)
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0) - 0.0)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_20 , 4_00) - 0.1_47_71)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_80 , 9_60) - 0.3_24_60)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0) - 0.0)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 , 4_86) - 0.0_09_79)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 , 9_98) - 0.02)) < 1E-5
def UpperCAmelCase ( self : Dict) -> Tuple:
"""simple docstring"""
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**lowerCAmelCase)
lowercase__, lowercase__ = 10, 0.0
scheduler.set_timesteps(lowerCAmelCase)
lowercase__ = self.dummy_model()
lowercase__ = self.dummy_sample_deter
lowercase__ = self.dummy_sample_deter + 0.1
lowercase__ = self.dummy_sample_deter - 0.1
lowercase__ = samplea.shape[0]
lowercase__ = torch.stack([samplea, samplea, samplea] , dim=0)
lowercase__ = torch.arange(lowerCAmelCase)[0:3, None].repeat(1 , lowerCAmelCase)
lowercase__ = model(samples.flatten(0 , 1) , timesteps.flatten(0 , 1))
lowercase__ = scheduler.batch_step_no_noise(lowerCAmelCase , timesteps.flatten(0 , 1) , samples.flatten(0 , 1) , lowerCAmelCase)
lowercase__ = torch.sum(torch.abs(lowerCAmelCase))
lowercase__ = torch.mean(torch.abs(lowerCAmelCase))
assert abs(result_sum.item() - 11_47.79_04) < 1E-2
assert abs(result_mean.item() - 0.49_82) < 1E-3
def UpperCAmelCase ( self : Any) -> int:
"""simple docstring"""
lowercase__ = self.full_loop()
lowercase__ = torch.sum(torch.abs(lowerCAmelCase))
lowercase__ = torch.mean(torch.abs(lowerCAmelCase))
assert abs(result_sum.item() - 1_72.00_67) < 1E-2
assert abs(result_mean.item() - 0.22_39_67) < 1E-3
def UpperCAmelCase ( self : int) -> List[Any]:
"""simple docstring"""
lowercase__ = self.full_loop(prediction_type='v_prediction')
lowercase__ = torch.sum(torch.abs(lowerCAmelCase))
lowercase__ = torch.mean(torch.abs(lowerCAmelCase))
assert abs(result_sum.item() - 52.53_02) < 1E-2
assert abs(result_mean.item() - 0.06_84) < 1E-3
def UpperCAmelCase ( self : str) -> Dict:
"""simple docstring"""
lowercase__ = self.full_loop(set_alpha_to_one=lowerCAmelCase , beta_start=0.01)
lowercase__ = torch.sum(torch.abs(lowerCAmelCase))
lowercase__ = torch.mean(torch.abs(lowerCAmelCase))
assert abs(result_sum.item() - 1_49.82_95) < 1E-2
assert abs(result_mean.item() - 0.19_51) < 1E-3
def UpperCAmelCase ( self : str) -> List[Any]:
"""simple docstring"""
lowercase__ = self.full_loop(set_alpha_to_one=lowerCAmelCase , beta_start=0.01)
lowercase__ = torch.sum(torch.abs(lowerCAmelCase))
lowercase__ = torch.mean(torch.abs(lowerCAmelCase))
assert abs(result_sum.item() - 1_49.07_84) < 1E-2
assert abs(result_mean.item() - 0.19_41) < 1E-3
| 642
| 1
|
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class UpperCAmelCase__:
'''simple docstring'''
def __init__( self : str , lowerCAmelCase : Any , lowerCAmelCase : List[str]=2 , lowerCAmelCase : Optional[int]=True , lowerCAmelCase : int=False , lowerCAmelCase : Optional[int]=10 , lowerCAmelCase : str=3 , lowerCAmelCase : List[Any]=32 * 4 , lowerCAmelCase : int=32 * 6 , lowerCAmelCase : int=4 , lowerCAmelCase : Tuple=32 , ) -> int:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = is_training
lowercase__ = use_auxiliary_loss
lowercase__ = num_queries
lowercase__ = num_channels
lowercase__ = min_size
lowercase__ = max_size
lowercase__ = num_labels
lowercase__ = mask_feature_size
def UpperCAmelCase ( self : List[Any]) -> List[str]:
"""simple docstring"""
lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
lowerCAmelCase)
lowercase__ = torch.ones([self.batch_size, self.min_size, self.max_size] , device=lowerCAmelCase)
lowercase__ = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=lowerCAmelCase) > 0.5
).float()
lowercase__ = (torch.rand((self.batch_size, self.num_labels) , device=lowerCAmelCase) > 0.5).long()
lowercase__ = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def UpperCAmelCase ( self : Optional[int]) -> Any:
"""simple docstring"""
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=1_28 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def UpperCAmelCase ( self : int) -> Optional[int]:
"""simple docstring"""
lowercase__, lowercase__, lowercase__, lowercase__, lowercase__ = self.prepare_config_and_inputs()
lowercase__ = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : Any , lowerCAmelCase : Dict) -> Optional[Any]:
"""simple docstring"""
lowercase__ = output.encoder_hidden_states
lowercase__ = output.pixel_decoder_hidden_states
lowercase__ = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(lowerCAmelCase) , len(config.backbone_config.depths))
self.parent.assertTrue(len(lowerCAmelCase) , len(config.backbone_config.depths))
self.parent.assertTrue(len(lowerCAmelCase) , config.decoder_config.decoder_layers)
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : List[str] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int , lowerCAmelCase : str=False) -> str:
"""simple docstring"""
with torch.no_grad():
lowercase__ = MaskFormerModel(config=lowerCAmelCase)
model.to(lowerCAmelCase)
model.eval()
lowercase__ = model(pixel_values=lowerCAmelCase , pixel_mask=lowerCAmelCase)
lowercase__ = model(lowerCAmelCase , output_hidden_states=lowerCAmelCase)
# the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
self.parent.assertTrue(output.encoder_last_hidden_state is not None)
if output_hidden_states:
self.check_output_hidden_state(lowerCAmelCase , lowerCAmelCase)
def UpperCAmelCase ( self : Any , lowerCAmelCase : str , lowerCAmelCase : List[str] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : List[Any]) -> List[Any]:
"""simple docstring"""
lowercase__ = MaskFormerForInstanceSegmentation(config=lowerCAmelCase)
model.to(lowerCAmelCase)
model.eval()
def comm_check_on_output(lowerCAmelCase : List[Any]):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
self.parent.assertTrue(result.encoder_last_hidden_state is not None)
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1))
with torch.no_grad():
lowercase__ = model(pixel_values=lowerCAmelCase , pixel_mask=lowerCAmelCase)
lowercase__ = model(lowerCAmelCase)
comm_check_on_output(lowerCAmelCase)
lowercase__ = model(
pixel_values=lowerCAmelCase , pixel_mask=lowerCAmelCase , mask_labels=lowerCAmelCase , class_labels=lowerCAmelCase)
comm_check_on_output(lowerCAmelCase)
self.parent.assertTrue(result.loss is not None)
self.parent.assertEqual(result.loss.shape , torch.Size([1]))
@require_torch
class UpperCAmelCase__( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
A : Dict = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
A : Dict = (
{"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
A : Any = False
A : Tuple = False
A : Union[str, Any] = False
A : List[str] = False
def UpperCAmelCase ( self : Tuple) -> Dict:
"""simple docstring"""
lowercase__ = MaskFormerModelTester(self)
lowercase__ = ConfigTester(self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase)
def UpperCAmelCase ( self : Tuple) -> int:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase ( self : Union[str, Any]) -> Dict:
"""simple docstring"""
lowercase__, lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(lowerCAmelCase , **lowerCAmelCase , output_hidden_states=lowerCAmelCase)
def UpperCAmelCase ( self : str) -> Any:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*lowerCAmelCase)
@unittest.skip(reason='MaskFormer does not use inputs_embeds')
def UpperCAmelCase ( self : Tuple) -> Tuple:
"""simple docstring"""
pass
@unittest.skip(reason='MaskFormer does not have a get_input_embeddings method')
def UpperCAmelCase ( self : str) -> int:
"""simple docstring"""
pass
@unittest.skip(reason='MaskFormer is not a generative model')
def UpperCAmelCase ( self : Union[str, Any]) -> List[str]:
"""simple docstring"""
pass
@unittest.skip(reason='MaskFormer does not use token embeddings')
def UpperCAmelCase ( self : str) -> Any:
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(
reason='MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`')
def UpperCAmelCase ( self : Dict) -> Tuple:
"""simple docstring"""
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def UpperCAmelCase ( self : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
pass
def UpperCAmelCase ( self : str) -> Dict:
"""simple docstring"""
lowercase__, lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(lowerCAmelCase)
lowercase__ = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCAmelCase)
@slow
def UpperCAmelCase ( self : str) -> Optional[Any]:
"""simple docstring"""
for model_name in ["facebook/maskformer-swin-small-coco"]:
lowercase__ = MaskFormerModel.from_pretrained(lowerCAmelCase)
self.assertIsNotNone(lowerCAmelCase)
def UpperCAmelCase ( self : str) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = (self.model_tester.min_size,) * 2
lowercase__ = {
'pixel_values': torch.randn((2, 3, *size) , device=lowerCAmelCase),
'mask_labels': torch.randn((2, 10, *size) , device=lowerCAmelCase),
'class_labels': torch.zeros(2 , 10 , device=lowerCAmelCase).long(),
}
lowercase__ = MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(lowerCAmelCase)
lowercase__ = model(**lowerCAmelCase)
self.assertTrue(outputs.loss is not None)
def UpperCAmelCase ( self : Dict) -> int:
"""simple docstring"""
lowercase__, lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(lowerCAmelCase , **lowerCAmelCase , output_hidden_states=lowerCAmelCase)
def UpperCAmelCase ( self : str) -> List[Any]:
"""simple docstring"""
lowercase__, lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(lowerCAmelCase).to(lowerCAmelCase)
lowercase__ = model(**lowerCAmelCase , output_attentions=lowerCAmelCase)
self.assertTrue(outputs.attentions is not None)
def UpperCAmelCase ( self : Union[str, Any]) -> Tuple:
"""simple docstring"""
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
lowercase__ = self.all_model_classes[1]
lowercase__, lowercase__, lowercase__, lowercase__, lowercase__ = self.model_tester.prepare_config_and_inputs()
lowercase__ = model_class(lowerCAmelCase)
model.to(lowerCAmelCase)
model.train()
lowercase__ = model(lowerCAmelCase , mask_labels=lowerCAmelCase , class_labels=lowerCAmelCase).loss
loss.backward()
def UpperCAmelCase ( self : int) -> List[str]:
"""simple docstring"""
lowercase__ = self.all_model_classes[1]
lowercase__, lowercase__, lowercase__, lowercase__, lowercase__ = self.model_tester.prepare_config_and_inputs()
lowercase__ = True
lowercase__ = True
lowercase__ = model_class(lowerCAmelCase)
model.to(lowerCAmelCase)
model.train()
lowercase__ = model(lowerCAmelCase , mask_labels=lowerCAmelCase , class_labels=lowerCAmelCase)
lowercase__ = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
lowercase__ = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
lowercase__ = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
lowercase__ = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=lowerCAmelCase)
self.assertIsNotNone(encoder_hidden_states.grad)
self.assertIsNotNone(pixel_decoder_hidden_states.grad)
self.assertIsNotNone(transformer_decoder_hidden_states.grad)
self.assertIsNotNone(attentions.grad)
a__ : Any = 1E-4
def _lowerCAmelCase ( ):
lowercase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@slow
class UpperCAmelCase__( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCAmelCase ( self : int) -> Any:
"""simple docstring"""
return (
MaskFormerImageProcessor.from_pretrained('facebook/maskformer-swin-small-coco')
if is_vision_available()
else None
)
def UpperCAmelCase ( self : Dict) -> Optional[int]:
"""simple docstring"""
lowercase__ = MaskFormerModel.from_pretrained('facebook/maskformer-swin-small-coco').to(lowerCAmelCase)
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(lowerCAmelCase , return_tensors='pt').to(lowerCAmelCase)
lowercase__ = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
# check size
self.assertEqual(lowerCAmelCase , (1, 3, 8_00, 10_88))
with torch.no_grad():
lowercase__ = model(**lowerCAmelCase)
lowercase__ = torch.tensor(
[[-0.04_82, 0.92_28, 0.49_51], [-0.25_47, 0.80_17, 0.85_27], [-0.00_69, 0.33_85, -0.00_89]]).to(lowerCAmelCase)
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowerCAmelCase , atol=lowerCAmelCase))
lowercase__ = torch.tensor(
[[-0.84_22, -0.84_34, -0.97_18], [-1.01_44, -0.55_65, -0.41_95], [-1.00_38, -0.44_84, -0.19_61]]).to(lowerCAmelCase)
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowerCAmelCase , atol=lowerCAmelCase))
lowercase__ = torch.tensor(
[[0.28_52, -0.01_59, 0.97_35], [0.62_54, 0.18_58, 0.85_29], [-0.06_80, -0.41_16, 1.84_13]]).to(lowerCAmelCase)
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowerCAmelCase , atol=lowerCAmelCase))
def UpperCAmelCase ( self : Tuple) -> List[Any]:
"""simple docstring"""
lowercase__ = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco')
.to(lowerCAmelCase)
.eval()
)
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(lowerCAmelCase , return_tensors='pt').to(lowerCAmelCase)
lowercase__ = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
# check size
self.assertEqual(lowerCAmelCase , (1, 3, 8_00, 10_88))
with torch.no_grad():
lowercase__ = model(**lowerCAmelCase)
# masks_queries_logits
lowercase__ = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
lowercase__ = [
[-1.3_73_71_24, -1.7_72_49_37, -1.9_36_42_33],
[-1.5_97_72_81, -1.9_86_79_39, -2.1_52_36_95],
[-1.5_79_53_98, -1.9_26_98_32, -2.09_39_42],
]
lowercase__ = torch.tensor(lowerCAmelCase).to(lowerCAmelCase)
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCAmelCase , atol=lowerCAmelCase))
# class_queries_logits
lowercase__ = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1))
lowercase__ = torch.tensor(
[
[1.6_5_1_2E0_0, -5.2_5_7_2E0_0, -3.3_5_1_9E0_0],
[3.6_1_6_9E-0_2, -5.9_0_2_5E0_0, -2.9_3_1_3E0_0],
[1.0_7_6_6E-0_4, -7.7_6_3_0E0_0, -5.1_2_6_3E0_0],
]).to(lowerCAmelCase)
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCAmelCase , atol=lowerCAmelCase))
def UpperCAmelCase ( self : Tuple) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-resnet101-coco-stuff')
.to(lowerCAmelCase)
.eval()
)
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(lowerCAmelCase , return_tensors='pt').to(lowerCAmelCase)
lowercase__ = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
# check size
self.assertEqual(lowerCAmelCase , (1, 3, 8_00, 10_88))
with torch.no_grad():
lowercase__ = model(**lowerCAmelCase)
# masks_queries_logits
lowercase__ = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
lowercase__ = [[-0.90_46, -2.63_66, -4.60_62], [-3.41_79, -5.78_90, -8.80_57], [-4.91_79, -7.65_60, -10.77_11]]
lowercase__ = torch.tensor(lowerCAmelCase).to(lowerCAmelCase)
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCAmelCase , atol=lowerCAmelCase))
# class_queries_logits
lowercase__ = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1))
lowercase__ = torch.tensor(
[[4.71_88, -3.25_85, -2.88_57], [6.68_71, -2.91_81, -1.24_87], [7.24_49, -2.27_64, -2.18_74]]).to(lowerCAmelCase)
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCAmelCase , atol=lowerCAmelCase))
def UpperCAmelCase ( self : Dict) -> Optional[int]:
"""simple docstring"""
lowercase__ = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco')
.to(lowerCAmelCase)
.eval()
)
lowercase__ = self.default_image_processor
lowercase__ = image_processor(
[np.zeros((3, 8_00, 13_33)), np.zeros((3, 8_00, 13_33))] , segmentation_maps=[np.zeros((3_84, 3_84)).astype(np.floataa), np.zeros((3_84, 3_84)).astype(np.floataa)] , return_tensors='pt' , )
lowercase__ = inputs['pixel_values'].to(lowerCAmelCase)
lowercase__ = [el.to(lowerCAmelCase) for el in inputs['mask_labels']]
lowercase__ = [el.to(lowerCAmelCase) for el in inputs['class_labels']]
with torch.no_grad():
lowercase__ = model(**lowerCAmelCase)
self.assertTrue(outputs.loss is not None)
| 642
|
import cva
import numpy as np
class UpperCAmelCase__:
'''simple docstring'''
def __init__( self : Union[str, Any] , lowerCAmelCase : float , lowerCAmelCase : int) -> Dict:
"""simple docstring"""
if k in (0.04, 0.06):
lowercase__ = k
lowercase__ = window_size
else:
raise ValueError('invalid k value')
def __str__( self : Tuple) -> str:
"""simple docstring"""
return str(self.k)
def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : str) -> tuple[cva.Mat, list[list[int]]]:
"""simple docstring"""
lowercase__ = cva.imread(lowerCAmelCase , 0)
lowercase__, lowercase__ = img.shape
lowercase__ = []
lowercase__ = img.copy()
lowercase__ = cva.cvtColor(lowerCAmelCase , cva.COLOR_GRAY2RGB)
lowercase__, lowercase__ = np.gradient(lowerCAmelCase)
lowercase__ = dx**2
lowercase__ = dy**2
lowercase__ = dx * dy
lowercase__ = 0.04
lowercase__ = self.window_size // 2
for y in range(lowerCAmelCase , h - offset):
for x in range(lowerCAmelCase , w - offset):
lowercase__ = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowercase__ = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowercase__ = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowercase__ = (wxx * wyy) - (wxy**2)
lowercase__ = wxx + wyy
lowercase__ = det - k * (trace**2)
# Can change the value
if r > 0.5:
corner_list.append([x, y, r])
color_img.itemset((y, x, 0) , 0)
color_img.itemset((y, x, 1) , 0)
color_img.itemset((y, x, 2) , 2_55)
return color_img, corner_list
if __name__ == "__main__":
a__ : Dict = HarrisCorner(0.0_4, 3)
a__ , a__ : Dict = edge_detect.detect("path_to_image")
cva.imwrite("detect.png", color_img)
| 642
| 1
|
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey
def _lowerCAmelCase ( A__ , A__ , A__ , A__ , A__ , A__ ):
# prepare kernel
# the kernel size have to be odd
if (ksize % 2) == 0:
lowercase__ = ksize + 1
lowercase__ = np.zeros((ksize, ksize) , dtype=np.floataa )
# each value
for y in range(A__ ):
for x in range(A__ ):
# distance from center
lowercase__ = x - ksize // 2
lowercase__ = y - ksize // 2
# degree to radiant
lowercase__ = theta / 180 * np.pi
lowercase__ = np.cos(_theta )
lowercase__ = np.sin(_theta )
# get kernel x
lowercase__ = cos_theta * px + sin_theta * py
# get kernel y
lowercase__ = -sin_theta * px + cos_theta * py
# fill kernel
lowercase__ = np.exp(
-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
a__ : Any = imread("../image_data/lena.jpg")
# turn image in gray scale value
a__ : Optional[Any] = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
a__ : List[Any] = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 1_20, 1_50]:
a__ : Optional[Any] = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
a__ : List[Any] = out / out.max() * 2_55
a__ : str = out.astype(np.uinta)
imshow("Original", gray)
imshow("Gabor filter with 20x20 mask and 6 directions", out)
waitKey(0)
| 642
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ : Dict = logging.get_logger(__name__)
a__ : List[Any] = {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : int = "speech_to_text"
A : Optional[Any] = ["past_key_values"]
A : Optional[int] = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : Optional[int] , lowerCAmelCase : Tuple=1_00_00 , lowerCAmelCase : int=12 , lowerCAmelCase : int=20_48 , lowerCAmelCase : Union[str, Any]=4 , lowerCAmelCase : str=6 , lowerCAmelCase : Dict=20_48 , lowerCAmelCase : Dict=4 , lowerCAmelCase : Optional[int]=0.0 , lowerCAmelCase : Union[str, Any]=0.0 , lowerCAmelCase : int=True , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : Dict="relu" , lowerCAmelCase : Tuple=2_56 , lowerCAmelCase : Dict=0.1 , lowerCAmelCase : Optional[Any]=0.0 , lowerCAmelCase : List[Any]=0.0 , lowerCAmelCase : Any=0.02 , lowerCAmelCase : List[Any]=2 , lowerCAmelCase : Tuple=True , lowerCAmelCase : Tuple=1 , lowerCAmelCase : List[str]=0 , lowerCAmelCase : Union[str, Any]=2 , lowerCAmelCase : Any=60_00 , lowerCAmelCase : Optional[int]=10_24 , lowerCAmelCase : Optional[Any]=2 , lowerCAmelCase : Optional[Any]=(5, 5) , lowerCAmelCase : Union[str, Any]=10_24 , lowerCAmelCase : List[Any]=80 , lowerCAmelCase : List[str]=1 , **lowerCAmelCase : List[str] , ) -> Dict:
"""simple docstring"""
lowercase__ = vocab_size
lowercase__ = d_model
lowercase__ = encoder_ffn_dim
lowercase__ = encoder_layers
lowercase__ = encoder_attention_heads
lowercase__ = decoder_ffn_dim
lowercase__ = decoder_layers
lowercase__ = decoder_attention_heads
lowercase__ = dropout
lowercase__ = attention_dropout
lowercase__ = activation_dropout
lowercase__ = activation_function
lowercase__ = init_std
lowercase__ = encoder_layerdrop
lowercase__ = decoder_layerdrop
lowercase__ = use_cache
lowercase__ = encoder_layers
lowercase__ = scale_embedding # scale factor will be sqrt(d_model) if True
lowercase__ = max_source_positions
lowercase__ = max_target_positions
lowercase__ = num_conv_layers
lowercase__ = list(lowerCAmelCase)
lowercase__ = conv_channels
lowercase__ = input_feat_per_channel
lowercase__ = input_channels
if len(self.conv_kernel_sizes) != self.num_conv_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` '
f'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, '''
f'''`config.num_conv_layers = {self.num_conv_layers}`.''')
super().__init__(
pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , is_encoder_decoder=lowerCAmelCase , decoder_start_token_id=lowerCAmelCase , **lowerCAmelCase , )
| 642
| 1
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.