from collections.abc import Callable

def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """Find a root of ``function`` in the interval [a, b] via bisection."""
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5


if __name__ == "__main__":
    print(bisection(f, 1, 1000))

    import doctest

    doctest.testmod()
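# Illustrative usage of the snippet above (an addition, not part of the
# original file). Bisection works for any continuous, sign-changing function;
# the reference values are standard: sqrt(2) ~= 1.4142135, and the real root
# of x**3 - 2x - 5 is ~= 2.0945515.
root = bisection(lambda x: x**2 - 2, 1, 2)
assert abs(root - 2**0.5) < 1e-6
assert abs(bisection(f, 1, 1000) - 2.0945515) < 1e-6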
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
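# An example invocation (assuming the script is saved as cross_validation.py,
# as it is in the accelerate examples; flags shown are the ones declared in
# main() below):
#
#   accelerate launch cross_validation.py --num_folds 3 --mixed_precision fp16
#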
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_fold_dataloaders(
    accelerator: Accelerator, dataset: DatasetDict, train_idxs: List[int], valid_idxs: List[int], batch_size: int = 16
):
    """Build the train/validation/test dataloaders for one cross-validation fold."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader, test_dataloader


def training_function(config, args):
    # New Code #
    test_predictions = []
    # Download the dataset
    datasets = load_dataset("glue", "mrpc")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"])
    test_references = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator,
            datasets,
            train_idxs,
            valid_idxs,
        )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())

        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(fold_predictions, dim=0))
        # We now need to release all our memory and get rid of the current model, optimizer, etc
        accelerator.free_memory()

    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print("Average test metrics from all folds:", test_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_deberta': ['DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DebertaConfig', 'DebertaOnnxConfig'],
'tokenization_deberta': ['DebertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_deberta_fast'] = ['DebertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_deberta'] = [
'DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'DebertaForMaskedLM',
'DebertaForQuestionAnswering',
'DebertaForSequenceClassification',
'DebertaForTokenClassification',
'DebertaModel',
'DebertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_deberta'] = [
'TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDebertaForMaskedLM',
'TFDebertaForQuestionAnswering',
'TFDebertaForSequenceClassification',
'TFDebertaForTokenClassification',
'TFDebertaModel',
'TFDebertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
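# Usage sketch (an illustrative addition, not part of the file): the point of
# _LazyModule is that importing the package stays cheap, and heavy submodules
# only load on first attribute access.
#
#   from transformers import DebertaConfig   # cheap: config module only
#   from transformers import DebertaModel    # resolves modeling_deberta (and torch) now
#   model = DebertaModel(DebertaConfig())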
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset with nested features."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]


class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet

        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeq2SeqDataset, Seq2SeqDataset


BERT_BASE_CASED = "bert-base-cased"
PEGASUS_XSUM = "google/pegasus-xsum"
ARTICLES = [" Sam ate lunch today.", "Sams lunch ingredients."]
SUMMARIES = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"
MARIAN_TINY = "sshleifer/tiny-marian-en-de"


def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


def make_test_data_dir(tmp_dir):
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir, f"{split}.source"), ARTICLES)
        _dump_articles(os.path.join(tmp_dir, f"{split}.target"), SUMMARIES)
    return tmp_dir


class TestAll(TestCasePlus):
    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ],
    )
    @slow
    def test_seq2seq_dataset_truncation(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        max_src_len = 4
        max_tgt_len = 8
        assert max_len_target > max_src_len  # Will be truncated
        assert max_len_source > max_src_len  # Will be truncated
        src_lang, tgt_lang = "ro_RO", "de_DE"  # ignored for all but mbart, but never causes error.
        train_dataset = Seq2SeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path="train",
            max_source_length=max_src_len,
            max_target_length=max_tgt_len,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert isinstance(batch, dict)
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_src_len
            # show that targets are the same len
            assert batch["labels"].shape[1] == max_tgt_len
            if tok_name != MBART_TINY:
                continue
            # check language codes in correct place
            batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], tokenizer.pad_token_id)
            assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
            assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
            break  # No need to test every batch

    @parameterized.expand([BART_TINY, BERT_BASE_CASED])
    def test_legacy_dataset_truncation(self, tok):
        tokenizer = AutoTokenizer.from_pretrained(tok)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        trunc_target = 4
        train_dataset = LegacySeq2SeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path="train",
            max_source_length=20,
            max_target_length=trunc_target,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_len_source
            assert 20 >= batch["input_ids"].shape[1]  # trimmed significantly
            # show that targets were truncated
            assert batch["labels"].shape[1] == trunc_target  # Truncated
            assert max_len_target > trunc_target  # Truncated
            break  # No need to test every batch

    def test_pack_dataset(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25")

        tmp_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        orig_examples = tmp_dir.joinpath("train.source").open().readlines()
        save_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        pack_data_dir(tokenizer, tmp_dir, 128, save_dir)
        orig_paths = {x.name for x in tmp_dir.iterdir()}
        new_paths = {x.name for x in save_dir.iterdir()}
        packed_examples = save_dir.joinpath("train.source").open().readlines()
        # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
        # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
        assert len(packed_examples) < len(orig_examples)
        assert len(packed_examples) == 1
        assert len(packed_examples[0]) == sum(len(x) for x in orig_examples)
        assert orig_paths == new_paths

    @pytest.mark.skipif(not FAIRSEQ_AVAILABLE, reason="This test requires fairseq")
    def test_dynamic_batch_size(self):
        if not FAIRSEQ_AVAILABLE:
            return
        ds, max_tokens, tokenizer = self._get_dataset(max_len=64)
        required_batch_size_multiple = 64
        batch_sampler = ds.make_dynamic_sampler(max_tokens, required_batch_size_multiple=required_batch_size_multiple)
        batch_sizes = [len(x) for x in batch_sampler]
        assert len(set(batch_sizes)) > 1  # it's not dynamic batch size if every batch is the same length
        assert sum(batch_sizes) == len(ds)  # no dropped or added examples
        data_loader = DataLoader(ds, batch_sampler=batch_sampler, collate_fn=ds.collate_fn, num_workers=2)
        failures = []
        num_src_per_batch = []
        for batch in data_loader:
            src_shape = batch["input_ids"].shape
            bs = src_shape[0]
            assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            num_src_tokens = np.product(batch["input_ids"].shape)
            num_src_per_batch.append(num_src_tokens)
            if num_src_tokens > (max_tokens * 1.1):
                failures.append(num_src_tokens)
        assert num_src_per_batch[0] == max(num_src_per_batch)
        if failures:
            raise AssertionError(f"too many tokens in {len(failures)} batches")

    def test_sortish_sampler_reduces_padding(self):
        ds, _, tokenizer = self._get_dataset(max_len=512)
        bs = 2
        sortish_sampler = ds.make_sortish_sampler(bs, shuffle=False)

        naive_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2)
        sortish_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2, sampler=sortish_sampler)

        pad = tokenizer.pad_token_id

        def count_pad_tokens(data_loader, k="input_ids"):
            return [batch[k].eq(pad).sum().item() for batch in data_loader]

        assert sum(count_pad_tokens(sortish_dl, k="labels")) < sum(count_pad_tokens(naive_dl, k="labels"))
        assert sum(count_pad_tokens(sortish_dl)) < sum(count_pad_tokens(naive_dl))
        assert len(sortish_dl) == len(naive_dl)

    def _get_dataset(self, n_obs=1_000, max_len=128):
        if os.getenv("USE_REAL_DATA", False):
            data_dir = "examples/seq2seq/wmt_en_ro"
            max_tokens = max_len * 2 * 64
            if not Path(data_dir).joinpath("train.len").exists():
                save_len_file(MARIAN_TINY, data_dir)
        else:
            data_dir = "examples/seq2seq/test_data/wmt_en_ro"
            max_tokens = max_len * 4
            save_len_file(MARIAN_TINY, data_dir)

        tokenizer = AutoTokenizer.from_pretrained(MARIAN_TINY)
        ds = Seq2SeqDataset(
            tokenizer,
            data_dir=data_dir,
            type_path="train",
            max_source_length=max_len,
            max_target_length=max_len,
            n_obs=n_obs,
        )
        return ds, max_tokens, tokenizer

    def test_distributed_sortish_sampler_splits_indices_between_procs(self):
        ds, max_tokens, tokenizer = self._get_dataset()
        ids1 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=0, add_extra_examples=False))
        ids2 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=1, add_extra_examples=False))
        assert ids1.intersection(ids2) == set()

    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ],
    )
    def test_dataset_kwargs(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name, use_fast=False)
        if tok_name == MBART_TINY:
            train_dataset = Seq2SeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path="train",
                max_source_length=4,
                max_target_length=8,
                src_lang="EN",
                tgt_lang="FR",
            )
            kwargs = train_dataset.dataset_kwargs
            assert "src_lang" in kwargs and "tgt_lang" in kwargs
        else:
            train_dataset = Seq2SeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path="train",
                max_source_length=4,
                max_target_length=8,
            )
            kwargs = train_dataset.dataset_kwargs
            assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
            assert len(kwargs) == 1 if tok_name == BART_TINY else len(kwargs) == 0
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
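# Example invocations (illustrative; the subcommand names come straight from
# the register_subcommand calls above):
#
#   transformers-cli env                        # print environment info for bug reports
#   transformers-cli download bert-base-cased   # cache a checkpoint locally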
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( __UpperCAmelCase ) -> List[str]:
if not head:
return True
# split the list to two parts
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = head.next, head
while fast and fast.next:
SCREAMING_SNAKE_CASE__ = fast.next.next
SCREAMING_SNAKE_CASE__ = slow.next
SCREAMING_SNAKE_CASE__ = slow.next
SCREAMING_SNAKE_CASE__ = None # Don't forget here! But forget still works!
# reverse the second part
SCREAMING_SNAKE_CASE__ = None
while second:
SCREAMING_SNAKE_CASE__ = second.next
SCREAMING_SNAKE_CASE__ = node
SCREAMING_SNAKE_CASE__ = second
SCREAMING_SNAKE_CASE__ = nxt
# compare two parts
# second part has the same or one less node
while node:
if node.val != head.val:
return False
SCREAMING_SNAKE_CASE__ = node.next
SCREAMING_SNAKE_CASE__ = head.next
return True
def SCREAMING_SNAKE_CASE ( __UpperCAmelCase ) -> str:
if not head or not head.next:
return True
# 1. Get the midpoint (slow)
SCREAMING_SNAKE_CASE__ = SCREAMING_SNAKE_CASE__ = SCREAMING_SNAKE_CASE__ = head
while fast and fast.next:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = fast.next.next, slow.next
# 2. Push the second half into the stack
SCREAMING_SNAKE_CASE__ = [slow.val]
while slow.next:
SCREAMING_SNAKE_CASE__ = slow.next
stack.append(slow.val )
# 3. Comparison
while stack:
if stack.pop() != cur.val:
return False
SCREAMING_SNAKE_CASE__ = cur.next
return True
def SCREAMING_SNAKE_CASE ( __UpperCAmelCase ) -> str:
if not head or not head.next:
return True
SCREAMING_SNAKE_CASE__ = {}
SCREAMING_SNAKE_CASE__ = 0
while head:
if head.val in d:
d[head.val].append(__UpperCAmelCase )
else:
SCREAMING_SNAKE_CASE__ = [pos]
SCREAMING_SNAKE_CASE__ = head.next
pos += 1
SCREAMING_SNAKE_CASE__ = pos - 1
SCREAMING_SNAKE_CASE__ = 0
for v in d.values():
if len(__UpperCAmelCase ) % 2 != 0:
middle += 1
else:
SCREAMING_SNAKE_CASE__ = 0
for i in range(0 , len(__UpperCAmelCase ) ):
if v[i] + v[len(__UpperCAmelCase ) - 1 - step] != checksum:
return False
step += 1
if middle > 1:
return False
return True
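# The three functions above assume a standard singly linked list node that the
# snippet never defines. A minimal sketch of that assumed class, plus a quick
# check (illustrative additions, not part of the original file):
class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next


def build_list(values):
    # build a linked list from a Python list, back to front
    head = None
    for v in reversed(values):
        head = ListNode(v, head)
    return head


assert is_palindrome(build_list([1, 2, 2, 1]))
assert not is_palindrome_stack(build_list([1, 2, 3]))
assert is_palindrome_dict(build_list([1, 2, 1]))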
from typing import Any, Dict, Optional

import torch
import torch.nn.functional as F
from torch import nn

from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings


@maybe_allow_in_graph
class BasicTransformerBlock(nn.Module):
    """A basic Transformer block: self-attention, optional cross-attention, feed-forward."""

    def __init__(
        self,
        dim: int,
        num_attention_heads: int,
        attention_head_dim: int,
        dropout=0.0,
        cross_attention_dim: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
        attention_bias: bool = False,
        only_cross_attention: bool = False,
        double_self_attention: bool = False,
        upcast_attention: bool = False,
        norm_elementwise_affine: bool = True,
        norm_type: str = "layer_norm",
        final_dropout: bool = False,
    ):
        super().__init__()
        self.only_cross_attention = only_cross_attention

        self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
        self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"

        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
                f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}."
            )

        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if self.use_ada_layer_norm:
            self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
        elif self.use_ada_layer_norm_zero:
            self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)
        else:
            self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.attn1 = Attention(
            query_dim=dim,
            heads=num_attention_heads,
            dim_head=attention_head_dim,
            dropout=dropout,
            bias=attention_bias,
            cross_attention_dim=cross_attention_dim if only_cross_attention else None,
            upcast_attention=upcast_attention,
        )

        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
            # the second cross attention block.
            self.norm2 = (
                AdaLayerNorm(dim, num_embeds_ada_norm)
                if self.use_ada_layer_norm
                else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
            )
            self.attn2 = Attention(
                query_dim=dim,
                cross_attention_dim=cross_attention_dim if not double_self_attention else None,
                heads=num_attention_heads,
                dim_head=attention_head_dim,
                dropout=dropout,
                bias=attention_bias,
                upcast_attention=upcast_attention,
            )  # is self-attn if encoder_hidden_states is none
        else:
            self.norm2 = None
            self.attn2 = None

        # 3. Feed-forward
        self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout)

        # let chunk size default to None
        self._chunk_size = None
        self._chunk_dim = 0

    def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int):
        # Sets chunk feed-forward
        self._chunk_size = chunk_size
        self._chunk_dim = dim

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        timestep: Optional[torch.LongTensor] = None,
        cross_attention_kwargs: Dict[str, Any] = None,
        class_labels: Optional[torch.LongTensor] = None,
    ):
        # Notice that normalization is always applied before the real computation in the following blocks.
        # 1. Self-Attention
        if self.use_ada_layer_norm:
            norm_hidden_states = self.norm1(hidden_states, timestep)
        elif self.use_ada_layer_norm_zero:
            norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
                hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
            )
        else:
            norm_hidden_states = self.norm1(hidden_states)

        cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
        attn_output = self.attn1(
            norm_hidden_states,
            encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
            attention_mask=attention_mask,
            **cross_attention_kwargs,
        )
        if self.use_ada_layer_norm_zero:
            attn_output = gate_msa.unsqueeze(1) * attn_output
        hidden_states = attn_output + hidden_states

        # 2. Cross-Attention
        if self.attn2 is not None:
            norm_hidden_states = (
                self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
            )
            attn_output = self.attn2(
                norm_hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                **cross_attention_kwargs,
            )
            hidden_states = attn_output + hidden_states

        # 3. Feed-forward
        norm_hidden_states = self.norm3(hidden_states)

        if self.use_ada_layer_norm_zero:
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]

        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    f"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`."
                )

            num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            ff_output = torch.cat(
                [self.ff(hid_slice) for hid_slice in norm_hidden_states.chunk(num_chunks, dim=self._chunk_dim)],
                dim=self._chunk_dim,
            )
        else:
            ff_output = self.ff(norm_hidden_states)

        if self.use_ada_layer_norm_zero:
            ff_output = gate_mlp.unsqueeze(1) * ff_output

        hidden_states = ff_output + hidden_states

        return hidden_states


class FeedForward(nn.Module):
    """A feed-forward layer with a configurable (gated) activation."""

    def __init__(
        self,
        dim: int,
        dim_out: Optional[int] = None,
        mult: int = 4,
        dropout: float = 0.0,
        activation_fn: str = "geglu",
        final_dropout: bool = False,
    ):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = dim_out if dim_out is not None else dim

        if activation_fn == "gelu":
            act_fn = GELU(dim, inner_dim)
        if activation_fn == "gelu-approximate":
            act_fn = GELU(dim, inner_dim, approximate="tanh")
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim, inner_dim)
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim, inner_dim)

        self.net = nn.ModuleList([])
        # project in
        self.net.append(act_fn)
        # project dropout
        self.net.append(nn.Dropout(dropout))
        # project out
        self.net.append(nn.Linear(inner_dim, dim_out))
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout))

    def forward(self, hidden_states):
        for module in self.net:
            hidden_states = module(hidden_states)
        return hidden_states


class GELU(nn.Module):
    """GELU activation with an optional tanh approximation, preceded by a linear projection."""

    def __init__(self, dim_in: int, dim_out: int, approximate: str = "none"):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)
        self.approximate = approximate

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate, approximate=self.approximate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states = self.proj(hidden_states)
        hidden_states = self.gelu(hidden_states)
        return hidden_states


class GEGLU(nn.Module):
    """A gated linear unit variant using GELU as the gate activation."""

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
        return hidden_states * self.gelu(gate)


class ApproximateGELU(nn.Module):
    """The sigmoid approximation of GELU: x * sigmoid(1.702 * x)."""

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)

    def forward(self, x):
        x = self.proj(x)
        return x * torch.sigmoid(1.702 * x)


class AdaLayerNorm(nn.Module):
    """LayerNorm modulated by a learned timestep embedding (scale and shift)."""

    def __init__(self, embedding_dim, num_embeddings):
        super().__init__()
        self.emb = nn.Embedding(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, embedding_dim * 2)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False)

    def forward(self, x, timestep):
        emb = self.linear(self.silu(self.emb(timestep)))
        scale, shift = torch.chunk(emb, 2)
        x = self.norm(x) * (1 + scale) + shift
        return x


class AdaLayerNormZero(nn.Module):
    """Adaptive layer norm zero (adaLN-Zero), conditioned on timestep and class labels."""

    def __init__(self, embedding_dim, num_embeddings):
        super().__init__()
        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)

    def forward(self, x, timestep, class_labels, hidden_dtype=None):
        emb = self.linear(self.silu(self.emb(timestep, class_labels, hidden_dtype=hidden_dtype)))
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp


class AdaGroupNorm(nn.Module):
    """GroupNorm modulated by an embedding (scale and shift)."""

    def __init__(
        self, embedding_dim: int, out_dim: int, num_groups: int, act_fn: Optional[str] = None, eps: float = 1e-5
    ):
        super().__init__()
        self.num_groups = num_groups
        self.eps = eps

        if act_fn is None:
            self.act = None
        else:
            self.act = get_activation(act_fn)

        self.linear = nn.Linear(embedding_dim, out_dim * 2)

    def forward(self, x, emb):
        if self.act:
            emb = self.act(emb)
        emb = self.linear(emb)
        emb = emb[:, :, None, None]
        scale, shift = emb.chunk(2, dim=1)

        x = F.group_norm(x, self.num_groups, eps=self.eps)
        x = x * (1 + scale) + shift
        return x
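# A minimal smoke test of the block above (illustrative only; the sizes are
# arbitrary, and the relative imports mean this only runs inside the diffusers
# package where Attention etc. are available):
#
#   import torch
#   # hidden size 320 = 8 heads * 40 dims per head; batch 2, sequence length 64
#   block = BasicTransformerBlock(dim=320, num_attention_heads=8, attention_head_dim=40)
#   hidden_states = torch.randn(2, 64, 320)
#   out = block(hidden_states)
#   assert out.shape == hidden_states.shape  # residual connections preserve shape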
cache: dict[tuple[int, int, int], int] = {}


def _calculate(days: int, absent: int, late: int) -> int:
    if late == 3 or absent == 2:
        return 0

    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1

    # No easy solution, so now we need to do the recursive calculation

    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]

    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today

    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)

    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)

    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)

    prizestrings = state_late + state_absent + state_ontime

    cache[key] = prizestrings
    return prizestrings


def solution(days: int = 30) -> int:
    """Count the possible prize strings over the given number of days."""
    return _calculate(days, absent=0, late=0)


if __name__ == "__main__":
    print(solution())
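# As a sanity check, the 4-day count given in the Project Euler 191 problem
# statement is 43, i.e. solution(4) == 43. The explicit dict cache above could
# equally be replaced by functools.lru_cache; a minimal sketch of that variant
# (an illustrative addition, not part of the original file):
from functools import lru_cache


@lru_cache(maxsize=None)
def _calculate_cached(days: int, absent: int, late: int) -> int:
    # same recursion as _calculate, with memoization handled by the decorator
    if late == 3 or absent == 2:
        return 0
    if days == 0:
        return 1
    return (
        _calculate_cached(days - 1, absent, late + 1)
        + _calculate_cached(days - 1, absent + 1, 0)
        + _calculate_cached(days - 1, absent, 0)
    )


assert _calculate_cached(4, 0, 0) == 43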
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"
MODEL_MAPPING = {
    "jukebox-1b-lyrics": [
        "5b/vqvae.pth.tar",
        "5b/prior_level_0.pth.tar",
        "5b/prior_level_1.pth.tar",
        "1b_lyrics/prior_level_2.pth.tar",
    ],
    "jukebox-5b-lyrics": [
        "5b/vqvae.pth.tar",
        "5b/prior_level_0.pth.tar",
        "5b/prior_level_1.pth.tar",
        "5b_lyrics/prior_level_2.pth.tar",
    ],
}


def replace_key(key):
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")

    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")

    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")

    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")

    return key


def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")

        # handle missmatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and {value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict


@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]

        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)

    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="jukebox-5b-lyrics",
        type=str,
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="jukebox-5b-lyrics-converted",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    args = parser.parse_args()
    convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
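# Typical invocation, using the flags declared above (this downloads several
# multi-GB checkpoints on first run). The script file name is an assumption;
# in the transformers repo this converter lives in the jukebox model directory:
#
#   python convert_jukebox.py --model_name jukebox-1b-lyrics \
#       --pytorch_dump_folder_path jukebox-1b-lyrics-converted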
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class lowerCAmelCase_ ( _a):
def _snake_case ( self : int ) ->List[Any]:
"""simple docstring"""
a__ = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__A , "hidden_sizes" ) )
self.parent.assertTrue(hasattr(__A , "num_attention_heads" ) )
class lowerCAmelCase_ :
def __init__( self : Any , __A : Union[str, Any] , __A : Optional[int]=13 , __A : int=64 , __A : Optional[int]=3 , __A : List[Any]=3 , __A : str=2 , __A : Any=1 , __A : str=16 , __A : List[Any]=[128, 256, 384] , __A : Optional[int]=[4, 6, 8] , __A : Any=[2, 3, 4] , __A : Any=[16, 16, 16] , __A : Any=0 , __A : Dict=[2, 2, 2] , __A : Optional[int]=[2, 2, 2] , __A : str=0.02 , __A : Optional[Any]=True , __A : List[Any]=True , __A : Optional[int]=2 , ) ->Optional[int]:
"""simple docstring"""
a__ = parent
a__ = batch_size
a__ = image_size
a__ = num_channels
a__ = kernel_size
a__ = stride
a__ = padding
a__ = hidden_sizes
a__ = num_attention_heads
a__ = depths
a__ = key_dim
a__ = drop_path_rate
a__ = patch_size
a__ = attention_ratio
a__ = mlp_ratio
a__ = initializer_range
a__ = [
["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
a__ = is_training
a__ = use_labels
a__ = num_labels
a__ = initializer_range
def _snake_case ( self : int ) ->Optional[int]:
"""simple docstring"""
a__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a__ = None
if self.use_labels:
a__ = ids_tensor([self.batch_size] , self.num_labels )
a__ = self.get_config()
return config, pixel_values, labels
def _snake_case ( self : Tuple ) ->Tuple:
"""simple docstring"""
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
def _snake_case ( self : Optional[int] , __A : Tuple , __A : Dict , __A : List[str] ) ->List[Any]:
"""simple docstring"""
a__ = LevitModel(config=__A )
model.to(__A )
model.eval()
a__ = model(__A )
a__ = (self.image_size, self.image_size)
a__ , a__ = image_size[0], image_size[1]
for _ in range(4 ):
a__ = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
a__ = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
def _snake_case ( self : Optional[Any] , __A : Optional[Any] , __A : Any , __A : List[Any] ) ->Union[str, Any]:
"""simple docstring"""
a__ = self.num_labels
a__ = LevitForImageClassification(__A )
model.to(__A )
model.eval()
a__ = model(__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self : List[str] ) ->int:
"""simple docstring"""
a__ = self.prepare_config_and_inputs()
a__ , a__ , a__ = config_and_inputs
a__ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class LevitModelTest ( ModelTesterMixin ,PipelineTesterMixin ,unittest.TestCase):
lowerCamelCase_ = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
lowerCamelCase_ = (
{
'feature-extraction': LevitModel,
'image-classification': (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
def _snake_case ( self : Optional[int] ) ->List[Any]:
"""simple docstring"""
a__ = LevitModelTester(self )
a__ = ConfigTester(self , config_class=__A , has_text_modality=__A , hidden_size=37 )
def _snake_case ( self : Tuple ) ->Dict:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _snake_case ( self : Optional[Any] ) ->Union[str, Any]:
"""simple docstring"""
return
@unittest.skip(reason="Levit does not use inputs_embeds" )
def _snake_case ( self : Union[str, Any] ) ->Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason="Levit does not support input and output embeddings" )
def _snake_case ( self : Union[str, Any] ) ->str:
"""simple docstring"""
pass
@unittest.skip(reason="Levit does not output attentions" )
def _snake_case ( self : List[Any] ) ->int:
"""simple docstring"""
pass
def _snake_case ( self : int ) ->Optional[Any]:
"""simple docstring"""
a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ = model_class(__A )
a__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__ = [*signature.parameters.keys()]
a__ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __A )
def _snake_case ( self : Any ) ->str:
"""simple docstring"""
def check_hidden_states_output(__A : Tuple , __A : Optional[int] , __A : Optional[int] ):
a__ = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
a__ = model(**self._prepare_for_class(__A , __A ) )
a__ = outputs.hidden_states
a__ = len(self.model_tester.depths ) + 1
self.assertEqual(len(__A ) , __A )
a__ = (self.model_tester.image_size, self.model_tester.image_size)
a__ , a__ = image_size[0], image_size[1]
for _ in range(4 ):
a__ = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
a__ = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ = True
check_hidden_states_output(__A , __A , __A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a__ = True
check_hidden_states_output(__A , __A , __A )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _snake_case ( self : int ) ->Optional[int]:
"""simple docstring"""
pass
def _snake_case ( self : Optional[Any] , __A : Union[str, Any] , __A : str , __A : List[str]=False ) ->List[str]:
"""simple docstring"""
a__ = super()._prepare_for_class(__A , __A , return_labels=__A )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _snake_case ( self : Union[str, Any] ) ->Tuple:
"""simple docstring"""
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def _snake_case ( self : Dict ) ->List[Any]:
"""simple docstring"""
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A )
def _snake_case ( self : List[str] ) ->Union[str, Any]:
"""simple docstring"""
if not self.model_tester.is_training:
return
a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common()
a__ = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(__A )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
a__ = model_class(__A )
model.to(__A )
model.train()
a__ = self._prepare_for_class(__A , __A , return_labels=__A )
a__ = model(**__A ).loss
loss.backward()
def _snake_case ( self : Optional[Any] ) ->Any:
"""simple docstring"""
a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
a__ = False
a__ = True
for model_class in self.all_model_classes:
if model_class in get_values(__A ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
a__ = model_class(__A )
model.gradient_checkpointing_enable()
model.to(__A )
model.train()
a__ = self._prepare_for_class(__A , __A , return_labels=__A )
a__ = model(**__A ).loss
loss.backward()
def _snake_case ( self : List[str] ) ->Dict:
"""simple docstring"""
a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common()
a__ = [
{"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
{"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
{"title": "regression", "num_labels": 1, "dtype": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(__A ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F'''Testing {model_class} with {problem_type['title']}''' ):
a__ = problem_type["title"]
a__ = problem_type["num_labels"]
a__ = model_class(__A )
model.to(__A )
model.train()
a__ = self._prepare_for_class(__A , __A , return_labels=__A )
if problem_type["num_labels"] > 1:
a__ = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] )
a__ = inputs["labels"].to(problem_type["dtype"] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=__A ) as warning_list:
a__ = model(**__A ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F'''Something is going wrong in the regression problem: intercepted {w.message}''' )
loss.backward()
@slow
def _snake_case ( self : Any ) ->Optional[int]:
"""simple docstring"""
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ = LevitModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def lowerCamelCase__ ( ) -> Optional[int]:
"""simple docstring"""
a__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowerCAmelCase_ ( unittest.TestCase):
@cached_property
def _snake_case ( self : Optional[Any] ) ->Any:
"""simple docstring"""
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def _snake_case ( self : List[str] ) ->str:
"""simple docstring"""
a__ = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
__A )
a__ = self.default_image_processor
a__ = prepare_img()
a__ = image_processor(images=__A , return_tensors="pt" ).to(__A )
# forward pass
with torch.no_grad():
a__ = model(**__A )
# verify the logits
a__ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __A )
a__ = torch.tensor([1.0_448, -0.3_745, -1.8_317] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __A , atol=1E-4 ) )
| 701 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
snake_case__ = False
class lowerCAmelCase_ ( unittest.TestCase):
pass
@nightly
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase):
    def tearDown( self ) ->Optional[int]:
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_remove_unused_weights_save_load( self ) ->Any:
        """simple docstring"""
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion" )
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0 )
        image = pipe(
            prompt=prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname )
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = generator.manual_seed(0 )
        new_image = pipe(
            prompt=prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
        assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"

    def test_versatile_diffusion_text2img( self ) ->List[Any]:
        """simple docstring"""
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion" , torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0 )
        image = pipe(
            prompt=prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 373 | 0 |
'''simple docstring'''
def perfect( number: int ) -> bool:
    return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number


if __name__ == "__main__":
    print('''Program to check whether a number is a Perfect number or not...''')
    number = int(input('''Enter number: ''').strip())
    print(f"""{number} is {'' if perfect(number) else 'not '}a Perfect Number.""")
| 538 |
'''simple docstring'''
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : Union[str, Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
'''google/efficientnet-b7''': '''https://huggingface.co/google/efficientnet-b7/resolve/main/config.json''',
}
class EfficientNetConfig( PretrainedConfig ):
    model_type = '''efficientnet'''
def __init__( self , _UpperCAmelCase = 3 , _UpperCAmelCase = 600 , _UpperCAmelCase = 2.0 , _UpperCAmelCase = 3.1 , _UpperCAmelCase = 8 , _UpperCAmelCase = [3, 3, 5, 3, 5, 5, 3] , _UpperCAmelCase = [32, 16, 24, 40, 80, 112, 192] , _UpperCAmelCase = [16, 24, 40, 80, 112, 192, 320] , _UpperCAmelCase = [] , _UpperCAmelCase = [1, 2, 2, 2, 1, 2, 1] , _UpperCAmelCase = [1, 2, 2, 3, 3, 4, 1] , _UpperCAmelCase = [1, 6, 6, 6, 6, 6, 6] , _UpperCAmelCase = 0.25 , _UpperCAmelCase = "swish" , _UpperCAmelCase = 2560 , _UpperCAmelCase = "mean" , _UpperCAmelCase = 0.02 , _UpperCAmelCase = 0.001 , _UpperCAmelCase = 0.99 , _UpperCAmelCase = 0.5 , _UpperCAmelCase = 0.2 , **_UpperCAmelCase , ) -> Dict:
super().__init__(**_UpperCAmelCase )
snake_case__ =num_channels
snake_case__ =image_size
snake_case__ =width_coefficient
snake_case__ =depth_coefficient
snake_case__ =depth_divisor
snake_case__ =kernel_sizes
snake_case__ =in_channels
snake_case__ =out_channels
snake_case__ =depthwise_padding
snake_case__ =strides
snake_case__ =num_block_repeats
snake_case__ =expand_ratios
snake_case__ =squeeze_expansion_ratio
snake_case__ =hidden_act
snake_case__ =hidden_dim
snake_case__ =pooling_type
snake_case__ =initializer_range
snake_case__ =batch_norm_eps
snake_case__ =batch_norm_momentum
snake_case__ =dropout_rate
snake_case__ =drop_connect_rate
snake_case__ =sum(_UpperCAmelCase ) * 4
class EfficientNetOnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse('''1.11''' )
@property
def _lowercase ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def _lowercase ( self ) -> float:
return 1E-5
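    # (added note) 1e-5 above is the absolute tolerance later used when an
    # exported ONNX graph is validated against the PyTorch forward pass.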
| 538 | 1 |
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class a__ ( ctypes.Structure ):
A = [('''size''', ctypes.c_int), ('''visible''', ctypes.c_byte)]
def hide_cursor( ):
"""simple docstring"""
if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-1_1 )
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle , ctypes.byref(ci ) )
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle , ctypes.byref(ci ) )
elif os.name == "posix":
sys.stdout.write("\033[?25l" )
sys.stdout.flush()
def show_cursor( ):
"""simple docstring"""
if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-1_1 )
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle , ctypes.byref(ci ) )
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle , ctypes.byref(ci ) )
elif os.name == "posix":
sys.stdout.write("\033[?25h" )
sys.stdout.flush()
@contextmanager
def _snake_case ( ):
"""simple docstring"""
try:
hide_cursor()
yield
finally:
show_cursor()
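# Illustrative usage of the context manager above (an addition; the manager
# itself keeps the obfuscated name _snake_case in this file):
# with _snake_case():
#     run_interactive_menu()  # hypothetical call; the cursor is restored on exit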
| 707 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_xlm_roberta_xl''': [
'''XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaXLConfig''',
'''XLMRobertaXLOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta_xl"] = [
'''XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaXLForCausalLM''',
'''XLMRobertaXLForMaskedLM''',
'''XLMRobertaXLForMultipleChoice''',
'''XLMRobertaXLForQuestionAnswering''',
'''XLMRobertaXLForSequenceClassification''',
'''XLMRobertaXLForTokenClassification''',
'''XLMRobertaXLModel''',
'''XLMRobertaXLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
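# (added note) _LazyModule defers the heavy torch imports declared above until
# one of the listed attributes, e.g. XLMRobertaXLModel, is first accessed.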
| 316 | 0 |
import random
from typing import Any
def fisher_yates_shuffle( data):
    '''simple docstring'''
    for _ in range(len(data)):
        a = random.randint(0 , len(data) - 1)
        b = random.randint(0 , len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data


if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ['''python''', '''says''', '''hello''', '''!''']
    print('''Fisher-Yates Shuffle:''')
    print('''List''', integers, strings)
    print('''FY Shuffle''', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
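# Note (an illustrative addition, not part of the original file): the loop
# above swaps two uniformly random positions per step, a naive variant; the
# classic Fisher-Yates walk below produces an exactly uniform permutation.
def fisher_yates_shuffle_classic(data):
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)  # choose only from the not-yet-fixed prefix
        data[i], data[j] = data[j], data[i]
    return data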
| 250 |
import os
def solution( filename = "input.txt"):
    '''simple docstring'''
    with open(os.path.join(os.path.dirname(__file__) , filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")]
            for line in input_file.readlines()
        ]
    rows = len(matrix)
    cols = len(matrix[0])
    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]
    for j in range(1 , cols):
        # first fill column j as if only rightward moves were allowed,
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]
        # then relax the column with downward moves,
        for i in range(1 , rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j])
        # and finally with upward moves.
        for i in range(rows - 2 , -1 , -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j])
    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)
if __name__ == "__main__":
print(f'''{solution() = }''')
| 250 | 1 |
'''simple docstring'''
class CircularQueue :
    def __init__( self , n):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__( self):
        return self.size

    def is_empty( self):
        return self.size == 0

    def first( self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue( self , data):
        if self.size >= self.n:
            raise Exception("""QUEUE IS FULL""")
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue( self):
        if self.size == 0:
            raise Exception("""UNDERFLOW""")
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
| 113 |
'''simple docstring'''
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed ( TransformedDistribution ):
    def __init__( self , base_distribution , loc=None , scale=None , event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc
        super().__init__(base_distribution , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=event_dim)])

    @property
    def mean( self):
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance( self):
        return self.base_dist.variance * self.scale**2

    @property
    def stddev( self):
        return self.variance.sqrt()
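# Illustrative usage (an addition, not in the original file):
# base = Normal(torch.zeros(1), torch.ones(1))
# dist = AffineTransformed(base, loc=3.0, scale=2.0)
# dist.mean -> tensor([3.]), dist.stddev -> tensor([2.])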
class _snake_case ( nn.Module ):
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase):
super().__init__(**_lowerCamelCase)
UpperCAmelCase__ : int = args_dim
UpperCAmelCase__ : Optional[int] = nn.ModuleList([nn.Linear(_lowerCamelCase , _lowerCamelCase) for dim in args_dim.values()])
UpperCAmelCase__ : Optional[Any] = domain_map
def snake_case__ ( self , _lowerCamelCase):
UpperCAmelCase__ : Optional[int] = [proj(_lowerCamelCase) for proj in self.proj]
return self.domain_map(*_lowerCamelCase)
class _snake_case ( nn.Module ):
def __init__( self , _lowerCamelCase):
super().__init__()
UpperCAmelCase__ : Optional[int] = function
def snake_case__ ( self , _lowerCamelCase , *_lowerCamelCase):
return self.function(_lowerCamelCase , *_lowerCamelCase)
class _snake_case :
lowerCAmelCase :type
lowerCAmelCase :int
lowerCAmelCase :Dict[str, int]
def __init__( self , _lowerCamelCase = 1):
UpperCAmelCase__ : Optional[Any] = dim
UpperCAmelCase__ : int = {k: dim * self.args_dim[k] for k in self.args_dim}
def snake_case__ ( self , _lowerCamelCase):
if self.dim == 1:
return self.distribution_class(*_lowerCamelCase)
else:
return Independent(self.distribution_class(*_lowerCamelCase) , 1)
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , ):
UpperCAmelCase__ : Dict = self._base_distribution(_lowerCamelCase)
if loc is None and scale is None:
return distr
else:
return AffineTransformed(_lowerCamelCase , loc=_lowerCamelCase , scale=_lowerCamelCase , event_dim=self.event_dim)
@property
def snake_case__ ( self):
return () if self.dim == 1 else (self.dim,)
@property
def snake_case__ ( self):
return len(self.event_shape)
@property
def snake_case__ ( self):
return 0.0
def snake_case__ ( self , _lowerCamelCase):
return ParameterProjection(
in_features=_lowerCamelCase , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map) , )
def snake_case__ ( self , *_lowerCamelCase):
raise NotImplementedError()
@staticmethod
def snake_case__ ( _lowerCamelCase):
return (x + torch.sqrt(torch.square(_lowerCamelCase) + 4.0)) / 2.0
class _snake_case ( a__ ):
lowerCAmelCase :Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
lowerCAmelCase :type = StudentT
@classmethod
def snake_case__ ( cls , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase):
UpperCAmelCase__ : Optional[Any] = cls.squareplus(_lowerCamelCase).clamp_min(torch.finfo(scale.dtype).eps)
UpperCAmelCase__ : Any = 2.0 + cls.squareplus(_lowerCamelCase)
return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)
class _snake_case ( a__ ):
lowerCAmelCase :Dict[str, int] = {"loc": 1, "scale": 1}
lowerCAmelCase :type = Normal
@classmethod
def snake_case__ ( cls , _lowerCamelCase , _lowerCamelCase):
UpperCAmelCase__ : str = cls.squareplus(_lowerCamelCase).clamp_min(torch.finfo(scale.dtype).eps)
return loc.squeeze(-1), scale.squeeze(-1)
class _snake_case ( a__ ):
lowerCAmelCase :Dict[str, int] = {"total_count": 1, "logits": 1}
lowerCAmelCase :type = NegativeBinomial
@classmethod
def snake_case__ ( cls , _lowerCamelCase , _lowerCamelCase):
UpperCAmelCase__ : str = cls.squareplus(_lowerCamelCase)
return total_count.squeeze(-1), logits.squeeze(-1)
def snake_case__ ( self , _lowerCamelCase):
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = distr_args
if self.dim == 1:
return self.distribution_class(total_count=_lowerCamelCase , logits=_lowerCamelCase)
else:
return Independent(self.distribution_class(total_count=_lowerCamelCase , logits=_lowerCamelCase) , 1)
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None):
UpperCAmelCase__ , UpperCAmelCase__ : Any = distr_args
if scale is not None:
# See scaling property of Gamma.
logits += scale.log()
        return self._base_distribution((total_count, logits))
| 113 | 1 |
from __future__ import annotations
from collections import namedtuple
def electric_power( voltage , current , power ):
'''simple docstring'''
    result = namedtuple("result" , "name value" )
if (voltage, current, power).count(0 ) != 1:
raise ValueError("Only one argument must be 0" )
elif power < 0:
raise ValueError(
"Power cannot be negative in any electrical/electronics system" )
elif voltage == 0:
return result("voltage" , power / current )
elif current == 0:
return result("current" , power / voltage )
elif power == 0:
return result("power" , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError("Exactly one argument must be 0" )
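# Illustrative usage (an addition, not in the original file):
# electric_power(voltage=0, current=2, power=4) -> result(name='voltage', value=2.0)
# electric_power(voltage=2, current=3, power=0) -> result(name='power', value=6.0)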
if __name__ == "__main__":
import doctest
doctest.testmod()
| 340 |
'''simple docstring'''
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--original_config_file''',
type=str,
required=True,
help='''The YAML config file corresponding to the original architecture.''',
)
parser.add_argument(
'''--num_in_channels''',
default=None,
type=int,
help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
)
parser.add_argument(
'''--image_size''',
default=512,
type=int,
help=(
'''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'''
''' Base. Use 768 for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--extract_ema''',
action='''store_true''',
help=(
'''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
),
)
parser.add_argument(
'''--upcast_attention''',
action='''store_true''',
help=(
'''Whether the attention computation should always be upcasted. This is necessary when running stable'''
''' diffusion 2.1.'''
),
)
parser.add_argument(
'''--from_safetensors''',
action='''store_true''',
help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
)
parser.add_argument(
'''--to_safetensors''',
action='''store_true''',
help='''Whether to store pipeline in safetensors format or not.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
def __A ( lowerCAmelCase_ ):
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(f"could not parse string as bool {string}" )
parser.add_argument(
'''--use_linear_projection''', help='''Override for use linear projection''', required=False, type=parse_bool
)
parser.add_argument('''--cross_attention_dim''', help='''Override for cross attention_dim''', required=False, type=int)
    args = parser.parse_args()
    controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 414 | 0 |
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)
    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1- min(µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x),(1- µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1,(µA(x), µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = min[0,(µA(x), µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
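    # (added note) fuzzy_or/fuzzy_and return a (universe, membership) pair,
    # which is why each call above is indexed with [1] to keep only the
    # membership values.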
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title("""Young""")
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title("""Middle aged""")
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title("""union""")
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title("""intersection""")
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title("""complement_a""")
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title("""difference a/b""")
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title("""alg_sum""")
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title("""alg_product""")
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title("""bdd_sum""")
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title("""bdd_difference""")
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 708 |
from __future__ import annotations
def a_ ( _A , _A ) -> str:
"""simple docstring"""
# Checks if the entire collection has been sorted
if len(_A ) <= 1 or n <= 1:
return
insert_next(_A , n - 1 )
rec_insertion_sort(_A , n - 1 )
def a_ ( _A , _A ) -> Tuple:
"""simple docstring"""
# Checks order between adjacent elements
if index >= len(_A ) or collection[index - 1] <= collection[index]:
return
# Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )
insert_next(_A , index + 1 )
if __name__ == "__main__":
    numbers = input("""Enter integers separated by spaces: """)
    number_list: list[int] = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
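    # Minimal self-test (an illustrative addition, not in the original script):
    sample = [5, 3, 1, 4, 2]
    rec_insertion_sort(sample, len(sample))
    assert sample == [1, 2, 3, 4, 5]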
| 372 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"""facebook/levit-128S""": """https://huggingface.co/facebook/levit-128S/resolve/main/config.json""",
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class LevitConfig ( PretrainedConfig ):
    """simple docstring"""

    model_type = "levit"
def __init__( self , SCREAMING_SNAKE_CASE__=224 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=[128, 256, 384] , SCREAMING_SNAKE_CASE__=[4, 8, 12] , SCREAMING_SNAKE_CASE__=[4, 4, 4] , SCREAMING_SNAKE_CASE__=[16, 16, 16] , SCREAMING_SNAKE_CASE__=0 , SCREAMING_SNAKE_CASE__=[2, 2, 2] , SCREAMING_SNAKE_CASE__=[2, 2, 2] , SCREAMING_SNAKE_CASE__=0.0_2 , **SCREAMING_SNAKE_CASE__ , ) -> Optional[int]:
super().__init__(**SCREAMING_SNAKE_CASE__ )
A__ = image_size
A__ = num_channels
A__ = kernel_size
A__ = stride
A__ = padding
A__ = hidden_sizes
A__ = num_attention_heads
A__ = depths
A__ = key_dim
A__ = drop_path_rate
A__ = patch_size
A__ = attention_ratio
A__ = mlp_ratio
A__ = initializer_range
A__ = [
["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
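        # (added note, describing the list above) each down_ops entry encodes a
        # shrink-attention stage as [op, key_dim, num_heads, attn_ratio,
        # mlp_ratio, stride], with num_heads derived as hidden_size // key_dim.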
class LevitOnnxConfig ( OnnxConfig ):
    """simple docstring"""

    torch_onnx_minimum_version = version.parse("1.11" )
@property
def snake_case__ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def snake_case__ ( self ) -> float:
return 1e-4
| 104 |
'''simple docstring'''
a : Dict = range(2, 20 + 1)
a : Optional[int] = [10**k for k in range(ks[-1] + 1)]
a : dict[int, dict[int, list[list[int]]]] = {}
def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[Any] ) -> int:
__snake_case = sum(a_i[j] for j in range(_UpperCAmelCase , len(_UpperCAmelCase ) ) )
__snake_case = sum(a_i[j] * base[j] for j in range(min(len(_UpperCAmelCase ) , _UpperCAmelCase ) ) )
__snake_case , __snake_case = 0, 0
__snake_case = n - i
__snake_case = memo.get(_UpperCAmelCase )
if sub_memo is not None:
__snake_case = sub_memo.get(_UpperCAmelCase )
if jumps is not None and len(_UpperCAmelCase ) > 0:
# find and make the largest jump without going over
__snake_case = -1
for _k in range(len(_UpperCAmelCase ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
__snake_case = _k
break
if max_jump >= 0:
__snake_case , __snake_case , __snake_case = jumps[max_jump]
# since the difference between jumps is cached, add c
__snake_case = diff + c
for j in range(min(_UpperCAmelCase , len(_UpperCAmelCase ) ) ):
__snake_case , __snake_case = divmod(_UpperCAmelCase , 10 )
if new_c > 0:
add(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
else:
__snake_case = []
else:
__snake_case = {c: []}
__snake_case = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
__snake_case , __snake_case = next_term(_UpperCAmelCase , k - 1 , i + dn , _UpperCAmelCase )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
__snake_case , __snake_case = compute(_UpperCAmelCase , _UpperCAmelCase , i + dn , _UpperCAmelCase )
diff += _diff
dn += terms_jumped
__snake_case = sub_memo[c]
# keep jumps sorted by # of terms skipped
__snake_case = 0
while j < len(_UpperCAmelCase ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(_UpperCAmelCase , (diff, dn, k) )
return (diff, dn)
def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] ) -> Optional[int]:
if i >= n:
return 0, i
if k > len(_UpperCAmelCase ):
a_i.extend([0 for _ in range(k - len(_UpperCAmelCase ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
__snake_case = i
__snake_case , __snake_case , __snake_case = 0, 0, 0
for j in range(len(_UpperCAmelCase ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
__snake_case = ds_c + ds_b
diff += addend
__snake_case = 0
for j in range(_UpperCAmelCase ):
__snake_case = a_i[j] + addend
__snake_case , __snake_case = divmod(_UpperCAmelCase , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
return diff, i - start_i
def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : str ) -> Tuple:
for j in range(_UpperCAmelCase , len(_UpperCAmelCase ) ):
__snake_case = digits[j] + addend
if s >= 10:
__snake_case , __snake_case = divmod(_UpperCAmelCase , 10 )
__snake_case = addend // 10 + quotient
else:
__snake_case = s
__snake_case = addend // 10
if addend == 0:
break
while addend > 0:
__snake_case , __snake_case = divmod(_UpperCAmelCase , 10 )
digits.append(_UpperCAmelCase )
def __UpperCAmelCase ( _UpperCAmelCase : int = 10**15 ) -> int:
__snake_case = [1]
__snake_case = 1
__snake_case = 0
while True:
__snake_case , __snake_case = next_term(_UpperCAmelCase , 20 , i + dn , _UpperCAmelCase )
dn += terms_jumped
if dn == n - i:
break
__snake_case = 0
for j in range(len(_UpperCAmelCase ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(F'''{solution() = }''')
| 69 | 0 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
a_ : str = logging.get_logger(__name__)
a_ : Tuple = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
a_ : List[str] = {
'tokenizer_file': {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json',
},
}
a_ : Optional[int] = {
'gpt-neox-20b': 20_48,
}
class GPTNeoXTokenizerFast ( PreTrainedTokenizerFast ):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']

    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , **kwargs , ) -> str:
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , add_prefix_space=add_prefix_space , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('''add_prefix_space''' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('''type''' ) )
            pre_tok_state['''add_prefix_space'''] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space

    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )

    def _build_conversation_input_ids( self , conversation ) -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
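# Illustrative usage (an addition, not in the original file):
# tokenizer = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
# tokenizer("hello world").input_ids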
| 148 |
import random
def _partition( data , pivot ):
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element )
        elif element > pivot:
            greater.append(element )
        else:
            equal.append(element )
    return less, equal, greater


def quick_select( items , index ):
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)
    # invalid input
    if index >= len(items ) or index < 0:
        return None
    pivot = items[random.randint(0 , len(items ) - 1 )]
    count = 0
    smaller, equal, larger = _partition(items , pivot )
    count = len(equal )
    m = len(smaller )
    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller , index )
    # must be in larger
    else:
        return quick_select(larger , index - (m + count) )
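# Minimal check (an illustrative addition, not in the original file): index
# counts from zero in sorted order, so index 5 of this list is 54.
if __name__ == "__main__":
    assert quick_select([2, 4, 5, 7, 899, 54, 32], 5) == 54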
| 148 | 1 |
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs :
    '''simple docstring'''
    def forward( self , input_ids , token_type_ids , attention_mask ):
        return None


class FuncNonContiguousArgs :
    '''simple docstring'''
    def forward( self , input_ids , some_other_args , token_type_ids , attention_mask ):
        return None


class OnnxExportTestCase ( unittest.TestCase ):
    '''simple docstring'''
    MODEL_TO_TEST = [
# (model_name, model_kwargs)
("""bert-base-cased""", {}),
("""gpt2""", {"""use_cache""": False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model , 'tf' , 12 , **model_kwargs )
@require_torch
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model , 'pt' , 12 , **model_kwargs )
@require_torch
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
from transformers import BertModel
lowercase = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words']
with NamedTemporaryFile(mode='w+t' ) as vocab_file:
vocab_file.write('\n'.join(snake_case ) )
vocab_file.flush()
lowercase = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
lowercase = BertModel(BertConfig(vocab_size=len(snake_case ) ) )
model.save_pretrained(snake_case )
self._test_export(snake_case , 'pt' , 12 , snake_case )
@require_tf
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model , 'tf' , 12 , **model_kwargs )
            quantized_path = quantize(Path(path ) )
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
@require_torch
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model , 'pt' , 12 , **model_kwargs )
            quantized_path = quantize(path )
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
    def _test_export( self , model , framework , opset , tokenizer=None , **model_kwargs ):
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir ).joinpath('model.onnx' )
                # Remove folder if exists
                if path.parent.exists():
                    path.parent.rmdir()
                # Export
                convert(framework , model , path , opset , tokenizer , **model_kwargs )
                return path
        except Exception as e:
            self.fail(e )
@require_torch
@require_tokenizers
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
from transformers import BertModel
lowercase = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
lowercase = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(snake_case , snake_case , 'pt' )
@require_tf
@require_tokenizers
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
from transformers import TFBertModel
lowercase = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
lowercase = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(snake_case , snake_case , 'tf' )
    def _test_infer_dynamic_axis( self , model , tokenizer , framework ):
        nlp = FeatureExtractionPipeline(model , tokenizer )
        variable_names = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1']
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp , framework )
        # Assert all variables are present
        self.assertEqual(len(shapes ) , len(variable_names ) )
        self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
        self.assertSequenceEqual(variable_names[:3] , input_vars )
        self.assertSequenceEqual(variable_names[3:] , output_vars )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: 'batch', 1: 'sequence'} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes['output_0'] , {0: 'batch', 1: 'sequence'} )
self.assertDictEqual(shapes['output_1'] , {0: 'batch'} )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = ['input_ids', 'attention_mask', 'token_type_ids']
lowercase = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]}
lowercase , lowercase = ensure_valid_input(FuncContiguousArgs() , snake_case , snake_case )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(snake_case ) , 3 )
# Should have exactly the same input names
self.assertEqual(set(snake_case ) , set(snake_case ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(snake_case , (tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
lowercase , lowercase = ensure_valid_input(FuncNonContiguousArgs() , snake_case , snake_case )
# Should have exactly the one arg (all before the one not provided "some_other_args")
self.assertEqual(len(snake_case ) , 1 )
self.assertEqual(len(snake_case ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens['input_ids'] )
self.assertEqual(ordered_input_names[0] , 'input_ids' )
def SCREAMING_SNAKE_CASE__ ( self ):
        generated = generate_identified_filename(Path('/home/something/my_fake_model.onnx' ) , '-test' )
self.assertEqual('/home/something/my_fake_model-test.onnx' , generated.as_posix() )
| 84 |
import unittest
from diffusers.models.unet_2d_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class DownBlock2DTests ( UNetBlockTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    block_class = DownBlock2D # noqa F405
    block_type = '''down'''
def UpperCAmelCase ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
__lowercase = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
super().test_output(__lowerCamelCase )
class ResnetDownsampleBlock2DTests ( UNetBlockTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    block_class = ResnetDownsampleBlock2D # noqa F405
    block_type = '''down'''
def UpperCAmelCase ( self : str ) -> int:
'''simple docstring'''
__lowercase = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
super().test_output(__lowerCamelCase )
class AttnDownBlock2DTests ( UNetBlockTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    block_class = AttnDownBlock2D # noqa F405
    block_type = '''down'''
def UpperCAmelCase ( self : Any ) -> List[Any]:
'''simple docstring'''
__lowercase = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
super().test_output(__lowerCamelCase )
class CrossAttnDownBlock2DTests ( UNetBlockTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    block_class = CrossAttnDownBlock2D # noqa F405
    block_type = '''down'''
def UpperCAmelCase ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
__lowercase , __lowercase = super().prepare_init_args_and_inputs_for_common()
__lowercase = 32
return init_dict, inputs_dict
def UpperCAmelCase ( self : Any ) -> int:
'''simple docstring'''
__lowercase = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
super().test_output(__lowerCamelCase )
class SimpleCrossAttnDownBlock2DTests ( UNetBlockTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    block_class = SimpleCrossAttnDownBlock2D # noqa F405
    block_type = '''down'''
@property
def UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
return super().get_dummy_input(include_encoder_hidden_states=__lowerCamelCase )
def UpperCAmelCase ( self : List[Any] ) -> List[str]:
'''simple docstring'''
__lowercase , __lowercase = super().prepare_init_args_and_inputs_for_common()
__lowercase = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == 'mps' , 'MPS result is not consistent' )
def UpperCAmelCase ( self : Dict ) -> Optional[int]:
'''simple docstring'''
__lowercase = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
super().test_output(__lowerCamelCase )
class SkipDownBlock2DTests ( UNetBlockTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    block_class = SkipDownBlock2D # noqa F405
    block_type = '''down'''
@property
def UpperCAmelCase ( self : Optional[Any] ) -> Any:
'''simple docstring'''
return super().get_dummy_input(include_skip_sample=__lowerCamelCase )
def UpperCAmelCase ( self : List[Any] ) -> int:
'''simple docstring'''
__lowercase = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
super().test_output(__lowerCamelCase )
class AttnSkipDownBlock2DTests ( UNetBlockTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    block_class = AttnSkipDownBlock2D # noqa F405
    block_type = '''down'''
@property
def UpperCAmelCase ( self : Tuple ) -> List[str]:
'''simple docstring'''
return super().get_dummy_input(include_skip_sample=__lowerCamelCase )
def UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
__lowercase = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
super().test_output(__lowerCamelCase )
class DownEncoderBlock2DTests ( UNetBlockTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    block_class = DownEncoderBlock2D # noqa F405
    block_type = '''down'''
@property
def UpperCAmelCase ( self : Optional[Any] ) -> Any:
'''simple docstring'''
return super().get_dummy_input(include_temb=__lowerCamelCase )
def UpperCAmelCase ( self : Union[str, Any] ) -> int:
'''simple docstring'''
__lowercase = {
'in_channels': 32,
'out_channels': 32,
}
__lowercase = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase ( self : str ) -> Optional[int]:
'''simple docstring'''
__lowercase = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
super().test_output(__lowerCamelCase )
class AttnDownEncoderBlock2DTests ( UNetBlockTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    block_class = AttnDownEncoderBlock2D # noqa F405
    block_type = '''down'''
@property
def UpperCAmelCase ( self : int ) -> str:
'''simple docstring'''
return super().get_dummy_input(include_temb=__lowerCamelCase )
def UpperCAmelCase ( self : str ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = {
'in_channels': 32,
'out_channels': 32,
}
__lowercase = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
__lowercase = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
super().test_output(__lowerCamelCase )
class UNetMidBlock2DTests ( UNetBlockTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    block_class = UNetMidBlock2D # noqa F405
    block_type = '''mid'''
def UpperCAmelCase ( self : int ) -> List[str]:
'''simple docstring'''
__lowercase = {
'in_channels': 32,
'temb_channels': 128,
}
__lowercase = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase ( self : str ) -> List[str]:
'''simple docstring'''
__lowercase = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
super().test_output(__lowerCamelCase )
class UNetMidBlock2DCrossAttnTests ( UNetBlockTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    block_class = UNetMidBlock2DCrossAttn # noqa F405
    block_type = '''mid'''
def UpperCAmelCase ( self : int ) -> Tuple:
'''simple docstring'''
__lowercase , __lowercase = super().prepare_init_args_and_inputs_for_common()
__lowercase = 32
return init_dict, inputs_dict
def UpperCAmelCase ( self : Dict ) -> List[Any]:
'''simple docstring'''
__lowercase = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
super().test_output(__lowerCamelCase )
class UNetMidBlock2DSimpleCrossAttnTests ( UNetBlockTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    block_class = UNetMidBlock2DSimpleCrossAttn # noqa F405
    block_type = '''mid'''
@property
def UpperCAmelCase ( self : str ) -> Tuple:
'''simple docstring'''
return super().get_dummy_input(include_encoder_hidden_states=__lowerCamelCase )
def UpperCAmelCase ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
__lowercase , __lowercase = super().prepare_init_args_and_inputs_for_common()
__lowercase = 32
return init_dict, inputs_dict
def UpperCAmelCase ( self : Optional[int] ) -> int:
'''simple docstring'''
__lowercase = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
super().test_output(__lowerCamelCase )
class UpBlock2DTests ( UNetBlockTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    block_class = UpBlock2D # noqa F405
    block_type = '''up'''
@property
def UpperCAmelCase ( self : str ) -> List[Any]:
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=__lowerCamelCase )
def UpperCAmelCase ( self : Optional[Any] ) -> str:
'''simple docstring'''
__lowercase = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
super().test_output(__lowerCamelCase )
class ResnetUpsampleBlock2DTests ( UNetBlockTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    block_class = ResnetUpsampleBlock2D # noqa F405
    block_type = '''up'''
@property
def UpperCAmelCase ( self : int ) -> Optional[Any]:
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=__lowerCamelCase )
def UpperCAmelCase ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
__lowercase = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
super().test_output(__lowerCamelCase )
class CrossAttnUpBlock2DTests ( UNetBlockTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    block_class = CrossAttnUpBlock2D # noqa F405
    block_type = '''up'''
@property
def UpperCAmelCase ( self : List[Any] ) -> Tuple:
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=__lowerCamelCase )
def UpperCAmelCase ( self : Any ) -> Tuple:
'''simple docstring'''
__lowercase , __lowercase = super().prepare_init_args_and_inputs_for_common()
__lowercase = 32
return init_dict, inputs_dict
def UpperCAmelCase ( self : Dict ) -> int:
'''simple docstring'''
__lowercase = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
super().test_output(__lowerCamelCase )
class SimpleCrossAttnUpBlock2DTests ( UNetBlockTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    block_class = SimpleCrossAttnUpBlock2D # noqa F405
    block_type = '''up'''
@property
def UpperCAmelCase ( self : Any ) -> Optional[Any]:
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=__lowerCamelCase , include_encoder_hidden_states=__lowerCamelCase )
def UpperCAmelCase ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
__lowercase , __lowercase = super().prepare_init_args_and_inputs_for_common()
__lowercase = 32
return init_dict, inputs_dict
def UpperCAmelCase ( self : Union[str, Any] ) -> str:
'''simple docstring'''
__lowercase = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
super().test_output(__lowerCamelCase )
class AttnUpBlock2DTests ( UNetBlockTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    block_class = AttnUpBlock2D # noqa F405
    block_type = '''up'''
@property
def UpperCAmelCase ( self : Any ) -> str:
'''simple docstring'''
return super().get_dummy_input(include_res_hidden_states_tuple=__lowerCamelCase )
@unittest.skipIf(torch_device == 'mps' , 'MPS result is not consistent' )
def UpperCAmelCase ( self : int ) -> str:
'''simple docstring'''
__lowercase = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
super().test_output(__lowerCamelCase )
class SkipUpBlock2DTests( UpperCAmelCase_ , unittest.TestCase ):
    '''simple docstring'''
    block_class = SkipUpBlock2D # noqa F405
    block_type = '''up'''

    @property
    def dummy_input( self ):
        '''simple docstring'''
        return super().get_dummy_input(include_res_hidden_states_tuple=True )

    def test_output( self ):
        '''simple docstring'''
        expected_slice = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
        super().test_output(expected_slice )
class AttnSkipUpBlock2DTests( UpperCAmelCase_ , unittest.TestCase ):
    '''simple docstring'''
    block_class = AttnSkipUpBlock2D # noqa F405
    block_type = '''up'''

    @property
    def dummy_input( self ):
        '''simple docstring'''
        return super().get_dummy_input(include_res_hidden_states_tuple=True )

    def test_output( self ):
        '''simple docstring'''
        expected_slice = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
        super().test_output(expected_slice )
class UpDecoderBlock2DTests( UpperCAmelCase_ , unittest.TestCase ):
    '''simple docstring'''
    block_class = UpDecoderBlock2D # noqa F405
    block_type = '''up'''

    @property
    def dummy_input( self ):
        '''simple docstring'''
        return super().get_dummy_input(include_temb=False )

    def prepare_init_args_and_inputs_for_common( self ):
        '''simple docstring'''
        init_dict = {'''in_channels''': 32, '''out_channels''': 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output( self ):
        '''simple docstring'''
        expected_slice = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
        super().test_output(expected_slice )
class AttnUpDecoderBlock2DTests( UpperCAmelCase_ , unittest.TestCase ):
    '''simple docstring'''
    block_class = AttnUpDecoderBlock2D # noqa F405
    block_type = '''up'''

    @property
    def dummy_input( self ):
        '''simple docstring'''
        return super().get_dummy_input(include_temb=False )

    def prepare_init_args_and_inputs_for_common( self ):
        '''simple docstring'''
        init_dict = {'''in_channels''': 32, '''out_channels''': 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output( self ):
        '''simple docstring'''
        expected_slice = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
        super().test_output(expected_slice )
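All of these classes inherit from a tester mixin (here `UpperCAmelCase_`) defined earlier in the file. A minimal sketch of the contract they rely on; the names, shapes, and tolerance below are assumptions for illustration, not the upstream mixin verbatim:

import torch

class BlockTesterMixinSketch:
    block_class = None      # set by each subclass, e.g. UpBlock2D
    block_type = "up"       # "down", "mid", or "up"

    def get_dummy_input(self, include_temb=True, include_res_hidden_states_tuple=False,
                        include_encoder_hidden_states=False):
        # smallest tensor dict the block under test accepts (assumed 32-channel, 32x32 maps)
        batch, channels, size = 4, 32, 32
        inputs = {"hidden_states": torch.randn(batch, channels, size, size)}
        if include_temb:
            inputs["temb"] = torch.randn(batch, 128)
        if include_res_hidden_states_tuple:
            inputs["res_hidden_states_tuple"] = (torch.randn(batch, channels, size, size),)
        if include_encoder_hidden_states:
            inputs["encoder_hidden_states"] = torch.randn(batch, 32, 32)
        return inputs

    def test_output(self, expected_slice):
        # build the block, run one forward pass, compare a fixed 9-value output slice
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        block = self.block_class(**init_dict)
        output = block(**inputs_dict)
        output = output[0] if isinstance(output, tuple) else output
        assert torch.allclose(output[0, -1, -3:, -3:].flatten(),
                              torch.tensor(expected_slice), atol=5e-3)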
| 375 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_nllb_moe': [
'NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP',
'NllbMoeConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_nllb_moe'] = [
'NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST',
'NllbMoeForConditionalGeneration',
'NllbMoeModel',
'NllbMoePreTrainedModel',
'NllbMoeTop2Router',
'NllbMoeSparseMLP',
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 717 |
"""simple docstring"""
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler | 210 | 0 |
'''simple docstring'''
def __UpperCAmelCase ( a : int , b : int ) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive" )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]  # remove the leading "0b"
    max_len = max(len(a_binary ) , len(b_binary ) )
    # pad both operands to the same width, then XOR digit by digit
    return "0b" + "".join(
        str(int(char_a != char_b ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
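A quick sanity check of the repaired function (illustrative values):

# 5 = 0b101, 3 = 0b011; XOR flips every position where the bits differ
print(__UpperCAmelCase(5, 3))    # -> "0b110"
print(__UpperCAmelCase(25, 32))  # -> "0b111001"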
| 69 |
'''simple docstring'''
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
A = logging.get_logger(__name__) # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings( ModelMixin , ConfigMixin ):
    @register_to_config
    def __init__( self, learnable, hidden_size = None, length = None ):
        """simple docstring"""
        super().__init__()
        self.learnable = learnable
        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"
            embeddings = torch.zeros(length, hidden_size )
        else:
            embeddings = None
        self.embeddings = torch.nn.Parameter(embeddings )
class VQDiffusionPipeline( DiffusionPipeline ):
    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler
    def __init__( self, vqvae, text_encoder, tokenizer, transformer, scheduler, learned_classifier_free_sampling_embeddings, ):
        """simple docstring"""
        super().__init__()
        self.register_modules(
            vqvae=vqvae, transformer=transformer, text_encoder=text_encoder, tokenizer=tokenizer, scheduler=scheduler, learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings, )
def UpperCAmelCase_ ( self, A, A, A ):
"""simple docstring"""
lowerCamelCase : List[Any] = len(A ) if isinstance(A, A ) else 1
# get prompt text embeddings
lowerCamelCase : List[Any] = self.tokenizer(
A, padding='max_length', max_length=self.tokenizer.model_max_length, return_tensors='pt', )
lowerCamelCase : Optional[Any] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
lowerCamelCase : Union[str, Any] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
lowerCamelCase : Optional[int] = text_input_ids[:, : self.tokenizer.model_max_length]
lowerCamelCase : Optional[Any] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
lowerCamelCase : Dict = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=A )
# duplicate text embeddings for each generation per prompt
lowerCamelCase : Union[str, Any] = prompt_embeds.repeat_interleave(A, dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
lowerCamelCase : int = self.learned_classifier_free_sampling_embeddings.embeddings
lowerCamelCase : Tuple = negative_prompt_embeds.unsqueeze(0 ).repeat(A, 1, 1 )
else:
lowerCamelCase : Optional[int] = [''] * batch_size
lowerCamelCase : Optional[Any] = text_input_ids.shape[-1]
lowerCamelCase : Union[str, Any] = self.tokenizer(
A, padding='max_length', max_length=A, truncation=A, return_tensors='pt', )
lowerCamelCase : Optional[int] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
lowerCamelCase : Tuple = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=A )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowerCamelCase : Optional[int] = negative_prompt_embeds.shape[1]
lowerCamelCase : List[str] = negative_prompt_embeds.repeat(1, A, 1 )
lowerCamelCase : str = negative_prompt_embeds.view(batch_size * num_images_per_prompt, A, -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCamelCase : Union[str, Any] = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self, A, A = 100, A = 5.0, A = 1.0, A = 1, A = None, A = None, A = "pil", A = True, A = None, A = 1, ):
"""simple docstring"""
if isinstance(A, A ):
lowerCamelCase : Dict = 1
elif isinstance(A, A ):
lowerCamelCase : int = len(A )
else:
raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(A )}''' )
lowerCamelCase : str = batch_size * num_images_per_prompt
lowerCamelCase : List[str] = guidance_scale > 1.0
lowerCamelCase : int = self._encode_prompt(A, A, A )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(A, A ) or callback_steps <= 0)
):
raise ValueError(
F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
F''' {type(A )}.''' )
# get the initial completely masked latents unless the user supplied it
lowerCamelCase : Any = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
lowerCamelCase : Optional[int] = self.transformer.num_vector_embeds - 1
lowerCamelCase : List[Any] = torch.full(A, A ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
'Unexpected latents value(s). All latents must be valid embedding indices i.e. in the range 0,'
F''' {self.transformer.num_vector_embeds - 1} (inclusive).''' )
lowerCamelCase : Optional[int] = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(A, device=self.device )
lowerCamelCase : str = self.scheduler.timesteps.to(self.device )
lowerCamelCase : Dict = latents
for i, t in enumerate(self.progress_bar(A ) ):
# expand the sample if we are doing classifier free guidance
lowerCamelCase : Union[str, Any] = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
lowerCamelCase : List[Any] = self.transformer(A, encoder_hidden_states=A, timestep=A ).sample
if do_classifier_free_guidance:
lowerCamelCase , lowerCamelCase : int = model_output.chunk(2 )
lowerCamelCase : str = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(A, dim=1, keepdim=A )
lowerCamelCase : Optional[int] = self.truncate(A, A )
# remove `log(0)`'s (`-inf`s)
lowerCamelCase : Any = model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
lowerCamelCase : Any = self.scheduler.step(A, timestep=A, sample=A, generator=A ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(A, A, A )
lowerCamelCase : str = self.vqvae.config.vq_embed_dim
lowerCamelCase : List[Any] = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
lowerCamelCase : Tuple = self.vqvae.quantize.get_codebook_entry(A, shape=A )
lowerCamelCase : Union[str, Any] = self.vqvae.decode(A, force_not_quantize=A ).sample
lowerCamelCase : Tuple = (image / 2 + 0.5).clamp(0, 1 )
lowerCamelCase : List[Any] = image.cpu().permute(0, 2, 3, 1 ).numpy()
if output_type == "pil":
lowerCamelCase : int = self.numpy_to_pil(A )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=A )
def UpperCAmelCase_ ( self, A, A ):
"""simple docstring"""
lowerCamelCase , lowerCamelCase : Dict = torch.sort(A, 1, descending=A )
lowerCamelCase : Dict = torch.exp(A )
lowerCamelCase : Any = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
lowerCamelCase : Dict = torch.full_like(keep_mask[:, 0:1, :], A )
lowerCamelCase : int = torch.cat((all_true, keep_mask), dim=1 )
lowerCamelCase : int = keep_mask[:, :-1, :]
lowerCamelCase : str = keep_mask.gather(1, indices.argsort(1 ) )
lowerCamelCase : str = log_p_x_0.clone()
lowerCamelCase : Union[str, Any] = -torch.inf # -inf = log(0)
return rv
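The `truncate` method above implements truncation sampling: per pixel it keeps only the smallest set of classes whose cumulative probability reaches `truncation_rate` and zeroes out the rest in log-space. A standalone toy version of the same trick (variable names are mine, not the pipeline's):

import torch

def truncate_log_probs(log_p_x_0: torch.Tensor, truncation_rate: float) -> torch.Tensor:
    # log_p_x_0: (batch, num_classes, num_pixels) log-probabilities
    sorted_log_p, indices = torch.sort(log_p_x_0, 1, descending=True)
    sorted_p = torch.exp(sorted_log_p)
    keep_mask = sorted_p.cumsum(dim=1) < truncation_rate
    # shift the mask down one slot so the top-1 class always survives
    all_true = torch.full_like(keep_mask[:, :1, :], True)
    keep_mask = torch.cat((all_true, keep_mask), dim=1)[:, :-1, :]
    # undo the sort so the mask lines up with the original class order
    keep_mask = keep_mask.gather(1, indices.argsort(1))
    out = log_p_x_0.clone()
    out[~keep_mask] = -torch.inf  # log(0)
    return out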
| 320 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
class _UpperCAmelCase ( A__ ):
UpperCamelCase__ = '''timm_backbone'''
    def __init__( self , backbone=None , num_channels=3 , features_only=True , use_pretrained_backbone=True , out_indices=None , **kwargs , ):
        super().__init__(**kwargs )
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
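Illustrative construction (the class keeps its obfuscated name `_UpperCAmelCase` here, and the backbone string is a placeholder):

config = _UpperCAmelCase(backbone="resnet50", out_indices=(1, 2, 3, 4))
print(config.out_indices, config.use_timm_backbone)  # (1, 2, 3, 4) True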
| 526 |
import numpy as np
def sigmoid ( vector : np.array )-> np.array:
    # element-wise logistic function: 1 / (1 + e^-x)
    return 1 / (1 + np.exp(-vector ))
def sigmoid_linear_unit ( vector : np.array )-> np.array:
    # x * sigmoid(1.702 * x): the sigmoid approximation of GELU
    return vector * sigmoid(1.702 * vector )
if __name__ == "__main__":
import doctest
doctest.testmod()
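Illustrative call (values rounded):

x = np.array([-1.0, 0.0, 1.0])
print(sigmoid(x))              # [0.2689 0.5    0.7311]
print(sigmoid_linear_unit(x))  # [-0.1542  0.      0.8458]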
| 526 | 1 |
'''simple docstring'''
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big ( n , prec=1000 ) -> bool:
    '''simple docstring'''
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2  # integer division keeps the exponent usable by bin_exp_mod
        exp += 1
    # n - 1 = d*(2**exp)
    count = 0
    while count < prec:
        # one Miller-Rabin round with a random base
        base = random.randint(2 , n - 1 )
        b = bin_exp_mod(base , d , n )
        if b != 1:
            flag = True
            for _ in range(exp ):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
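`bin_exp_mod` comes from a sibling module that is not shown here; any fast modular exponentiation with the signature `bin_exp_mod(base, exponent, modulus)` fits. A minimal square-and-multiply sketch:

def bin_exp_mod(a: int, n: int, m: int) -> int:
    # computes (a ** n) % m in O(log n) multiplications
    result = 1
    a %= m
    while n > 0:
        if n & 1:
            result = result * a % m
        a = a * a % m
        n >>= 1
    return result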
if __name__ == "__main__":
__snake_case = abs(int(input('''Enter bound : ''').strip()))
print('''Here\'s the list of primes:''')
print(''', '''.join(str(i) for i in range(n + 1) if is_prime_big(i))) | 189 |
def matching_min_vertex_cover ( graph ) -> set:
    chosen_vertices = set()
    # edges = list of graph's edges
    edges = get_edges(graph )
    # While there are still elements in edges list, take an arbitrary edge
    # (from_node, to_node) and add its extremities to chosen_vertices and then
    # remove all arcs adjacent to the from_node and to_node
    while edges:
        from_node , to_node = edges.pop()
        chosen_vertices.add(from_node )
        chosen_vertices.add(to_node )
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge )
    return chosen_vertices
def get_edges ( graph ) -> set:
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node) )
    return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 579 | 0 |
'''simple docstring'''
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel
a_ : Optional[Any] = logging.getLogger(__name__)
def save_model (model , dirpath ) -> None:
    '''simple docstring'''
    # make sure a stale config/weights pair does not linger in the target dir
    if os.path.exists(dirpath ):
        if os.path.exists(os.path.join(dirpath , 'config.json' ) ) and os.path.isfile(
            os.path.join(dirpath , 'config.json' ) ):
            os.remove(os.path.join(dirpath , 'config.json' ) )
        if os.path.exists(os.path.join(dirpath , 'pytorch_model.bin' ) ) and os.path.isfile(
            os.path.join(dirpath , 'pytorch_model.bin' ) ):
            os.remove(os.path.join(dirpath , 'pytorch_model.bin' ) )
    else:
        os.makedirs(dirpath )
    model.save_pretrained(dirpath )
def entropy (p , unlogit=False ):
    '''simple docstring'''
    exponent = 2
    if unlogit:
        p = torch.pow(p , exponent )
    plogp = p * torch.log(p )
    plogp[p == 0] = 0  # define 0 * log(0) as 0
    return -plogp.sum(dim=-1 )
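Sanity check: a uniform attention row over n tokens has entropy log(n):

import torch
p = torch.full((1, 4), 0.25)  # uniform attention over 4 tokens
print(entropy(p))             # tensor([1.3863]) == log(4)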
def print_ad_tensor (tensor ) -> None:
    '''simple docstring'''
    logger.info('lv, h >\t' + '\t'.join(f'{x + 1}' for x in range(len(tensor ) ) ) )
    for row in range(len(tensor ) ):
        if tensor.dtype != torch.long:
            logger.info(f'layer {row + 1}:\t' + '\t'.join(f'{x:.5f}' for x in tensor[row].cpu().data ) )
        else:
            logger.info(f'layer {row + 1}:\t' + '\t'.join(f'{x:d}' for x in tensor[row].cpu().data ) )
def compute_heads_importance (args , model , eval_dataloader , compute_entropy=True , compute_importance=True , head_mask=None , actually_pruned=False ):
    '''simple docstring'''
    n_layers , n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers , n_heads ).to(args.device )
    attn_entropy = torch.zeros(n_layers , n_heads ).to(args.device )
    if head_mask is None:
        head_mask = torch.ones(n_layers , n_heads ).to(args.device )
    head_mask.requires_grad_(requires_grad=True )
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None
    total_loss = 0.0
    tot_tokens = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader , desc='Iteration' , disable=args.local_rank not in [-1, 0] ) ):
        inputs = tuple(t.to(args.device ) for t in inputs )
        (input_ids , ) = inputs
        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids , labels=input_ids , head_mask=head_mask )
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss , _ , all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions ):
                masked_entropy = entropy(attn.detach() , True )
                attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids ).float().detach().sum().data
    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance , exponent ).sum(-1 ) , 1 / exponent )
        head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-20
    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
    # Print matrices
    if compute_entropy:
        logger.info('Attention entropies' )
        print_ad_tensor(attn_entropy )
    if compute_importance:
        logger.info('Head importance scores' )
        print_ad_tensor(head_importance )
    logger.info('Head ranked by importance scores' )
    head_ranks = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
    head_ranks[head_importance.view(-1 ).sort(descending=True )[1]] = torch.arange(
        head_importance.numel() , device=args.device )
    head_ranks = head_ranks.view_as(head_importance )
    print_ad_tensor(head_ranks )
    return attn_entropy, head_importance, total_loss
def mask_heads (args , model , eval_dataloader ):
    '''simple docstring'''
    _ , head_importance , loss = compute_heads_importance(args , model , eval_dataloader , compute_entropy=False )
    original_score = 1 / loss  # instead of downstream score use the LM loss
    logger.info('Pruning: original score: %f, threshold: %f' , original_score , original_score * args.masking_threshold )
    new_head_mask = torch.ones_like(head_importance )
    num_to_mask = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float('Inf' )
        current_heads_to_mask = head_importance.view(-1 ).sort()[1]
        if len(current_heads_to_mask ) <= num_to_mask:
            print('BREAK BY num_to_mask' )
            break
        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info('Heads to mask: %s' , str(current_heads_to_mask.tolist() ) )
        new_head_mask = new_head_mask.view(-1 )
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask )
        new_head_mask = new_head_mask.clone().detach()
        print_ad_tensor(new_head_mask )
        # Compute metric and head importance again
        _ , head_importance , loss = compute_heads_importance(
            args , model , eval_dataloader , compute_entropy=False , head_mask=new_head_mask )
        current_score = 1 / loss
        logger.info(
            'Masking: current score: %f, remaining heads %d (%.1f percents)' , current_score , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 1_00 , )
    logger.info('Final head mask' )
    print_ad_tensor(head_mask )
    np.save(os.path.join(args.output_dir , 'head_mask.npy' ) , head_mask.detach().cpu().numpy() )
    return head_mask
def prune_heads (args , model , eval_dataloader , head_mask ):
    '''simple docstring'''
    # Pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _ , _ , loss = compute_heads_importance(
        args , model , eval_dataloader , compute_entropy=False , compute_importance=False , head_mask=head_mask )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time
    original_num_params = sum(p.numel() for p in model.parameters() )
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask ) )
    }
    for k, v in heads_to_prune.items():
        if isinstance(v , int ):
            heads_to_prune[k] = [
                v,
            ]
    assert sum(len(h ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune )
    pruned_num_params = sum(p.numel() for p in model.parameters() )
    before_time = datetime.now()
    _ , _ , loss = compute_heads_importance(
        args , model , eval_dataloader , compute_entropy=False , compute_importance=False , head_mask=None , actually_pruned=True , )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time
    logger.info(
        'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)' , original_num_params , pruned_num_params , pruned_num_params / original_num_params * 1_00 , )
    logger.info('Pruning: score with masking: %f score with pruning: %f' , score_masking , score_pruning )
    logger.info('Pruning: speed ratio (original timing / new timing): %f percents' , original_time / new_time * 1_00 )
    save_model(model , args.output_dir )
def main() -> None:
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--data_dir' , default=None , type=str , required=True , help='The input data dir. Should contain the .tsv files (or other data files) for the task.' , )
    parser.add_argument(
        '--model_name_or_path' , default=None , type=str , required=True , help='Path to pretrained model or model identifier from huggingface.co/models' , )
    parser.add_argument(
        '--output_dir' , default=None , type=str , required=True , help='The output directory where the model predictions and checkpoints will be written.' , )
    # Other parameters
    parser.add_argument(
        '--config_name' , default='' , type=str , help='Pretrained config name or path if not the same as model_name_or_path' , )
    parser.add_argument(
        '--tokenizer_name' , default='' , type=str , help='Pretrained tokenizer name or path if not the same as model_name_or_path' , )
    parser.add_argument(
        '--cache_dir' , default=None , type=str , help='Where do you want to store the pre-trained models downloaded from s3' , )
    parser.add_argument(
        '--data_subset' , type=int , default=-1 , help='If > 0: limit the data to a subset of data_subset instances.' )
    parser.add_argument(
        '--overwrite_output_dir' , action='store_true' , help='Whether to overwrite data in output directory' )
    parser.add_argument(
        '--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
    parser.add_argument(
        '--dont_normalize_importance_by_layer' , action='store_true' , help='Don\'t normalize importance score by layers' )
    parser.add_argument(
        '--dont_normalize_global_importance' , action='store_true' , help='Don\'t normalize all importance scores between 0 and 1' , )
    parser.add_argument(
        '--try_masking' , action='store_true' , help='Whether to try to mask head until a threshold of accuracy.' )
    parser.add_argument(
        '--masking_threshold' , default=0.9 , type=float , help='masking threshold in term of metrics (stop masking when metric < threshold * original metric value).' , )
    parser.add_argument(
        '--masking_amount' , default=0.1 , type=float , help='Amount to heads to masking at each masking step.' )
    parser.add_argument('--metric_name' , default='acc' , type=str , help='Metric to use for head masking.' )
    parser.add_argument(
        '--max_seq_length' , default=1_28 , type=int , help=(
            'The maximum total input sequence length after WordPiece tokenization. \n'
            'Sequences longer than this will be truncated, sequences shorter padded.'
        ) , )
    parser.add_argument('--batch_size' , default=1 , type=int , help='Batch size.' )
    parser.add_argument('--seed' , type=int , default=42 )
    parser.add_argument('--local_rank' , type=int , default=-1 , help='local_rank for distributed training on gpus' )
    parser.add_argument('--no_cuda' , action='store_true' , help='Whether not to use CUDA when available' )
    parser.add_argument('--server_ip' , type=str , default='' , help='Can be used for distant debugging.' )
    parser.add_argument('--server_port' , type=str , default='' , help='Can be used for distant debugging.' )
    args = parser.parse_args()
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print('Waiting for debugger attach' )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=True )
        ptvsd.wait_for_attach()
    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu' )
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank )
        args.device = torch.device('cuda' , args.local_rank )
        args.n_gpu = 1
        torch.distributed.init_process_group(backend='nccl' ) # Initializes the distributed backend
    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
    logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path )
    # Distributed and parallel training
    model.to(args.device )
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=True )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model )
    # Print/save training arguments
    os.makedirs(args.output_dir , exist_ok=True )
    torch.save(args , os.path.join(args.output_dir , 'run_args.bin' ) )
    logger.info('Training/evaluation parameters %s' , args )
    # Prepare dataset
    data = np.concatenate(
        [
            np.loadtxt(args.data_dir , dtype=np.int64 ),
        ] )
    train_tensor_dataset = (torch.from_numpy(data ),)
    train_data = TensorDataset(*train_tensor_dataset )
    train_sampler = RandomSampler(train_data )
    eval_dataloader = DataLoader(train_data , sampler=train_sampler , batch_size=args.batch_size )
    # Compute head entropy and importance score
    compute_heads_importance(args , model , eval_dataloader )
    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args , model , eval_dataloader )
        prune_heads(args , model , eval_dataloader , head_mask )
if __name__ == "__main__":
main()
| 532 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
a_ : Optional[Any] = logging.getLogger(__name__)
@dataclass(frozen=True )
class InputExample :
    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None
@dataclass(frozen=True )
class InputFeatures :
    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
    class HansDataset( Dataset ):
        features: List[InputFeatures]
        def __init__( self , data_dir , tokenizer , task , max_seq_length = None , overwrite_cache=False , evaluate = False , ):
            processor = hans_processors[task]()
            cached_features_file = os.path.join(
                data_dir , 'cached_{}_{}_{}_{}'.format(
                    'dev' if evaluate else 'train' , tokenizer.__class__.__name__ , str(max_seq_length ) , task , ) , )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1] , label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list
            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + '.lock'
            with FileLock(lock_path ):
                if os.path.exists(cached_features_file ) and not overwrite_cache:
                    logger.info(f'Loading features from cached file {cached_features_file}' )
                    self.features = torch.load(cached_features_file )
                else:
                    logger.info(f'Creating features from dataset file at {data_dir}' )
                    examples = (
                        processor.get_dev_examples(data_dir ) if evaluate else processor.get_train_examples(data_dir )
                    )
                    logger.info('Training examples: %s' , len(examples ) )
                    self.features = hans_convert_examples_to_features(examples , label_list , max_seq_length , tokenizer )
                    logger.info('Saving features into cached file %s' , cached_features_file )
                    torch.save(self.features , cached_features_file )
        def __len__( self ):
            return len(self.features )
        def __getitem__( self , i ) -> InputFeatures:
            return self.features[i]
        def get_labels( self ):
            return self.label_list
if is_tf_available():
import tensorflow as tf
    class TFHansDataset :
        features: List[InputFeatures]
        def __init__( self , data_dir , tokenizer , task , max_seq_length = 1_28 , overwrite_cache=False , evaluate = False , ):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1] , label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list
            examples = processor.get_dev_examples(data_dir ) if evaluate else processor.get_train_examples(data_dir )
            self.features = hans_convert_examples_to_features(examples , label_list , max_seq_length , tokenizer )
            def gen():
                for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='convert examples to features' ):
                    if ex_index % 1_00_00 == 0:
                        logger.info('Writing example %d of %d' % (ex_index, len(examples )) )
                    yield (
                        {
                            "example_id": 0,
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )
            self.dataset = tf.data.Dataset.from_generator(
                gen , (
                    {
                        'example_id': tf.int32,
                        'input_ids': tf.int32,
                        'attention_mask': tf.int32,
                        'token_type_ids': tf.int32,
                    },
                    tf.int64,
                ) , (
                    {
                        'example_id': tf.TensorShape([] ),
                        'input_ids': tf.TensorShape([None, None] ),
                        'attention_mask': tf.TensorShape([None, None] ),
                        'token_type_ids': tf.TensorShape([None, None] ),
                    },
                    tf.TensorShape([] ),
                ) , )
        def get_dataset( self ):
            return self.dataset
        def __len__( self ):
            return len(self.features )
        def __getitem__( self , i ) -> InputFeatures:
            return self.features[i]
        def get_labels( self ):
            return self.label_list
class HansProcessor( DataProcessor ):
    def get_train_examples( self , data_dir ):
        return self._create_examples(self._read_tsv(os.path.join(data_dir , 'heuristics_train_set.txt' ) ) , 'train' )
    def get_dev_examples( self , data_dir ):
        return self._create_examples(self._read_tsv(os.path.join(data_dir , 'heuristics_evaluation_set.txt' ) ) , 'dev' )
    def get_labels( self ):
        return ["contradiction", "entailment", "neutral"]
    def _create_examples( self , lines , set_type ):
        examples = []
        for i, line in enumerate(lines ):
            if i == 0:
                continue
            guid = '%s-%s' % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith('ex' ) else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid , text_a=text_a , text_b=text_b , label=label , pairID=pairID ) )
        return examples
def hans_convert_examples_to_features (examples : List[InputExample] , label_list : List[str] , max_length : int , tokenizer : PreTrainedTokenizer , ):
    '''simple docstring'''
    label_map = {label: i for i, label in enumerate(label_list )}
    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples ) , desc='convert examples to features' ):
        if ex_index % 1_00_00 == 0:
            logger.info('Writing example %d' % (ex_index) )
        inputs = tokenizer(
            example.text_a , example.text_b , add_special_tokens=True , max_length=max_length , padding='max_length' , truncation=True , return_overflowing_tokens=True , )
        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID )
        features.append(InputFeatures(**inputs , label=label , pairID=pairID ) )
    for i, example in enumerate(examples[:5] ):
        logger.info('*** Example ***' )
        logger.info(f'guid: {example}' )
        logger.info(f'features: {features[i]}' )
    return features
hans_tasks_num_labels = {
    "hans": 3,
}
hans_processors = {
    "hans": HansProcessor,
}
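With the class names restored, the pieces wire together like this (the data path is a placeholder; the features are cached next to the data on first use):

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
dataset = HansDataset(data_dir="./hans", tokenizer=tokenizer, task="hans", max_seq_length=128)
print(len(dataset), dataset.get_labels())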
| 532 | 1 |
"""simple docstring"""
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel
def shave_segments ( path , n_shave_prefix_segments=1 ) -> str:
    # drop (or keep only) the first n dot-separated segments of a state-dict key
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split('''.''' )[n_shave_prefix_segments:] )
    else:
        return ".".join(path.split('''.''' )[:n_shave_prefix_segments] )
def renew_resnet_paths ( old_list , n_shave_prefix_segments=0 ) -> list:
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace('''in_layers.0''' , '''norm1''' )
        new_item = new_item.replace('''in_layers.2''' , '''conv1''' )
        new_item = new_item.replace('''out_layers.0''' , '''norm2''' )
        new_item = new_item.replace('''out_layers.3''' , '''conv2''' )
        new_item = new_item.replace('''emb_layers.1''' , '''time_emb_proj''' )
        new_item = new_item.replace('''skip_connection''' , '''conv_shortcut''' )
        new_item = shave_segments(new_item , n_shave_prefix_segments=n_shave_prefix_segments )
        mapping.append({'''old''': old_item, '''new''': new_item} )
    return mapping
def renew_attention_paths ( old_list , n_shave_prefix_segments=0 ) -> list:
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace('''norm.weight''' , '''group_norm.weight''' )
        new_item = new_item.replace('''norm.bias''' , '''group_norm.bias''' )
        new_item = new_item.replace('''proj_out.weight''' , '''proj_attn.weight''' )
        new_item = new_item.replace('''proj_out.bias''' , '''proj_attn.bias''' )
        new_item = shave_segments(new_item , n_shave_prefix_segments=n_shave_prefix_segments )
        mapping.append({'''old''': old_item, '''new''': new_item} )
    return mapping
def assign_to_checkpoint ( paths , checkpoint , old_checkpoint , attention_paths_to_split=None , additional_replacements=None , config=None ) -> None:
    assert isinstance(paths , list ), "Paths should be a list of dicts containing 'old' and 'new' keys."
    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3
            target_shape = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
            num_heads = old_tensor.shape[0] // config['''num_head_channels'''] // 3
            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
            query , key , value = old_tensor.split(channels // num_heads , dim=1 )
            checkpoint[path_map['''query''']] = query.reshape(target_shape )
            checkpoint[path_map['''key''']] = key.reshape(target_shape )
            checkpoint[path_map['''value''']] = value.reshape(target_shape )
    for path in paths:
        new_path = path['''new''']
        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue
        # Global renaming happens here
        new_path = new_path.replace('''middle_block.0''' , '''mid_block.resnets.0''' )
        new_path = new_path.replace('''middle_block.1''' , '''mid_block.attentions.0''' )
        new_path = new_path.replace('''middle_block.2''' , '''mid_block.resnets.1''' )
        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement['''old'''] , replacement['''new'''] )
        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path['''old''']][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path['''old''']]
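The qkv branch above slices one fused projection into per-head query/key/value chunks; a toy check with 2 heads and `num_head_channels == 1` (hypothetical numbers):

import torch
old_tensor = torch.arange(6.0).reshape(6, 1)   # fused qkv weight, 3 * channels rows
channels = old_tensor.shape[0] // 3            # 2
num_heads = 2
t = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
query, key, value = t.split(channels // num_heads, dim=1)
print(query.reshape(-1).tolist())              # [0.0, 3.0]: rows 0 and 3 of the fused tensor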
def convert_ldm_checkpoint ( checkpoint , config ) -> dict:
    new_checkpoint = {}
    new_checkpoint['''time_embedding.linear_1.weight'''] = checkpoint['''time_embed.0.weight''']
    new_checkpoint['''time_embedding.linear_1.bias'''] = checkpoint['''time_embed.0.bias''']
    new_checkpoint['''time_embedding.linear_2.weight'''] = checkpoint['''time_embed.2.weight''']
    new_checkpoint['''time_embedding.linear_2.bias'''] = checkpoint['''time_embed.2.bias''']
    new_checkpoint['''conv_in.weight'''] = checkpoint['''input_blocks.0.0.weight''']
    new_checkpoint['''conv_in.bias'''] = checkpoint['''input_blocks.0.0.bias''']
    new_checkpoint['''conv_norm_out.weight'''] = checkpoint['''out.0.weight''']
    new_checkpoint['''conv_norm_out.bias'''] = checkpoint['''out.0.bias''']
    new_checkpoint['''conv_out.weight'''] = checkpoint['''out.2.weight''']
    new_checkpoint['''conv_out.bias'''] = checkpoint['''out.2.bias''']
    # Retrieves the keys for the input blocks only
    num_input_blocks = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''input_blocks''' in layer} )
    input_blocks = {
        layer_id: [key for key in checkpoint if f"""input_blocks.{layer_id}""" in key]
        for layer_id in range(num_input_blocks )
    }
    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''middle_block''' in layer} )
    middle_blocks = {
        layer_id: [key for key in checkpoint if f"""middle_block.{layer_id}""" in key]
        for layer_id in range(num_middle_blocks )
    }
    # Retrieves the keys for the output blocks only
    num_output_blocks = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''output_blocks''' in layer} )
    output_blocks = {
        layer_id: [key for key in checkpoint if f"""output_blocks.{layer_id}""" in key]
        for layer_id in range(num_output_blocks )
    }
    for i in range(1 , num_input_blocks ):
        block_id = (i - 1) // (config['''num_res_blocks'''] + 1)
        layer_in_block_id = (i - 1) % (config['''num_res_blocks'''] + 1)
        resnets = [key for key in input_blocks[i] if f"""input_blocks.{i}.0""" in key]
        attentions = [key for key in input_blocks[i] if f"""input_blocks.{i}.1""" in key]
        if f"""input_blocks.{i}.0.op.weight""" in checkpoint:
            new_checkpoint[f"""down_blocks.{block_id}.downsamplers.0.conv.weight"""] = checkpoint[
                f"""input_blocks.{i}.0.op.weight"""
            ]
            new_checkpoint[f"""down_blocks.{block_id}.downsamplers.0.conv.bias"""] = checkpoint[
                f"""input_blocks.{i}.0.op.bias"""
            ]
            continue
        paths = renew_resnet_paths(resnets )
        meta_path = {'''old''': f"""input_blocks.{i}.0""", '''new''': f"""down_blocks.{block_id}.resnets.{layer_in_block_id}"""}
        resnet_op = {'''old''': '''resnets.2.op''', '''new''': '''downsamplers.0.op'''}
        assign_to_checkpoint(
            paths , new_checkpoint , checkpoint , additional_replacements=[meta_path, resnet_op] , config=config )
        if len(attentions ):
            paths = renew_attention_paths(attentions )
            meta_path = {
                '''old''': f"""input_blocks.{i}.1""",
                '''new''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}""",
            }
            to_split = {
                f"""input_blocks.{i}.1.qkv.bias""": {
                    '''key''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
                    '''query''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
                    '''value''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
                },
                f"""input_blocks.{i}.1.qkv.weight""": {
                    '''key''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
                    '''query''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
                    '''value''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
                },
            }
            assign_to_checkpoint(
                paths , new_checkpoint , checkpoint , additional_replacements=[meta_path] , attention_paths_to_split=to_split , config=config , )
    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]
    resnet_0_paths = renew_resnet_paths(resnet_0 )
    assign_to_checkpoint(resnet_0_paths , new_checkpoint , checkpoint , config=config )
    resnet_1_paths = renew_resnet_paths(resnet_1 )
    assign_to_checkpoint(resnet_1_paths , new_checkpoint , checkpoint , config=config )
    attentions_paths = renew_attention_paths(attentions )
    to_split = {
        '''middle_block.1.qkv.bias''': {
            '''key''': '''mid_block.attentions.0.key.bias''',
            '''query''': '''mid_block.attentions.0.query.bias''',
            '''value''': '''mid_block.attentions.0.value.bias''',
        },
        '''middle_block.1.qkv.weight''': {
            '''key''': '''mid_block.attentions.0.key.weight''',
            '''query''': '''mid_block.attentions.0.query.weight''',
            '''value''': '''mid_block.attentions.0.value.weight''',
        },
    }
    assign_to_checkpoint(
        attentions_paths , new_checkpoint , checkpoint , attention_paths_to_split=to_split , config=config )
    for i in range(num_output_blocks ):
        block_id = i // (config['''num_res_blocks'''] + 1)
        layer_in_block_id = i % (config['''num_res_blocks'''] + 1)
        output_block_layers = [shave_segments(name , 2 ) for name in output_blocks[i]]
        output_block_list = {}
        for layer in output_block_layers:
            layer_id , layer_name = layer.split('''.''' )[0], shave_segments(layer , 1 )
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name )
            else:
                output_block_list[layer_id] = [layer_name]
        if len(output_block_list ) > 1:
            resnets = [key for key in output_blocks[i] if f"""output_blocks.{i}.0""" in key]
            attentions = [key for key in output_blocks[i] if f"""output_blocks.{i}.1""" in key]
            resnet_0_paths = renew_resnet_paths(resnets )
            paths = renew_resnet_paths(resnets )
            meta_path = {'''old''': f"""output_blocks.{i}.0""", '''new''': f"""up_blocks.{block_id}.resnets.{layer_in_block_id}"""}
            assign_to_checkpoint(paths , new_checkpoint , checkpoint , additional_replacements=[meta_path] , config=config )
            if ["conv.weight", "conv.bias"] in output_block_list.values():
                index = list(output_block_list.values() ).index(['''conv.weight''', '''conv.bias'''] )
                new_checkpoint[f"""up_blocks.{block_id}.upsamplers.0.conv.weight"""] = checkpoint[
                    f"""output_blocks.{i}.{index}.conv.weight"""
                ]
                new_checkpoint[f"""up_blocks.{block_id}.upsamplers.0.conv.bias"""] = checkpoint[
                    f"""output_blocks.{i}.{index}.conv.bias"""
                ]
                # Clear attentions as they have been attributed above.
                if len(attentions ) == 2:
                    attentions = []
            if len(attentions ):
                paths = renew_attention_paths(attentions )
                meta_path = {
                    '''old''': f"""output_blocks.{i}.1""",
                    '''new''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}""",
                }
                to_split = {
                    f"""output_blocks.{i}.1.qkv.bias""": {
                        '''key''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
                        '''query''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
                        '''value''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
                    },
                    f"""output_blocks.{i}.1.qkv.weight""": {
                        '''key''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
                        '''query''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
                        '''value''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
                    },
                }
                assign_to_checkpoint(
                    paths , new_checkpoint , checkpoint , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('''qkv''' in key for key in attentions ) else None , config=config , )
        else:
            resnet_0_paths = renew_resnet_paths(output_block_layers , n_shave_prefix_segments=1 )
            for path in resnet_0_paths:
                old_path = '''.'''.join(['''output_blocks''', str(i ), path['''old''']] )
                new_path = '''.'''.join(['''up_blocks''', str(block_id ), '''resnets''', str(layer_in_block_id ), path['''new''']] )
                new_checkpoint[new_path] = checkpoint[old_path]
    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    args = parser.parse_args()
    checkpoint = torch.load(args.checkpoint_path)
    with open(args.config_file) as f:
        config = json.loads(f.read())
    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)
    if "ldm" in config:
        del config["ldm"]
    model = UNet2DModel(**config)
    model.load_state_dict(converted_checkpoint)
    try:
        scheduler = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
        vqvae = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
        pipe.save_pretrained(args.dump_path)
    except: # noqa: E722
        model.save_pretrained(args.dump_path)
| 103 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class UpperCAmelCase ( metaclass=DummyObject ):
    _backends = ['''torch''', '''scipy''']
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ['''torch''', '''scipy'''] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ['''torch''', '''scipy'''] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ['''torch''', '''scipy'''] )
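These placeholders keep `import transformers` working when torch or SciPy is absent; the failure is deferred to first use:

# with the backends missing, instantiation fails loudly instead of the import
obj = UpperCAmelCase()  # -> ImportError raised by requires_backends, naming torch and scipy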
| 103 | 1 |
from typing import List
import numpy as np
def _number_of_shards_in_gen_kwargs ( gen_kwargs ):
    '''simple docstring'''
    lists_lengths = {key: len(value ) for key, value in gen_kwargs.items() if isinstance(value , list )}
    if len(set(lists_lengths.values() ) ) > 1:
        raise RuntimeError(
            (
                """Sharding is ambiguous for this dataset: """
                + """we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"""
                + """\n""".join(f'''\t- key {key} has length {length}''' for key, length in lists_lengths.items() )
                + """\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, """
                + """and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."""
            ) )
    max_length = max(lists_lengths.values(), default=0 )
    return max(1, max_length )
def _distribute_shards ( num_shards , max_num_jobs ):
    '''simple docstring'''
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs ):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add )
        shards_indices_per_group.append(shard_indices )
    return shards_indices_per_group
def _split_gen_kwargs ( gen_kwargs , max_num_jobs ):
    '''simple docstring'''
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs )
    if num_shards == 1:
        return [dict(gen_kwargs )]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs )
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value , list )
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group ) )
        ]
def _merge_gen_kwargs ( gen_kwargs_list ):
    '''simple docstring'''
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key] , list )
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }
def _shuffle_gen_kwargs ( rng , gen_kwargs ):
    '''simple docstring'''
    list_sizes = {len(value ) for value in gen_kwargs.values() if isinstance(value , list )}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size ) )
        rng.shuffle(indices_per_size[size] )
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs )
    for key, value in shuffled_kwargs.items():
        if isinstance(value , list ):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value )]]
    return shuffled_kwargs
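Quick illustration of the splitting behaviour (hypothetical file list):

gen_kwargs = {"files": ["a.txt", "b.txt", "c.txt", "d.txt", "e.txt"], "sep": ","}
print(_split_gen_kwargs(gen_kwargs, max_num_jobs=2))
# [{'files': ['a.txt', 'b.txt', 'c.txt'], 'sep': ','}, {'files': ['d.txt', 'e.txt'], 'sep': ','}]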
| 76 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
__lowerCAmelCase : List[Any] = {
'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class UpperCAmelCase_ ( _A ):
'''simple docstring'''
a__ = """sew-d"""
    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
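A quick sanity check of the restored config; the defaults above follow the `asapp/sew-d-tiny-100k` checkpoint, so no download is needed:
config = SEWDConfig()
print(config.num_feat_extract_layers)  # 13, one per conv block in conv_dim
print(config.inputs_to_logits_ratio)   # 320 = 5 * 2**6, the product of conv_stride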
| 76 | 1 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel | 8 | import numpy as np
def runge_kutta(f, xa, ya, x_end, h):
    # Classic fourth-order Runge-Kutta integration of y' = f(x, y),
    # from x = xa (with y(xa) = ya) up to x = x_end in steps of size h.
    n = int(np.ceil((x_end - xa) / h))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa
    for k in range(n):
        ka = f(x, y[k])
        kb = f(x + 0.5 * h, y[k] + 0.5 * h * ka)
        kc = f(x + 0.5 * h, y[k] + 0.5 * h * kb)
        kd = f(x + h, y[k] + h * kc)
        y[k + 1] = y[k] + (1 / 6) * h * (ka + 2 * kb + 2 * kc + kd)
        x += h
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
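A minimal check of the integrator against a known solution (the name `runge_kutta` comes from the fix above):
# Integrate y' = y with y(0) = 1; the exact solution is e**x.
ys = runge_kutta(lambda x, y: y, 0.0, 1.0, 1.0, 0.01)
print(ys[-1])  # ~2.71828, close to np.e at this step size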
| 64 | 0 |
from ...processing_utils import ProcessorMixin
class SpeechT5Processor(ProcessorMixin):
    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, *args, **kwargs):
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?")
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?")
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.")
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None
        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None
        if inputs is None:
            return targets
        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask
        return inputs

    def pad(self, *args, **kwargs):
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)
        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.")
        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None
        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                # temporarily swap feature_size so the padder treats labels as mel spectrograms
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None
        if inputs is None:
            return targets
        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask
        return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
| 209 |
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    pass


def gen(shards: List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}


def main():
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])
    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers
    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))
    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)
    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))
    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")


if __name__ == "__main__":
    main()
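The script reads `RANK` and `WORLD_SIZE` from the environment, which `torchrun` sets for each process; a plausible launch (the exact flag values are illustrative):
# Hypothetical launch, two processes on one node:
#   torchrun --nproc_per_node=2 this_script.py --streaming True --num_workers 2
# Each process then verifies it received its expected share of the 12 items (4 shards x 3).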
| 209 | 1 |
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    def __init__(
        self,
        generator: Callable,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        gen_kwargs: Optional[dict] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs,
        )
        self.builder = Generator(
            cache_dir=cache_dir, features=features, generator=generator, gen_kwargs=gen_kwargs, **kwargs,
        )

    def read(self):
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="train")
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
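A small usage sketch; in practice this input stream is usually reached through the public `datasets.Dataset.from_generator`, which wraps it:
def squares():
    for i in range(4):
        yield {"i": i, "square": i * i}

ds = GeneratorDatasetInputStream(generator=squares).read()
print(ds[0])  # {'i': 0, 'square': 0}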
| 326 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_van"] = [
'''VAN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''VanForImageClassification''',
'''VanModel''',
'''VanPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 538 | 0 |
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8) -> str:
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


def alternative_password_generator(chars_incl: str, i: int) -> str:
    # Password Generator = full boot with random_number, random_letters, and
    # random_character FUNCTIONS
    # Put your code here...
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(i))


def random_number(chars_incl, i):
    pass  # Put your code here...


def random_letters(chars_incl, i):
    pass  # Put your code here...


def random_characters(chars_incl, i):
    pass  # Put your code here...


def is_strong_password(password: str, min_length: int = 8) -> bool:
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False
    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)
    return upper and lower and num and spec_char
    # Passwords should contain UPPERCASE, lowercase,
    # numbers, and special characters


def main():
    max_length = int(input("Please indicate the max length of your password: ").strip())
    required_chars = input(
        "Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(max_length))
    print(
        "Alternative Password generated:",
        alternative_password_generator(required_chars, max_length),
    )
    print("[If you are thinking of using this password, you had better save it.]")
if __name__ == "__main__":
main()
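For non-interactive use, the two generators and the strength check can be called directly:
print(password_generator(12))                      # a random 12-character string
print(alternative_password_generator("AB$3", 12))  # always contains 'A', 'B', '$' and '3'
print(is_strong_password("Aa1!aaaa"))              # True: upper, lower, digit, special, length 8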
| 469 | import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
logger = logging.get_logger(__name__)


class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 469 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineLegacyNightlyTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png")
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy")
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="onnx", safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A red cat sitting on a park bench"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, strength=0.75, guidance_scale=7.5, num_inference_steps=15, generator=generator, output_type="np", )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
| 173 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/table-transformer-detection': (
'https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'
),
}
class TableTransformerConfig(PretrainedConfig):
    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            backbone, use_pretrained_backbone, dilation = None, None, None
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model


class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ])

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 173 | 1 |
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
logger = logging.get_logger(__name__)


class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 706 | import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8
KEYMAP = {
'tab': ord('\t'),
'newline': ord('\r'),
'esc': 27,
'up': 65 + ARROW_KEY_FLAG,
'down': 66 + ARROW_KEY_FLAG,
'right': 67 + ARROW_KEY_FLAG,
'left': 68 + ARROW_KEY_FLAG,
'mod_int': 91,
'undefined': sys.maxsize,
'interrupt': 3,
'insert': 50,
'delete': 51,
'pg_up': 53,
'pg_down': 54,
}
KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]

if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }

for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    """Gets raw characters from input, platform-dependently."""
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch


def get_character():
    """Gets a character from the keyboard and returns the key code."""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
| 387 | 0 |
"""simple docstring"""
import os
def solution():
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, "triangle.txt")
    with open(triangle_path) as f:
        triangle = f.readlines()
    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)
    # bottom-up dynamic programming: each cell accumulates the best path sum ending there
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number_a = a[i - 1][j] if j != len(a[i - 1]) else 0
            number_b = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number_a, number_b)
    return max(a[-1])
if __name__ == "__main__":
print(solution())
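The same recurrence on a tiny inline triangle, for illustration (independent of `triangle.txt`):
a = [[3], [7, 4], [2, 4, 6]]
for i in range(1, len(a)):
    for j in range(len(a[i])):
        number_a = a[i - 1][j] if j != len(a[i - 1]) else 0
        number_b = a[i - 1][j - 1] if j > 0 else 0
        a[i][j] += max(number_a, number_b)
print(max(a[-1]))  # 14, via the path 3 -> 7 -> 4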
| 4 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_convbert''': ['''CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvBertConfig''', '''ConvBertOnnxConfig'''],
'''tokenization_convbert''': ['''ConvBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_convbert"] = [
'''CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConvBertForMaskedLM''',
'''ConvBertForMultipleChoice''',
'''ConvBertForQuestionAnswering''',
'''ConvBertForSequenceClassification''',
'''ConvBertForTokenClassification''',
'''ConvBertLayer''',
'''ConvBertModel''',
'''ConvBertPreTrainedModel''',
'''load_tf_weights_in_convbert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_convbert"] = [
'''TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFConvBertForMaskedLM''',
'''TFConvBertForMultipleChoice''',
'''TFConvBertForQuestionAnswering''',
'''TFConvBertForSequenceClassification''',
'''TFConvBertForTokenClassification''',
'''TFConvBertLayer''',
'''TFConvBertModel''',
'''TFConvBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 167 | 0 |
"""simple docstring"""
from typing import Any
class Node:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self) -> None:
        self.head = None

    def print_list(self) -> None:
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    def push(self, new_data: Any) -> None:
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    def swap_nodes(self, node_data_1, node_data_2) -> None:
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next
        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next
        if node_1 is None or node_2 is None:
            return
        # swap the payloads rather than relinking the nodes
        node_1.data, node_2.data = node_2.data, node_1.data


if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)
    ll.print_list()
    ll.swap_nodes(1, 4)
    print("After swapping")
    ll.print_list()
| 719 |
"""simple docstring"""
def permute(nums: list[int]) -> list[list[int]]:
    # rotation-based recursion: fix each element in turn and permute the rest
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result


def permutea(nums):
    # in-place swap backtracking variant
    def backtrack(start):
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack

    output = []
    backtrack(0)
    return output


if __name__ == "__main__":
    import doctest

    # use res to print the data in the permutea function
    res = permutea([1, 2, 3])
print(res)
doctest.testmod()
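Both implementations can be cross-checked against the standard library (sorted first, because the two functions emit permutations in different orders):
import itertools

expected = sorted(itertools.permutations([1, 2, 3]))
assert sorted(tuple(p) for p in permute([1, 2, 3])) == expected
assert sorted(tuple(p) for p in permutea([1, 2, 3])) == expected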
| 491 | 0 |
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name, save_dir, **config_kwargs):
    """Save a randomly initialized (untrained) seq2seq model and its tokenizer to save_dir."""
    config = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(config)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
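Invoked from the command line, `fire` maps positional arguments onto the function's parameters; for example (script name, model name, and path are illustrative):
# python save_randomly_initialized_model.py t5-small /tmp/t5-small-random
# Extra flags become config overrides passed through **config_kwargs, e.g. --d_model 64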
| 604 | import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.txt",
    "merges_file": "bpe.codes",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt",
    },
    "merges_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "vinai/phobert-base": 256,
    "vinai/phobert-large": 256,
}


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    pairs = set(pairs)
    return pairs
class PhobertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, **kwargs,
        )
        self.vocab_file = vocab_file
        self.merges_file = merges_file
        self.encoder = {}
        self.encoder[self.bos_token] = 0
        self.encoder[self.pad_token] = 1
        self.encoder[self.eos_token] = 2
        self.encoder[self.unk_token] = 3
        self.add_from_file(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[:-1]
        merges = [tuple(merge.split()[:-1]) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        out_merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
            copyfile(self.merges_file, out_merge_file)
        return out_vocab_file, out_merge_file

    def add_from_file(self, f):
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset")
            return
        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(" ")
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
            word = line[:idx]
            self.encoder[word] = len(self.encoder)
| 604 | 1 |
'''simple docstring'''
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand

SORTED_HANDS = (
'''4S 3H 2C 7S 5H''',
'''9D 8H 2C 6S 7H''',
'''2D 6D 9D TH 7D''',
'''TC 8C 2S JH 6C''',
'''JH 8S TH AH QH''',
'''TS KS 5S 9S AC''',
'''KD 6S 9D TH AD''',
'''KS 8D 4D 9S 4S''', # pair
'''8C 4S KH JS 4D''', # pair
'''QH 8H KD JH 8S''', # pair
'''KC 4H KS 2H 8D''', # pair
'''KD 4S KC 3H 8S''', # pair
'''AH 8S AS KC JH''', # pair
'''3H 4C 4H 3S 2H''', # 2 pairs
'''5S 5D 2C KH KH''', # 2 pairs
'''3C KH 5D 5S KH''', # 2 pairs
'''AS 3C KH AD KH''', # 2 pairs
'''7C 7S 3S 7H 5S''', # 3 of a kind
'''7C 7S KH 2H 7H''', # 3 of a kind
'''AC KH QH AH AS''', # 3 of a kind
'''2H 4D 3C AS 5S''', # straight (low ace)
'''3C 5C 4C 2C 6H''', # straight
'''6S 8S 7S 5H 9H''', # straight
'''JS QS 9H TS KH''', # straight
'''QC KH TS JS AH''', # straight (high ace)
'''8C 9C 5C 3C TC''', # flush
'''3S 8S 9S 5S KS''', # flush
'''4C 5C 9C 8C KC''', # flush
'''JH 8H AH KH QH''', # flush
'''3D 2H 3H 2C 2D''', # full house
'''2H 2C 3S 3H 3D''', # full house
'''KH KC 3S 3H 3D''', # full house
'''JC 6H JS JD JH''', # 4 of a kind
'''JC 7H JS JD JH''', # 4 of a kind
'''JC KH JS JD JH''', # 4 of a kind
'''2S AS 4S 5S 3S''', # straight flush (low ace)
'''2D 6D 3D 4D 5D''', # straight flush
'''5C 6C 3C 7C 4C''', # straight flush
'''JH 9H TH KH QH''', # straight flush
'''JH AH TH KH QH''', # royal flush (high ace straight flush)
)
TEST_COMPARE = (
('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)
TEST_FLUSH = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', True),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', False),
('''AS 3S 4S 8S 2S''', True),
)
TEST_STRAIGHT = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', False),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', True),
)
TEST_FIVE_HIGH_STRAIGHT = (
('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 1_4]),
('''2H 5D 3C AS 5S''', False, [1_4, 5, 5, 3, 2]),
('''JH QD KC AS TS''', False, [1_4, 1_3, 1_2, 1_1, 1_0]),
('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
('''JH AH TH KH QH''', 0),
('''JH 9H TH KH QH''', 0),
('''JC KH JS JD JH''', 7),
('''KH KC 3S 3H 3D''', 6),
('''8C 9C 5C 3C TC''', 0),
('''JS QS 9H TS KH''', 0),
('''7C 7S KH 2H 7H''', 3),
('''3C KH 5D 5S KH''', 2),
('''QH 8H KD JH 8S''', 1),
('''2D 6D 9D TH 7D''', 0),
)
TEST_TYPES = (
('''JH AH TH KH QH''', 2_3),
('''JH 9H TH KH QH''', 2_2),
('''JC KH JS JD JH''', 2_1),
('''KH KC 3S 3H 3D''', 2_0),
('''8C 9C 5C 3C TC''', 1_9),
('''JS QS 9H TS KH''', 1_8),
('''7C 7S KH 2H 7H''', 1_7),
('''3C KH 5D 5S KH''', 1_6),
('''QH 8H KD JH 8S''', 1_5),
('''2D 6D 9D TH 7D''', 1_4),
)
def generate_random_hand():
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    return (generate_random_hand() for _ in range(number_of_hands))


@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight():
    # Test that five high straights are compared correctly.
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight():
    # Multiple calls to five_high_straight function should still return True
    # and shouldn't mutate the list in every call other than the first.
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project():
    # Problem number 54 from Project Euler
    # Testing from poker_hands.txt file
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands_file_path = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands_file_path) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
| 4 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_xlnet"] = [
'''XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLNetForMultipleChoice''',
'''XLNetForQuestionAnswering''',
'''XLNetForQuestionAnsweringSimple''',
'''XLNetForSequenceClassification''',
'''XLNetForTokenClassification''',
'''XLNetLMHeadModel''',
'''XLNetModel''',
'''XLNetPreTrainedModel''',
'''load_tf_weights_in_xlnet''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_xlnet"] = [
'''TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLNetForMultipleChoice''',
'''TFXLNetForQuestionAnsweringSimple''',
'''TFXLNetForSequenceClassification''',
'''TFXLNetForTokenClassification''',
'''TFXLNetLMHeadModel''',
'''TFXLNetMainLayer''',
'''TFXLNetModel''',
'''TFXLNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 4 | 1 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 693 |
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            if "weight" in key:
                orig_state_dict[f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                orig_state_dict[f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            if "weight" in key:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val
    return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_groupvit_checkpoint(checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False):
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)
    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(f"Model name {model_name} not supported.")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)
    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)
    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint')
parser.add_argument(
'--model_name',
        default='groupvit-gcc-yfcc',
type=str,
help='Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.',
)
    args = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 222 | 0 |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    def __init__(self, path_or_paths: NestedDataStructureLike[PathLike], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, num_proc: Optional[int] = None, **kwargs):
        super().__init__(path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs)
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(cache_dir=cache_dir, data_files=path_or_paths, features=features, **kwargs)

    def read(self):
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc)
            dataset = self.builder.as_dataset(split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
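# Editor's sketch (not part of the original module): this reader is what the
# public `Dataset.from_text` convenience wraps; a hypothetical direct call
# would look like
#
#   reader = TextDatasetReader("corpus.txt", split=NamedSplit("train"))
#   ds = reader.read()  # a Dataset with one "text" column, one example per line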
| 707 |
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list[list[list[float] | float]]:
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []
    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])
    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
if __name__ == "__main__":
import doctest
doctest.testmod()
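# Editor's worked example of the functions above: the nearest neighbour of
# [0, 1] among three points, plus one cosine value (0.9615, rounded).
if __name__ == "__main__":
    sample_dataset = np.array([[0, 0], [1, 1], [2, 2]])
    sample_queries = np.array([[0, 1]])
    assert similarity_search(sample_dataset, sample_queries) == [[[0, 0], 1.0]]
    assert round(cosine_similarity(np.array([1, 2]), np.array([6, 32])), 4) == 0.9615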
| 105 | 0 |
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
WEIGHTS_TO_AVERAGE_ENDSWITH = [
"""word_embeddings_layernorm.weight""",
"""word_embeddings_layernorm.bias""",
"""input_layernorm.weight""",
"""input_layernorm.bias""",
"""post_attention_layernorm.weight""",
"""post_attention_layernorm.bias""",
"""self_attention.dense.bias""",
"""mlp.dense_4h_to_h.bias""",
"""ln_f.weight""",
"""ln_f.bias""",
]
WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
"""mlp.dense_4h_to_h.weight""",
"""self_attention.dense.weight""",
]
def layer_name_mapping(key, file):
    """Map Megatron-DeepSpeed layer names to transformers names."""
    # Handle first and last layers
    layer_rename_map = {
        "word_embeddings.weight": "word_embeddings.weight",
        "word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
        "word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
        "weight": "ln_f.weight",
        "bias": "ln_f.bias",
    }
    if key in layer_rename_map:
        return layer_rename_map[key]

    # Handle transformer blocks
    layer_number = int(re.match(r".*layer_(\d*).*", file)[1])
    layer_number -= 3
    return f"h.{layer_number}." + key
def get_dtype_size(dtype):
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8
def convert_bloom_checkpoint_to_pytorch(
    bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp
):
    # Construct model
    if bloom_config_file == "":
        config = BloomConfig()
    else:
        config = BloomConfig.from_json_file(bloom_config_file)

    if shard_model:
        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        index_dict = {"weight_map": {}, "metadata": {}}
        total_size = 0

        missing_keys = None
        config = BloomConfig()

        for j, file in enumerate(file_names):
            print("Processing file: {}".format(file))
            tensors = None

            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp
            torch.save(
                tensors,
                os.path.join(
                    pytorch_dump_folder_path,
                    "pytorch_model_{}-of-{}.bin".format(str(j + 1).zfill(5), str(len(file_names)).zfill(5)),
                ),
            )

            for key in tensors.keys():
                value = tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype)
                if key not in index_dict["weight_map"]:
                    index_dict["weight_map"][key] = "pytorch_model_{}-of-{}.bin".format(
                        str(j + 1).zfill(5), str(len(file_names)).zfill(5)
                    )

        config = BloomConfig()
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        index_dict["metadata"]["total_size"] = total_size
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
        with open(os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME + ".index.json"), "w", encoding="utf-8") as f:
            json_config = json.dumps(index_dict, indent=2, sort_keys=True) + "\n"
            f.write(json_config)
    else:
        model = BloomModel(config)

        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        missing_keys = None
        for i, file in enumerate(file_names):
            tensors = None
            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp

            other_keys = model.load_state_dict(tensors, strict=False)
            assert not other_keys.unexpected_keys, f"The keys {other_keys.unexpected_keys} are unexpected"
            if missing_keys is None:
                missing_keys = set(other_keys.missing_keys)
            else:
                missing_keys = missing_keys.intersection(set(other_keys.missing_keys))
        assert not missing_keys, f"The keys {missing_keys} are missing"

        # Save pytorch-model
        os.makedirs(pytorch_dump_folder_path, exist_ok=True)
        pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        print(f"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}")
        if config.torch_dtype is not None:
            model = model.to(config.torch_dtype)
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {pytorch_config_dump_path}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--bloom_checkpoint_path""",
default=None,
type=str,
required=True,
help="""Path to the Megatron-LM checkpoint path.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--bloom_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--shard_model""",
action="""store_true""",
help="""An optional setting to shard the output model \nThis enables sharding the converted checkpoint""",
)
parser.add_argument(
"""--pretraining_tp""",
default=4,
type=int,
help="""Pretraining TP rank that has been used when training the model in Megatron-LM \n""",
)
    args = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
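# Editor's sketch of the TP merge rule used above, on toy tensors (illustrative
# names, not part of the original script): keys matching
# WEIGHTS_TO_AVERAGE_ENDSWITH are summed then divided by the TP degree, while
# other parallel weights are concatenated along the parallel dimension.
def _demo_tp_merge():
    rank_a = {"input_layernorm.weight": torch.ones(4), "dense.weight": torch.ones(2, 4)}
    rank_b = {"input_layernorm.weight": 3 * torch.ones(4), "dense.weight": torch.zeros(2, 4)}
    merged = {
        "input_layernorm.weight": (rank_a["input_layernorm.weight"] + rank_b["input_layernorm.weight"]) / 2,
        "dense.weight": torch.cat([rank_a["dense.weight"], rank_b["dense.weight"]], dim=0),
    }
    assert merged["input_layernorm.weight"].tolist() == [2.0, 2.0, 2.0, 2.0]
    assert tuple(merged["dense.weight"].shape) == (4, 4)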
| 199 |
def perfect(number: int) -> bool:
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number


if __name__ == "__main__":
    print("Program to check whether a number is a Perfect number or not...")
    number = int(input("Enter number: ").strip())
    print(f"{number} is {'' if perfect(number) else 'not '}a Perfect Number.")
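# Editor's addition: a quick exhaustive check with the function above; the only
# perfect numbers below 10_000 are 6, 28, 496 and 8128.
def _demo_perfect_numbers():
    assert [n for n in range(2, 10_000) if perfect(n)] == [6, 28, 496, 8128]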
| 199 | 1 |
"""simple docstring"""
from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
    # 1) Construct the failure array for the pattern
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1

        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure
if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
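# Editor's sketch: the same scan, returning the index of the first match
# instead of a boolean; only the success branch changes.
def kmp_find(pattern: str, text: str) -> int:
    failure = get_failure_array(pattern)
    i, j = 0, 0
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return i - j  # start index of the match
            j += 1
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return -1


assert kmp_find("ABABX", "ABABZABABYABABX") == 10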
| 714 |
"""simple docstring"""
from math import isqrt
def calculate_prime_numbers(max_number: int) -> list[int]:
    """Return all primes below max_number, via a sieve of Eratosthenes."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    """Count semiprimes p * q below max_number with a two-pointer sweep over the primes."""
    prime_numbers = calculate_prime_numbers(max_number // 2)

    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1

    return semiprimes_count
if __name__ == "__main__":
print(f"""{solution() = }""")
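# Editor's cross-check (not part of the original solution): a brute-force count
# of prime pairs p <= q with p * q < n must agree with the two-pointer sweep.
def _check_solution(n: int = 30) -> None:
    primes = calculate_prime_numbers(n // 2)
    brute = sum(1 for a in primes for b in primes if a <= b and a * b < n)
    assert brute == solution(n) == 10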
| 660 | 0 |
"""simple docstring"""
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class ProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):

    tokenizer_class = ProphetNetTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])

    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    @require_torch
    def test_prepare_batch(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")

        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)
        result = list(batch.input_ids.numpy()[0])
        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 9), batch.input_ids.shape)
        self.assertEqual((2, 9), batch.attention_mask.shape)

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == text + [102]
        assert encoded_pair == text + [102] + text_2 + [102]
| 52 |
"""simple docstring"""
import numpy as np
from transformers import Pipeline
def softmax(outputs):
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)


class PairClassificationPipeline(Pipeline):
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)

        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
| 123 | 0 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPT2LMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
    from transformers.models.gpt2 import TFGPT2Tokenizer
TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"
if is_tf_available():
    class ModelToSave(tf.Module):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPT2LMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]
            return outputs
@require_tf
@require_keras_nlp
class GPT2TokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()

        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()

                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs

            config = tf_tokenizer.get_config()
            model_from_config = TFGPT2Tokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 123123

            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)

                out_length = out["input_ids"].numpy().shape[1]

                assert out_length == max_length
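# Editor's note (not part of the test file): outside the harness, the point of
# the in-graph tokenizer is that a SavedModel can accept raw strings directly,
# e.g. (network access assumed):
#
#   tok = TFGPT2Tokenizer.from_pretrained("gpt2")
#   ids = tok(tf.constant(["hello world"]))["input_ids"]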
| 715 |
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    Text2TextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class Text2TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        generator = Text2TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]
    def run_pipeline_test(self, generator, _):
        outputs = generator("Something there")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))

        outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        outputs = generator(
            ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
        )
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        with self.assertRaises(ValueError):
            generator(4)
@require_torch
    def test_small_model_pt(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])

        num_return_sequences = 3
        outputs = generator(
            "Something there",
            num_return_sequences=num_return_sequences,
            num_beams=num_return_sequences,
        )
        target_outputs = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(outputs, target_outputs)

        outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(torch.Tensor)},
                {"generated_token_ids": ANY(torch.Tensor)},
            ],
        )

        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = "<pad>"
        outputs = generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])
| 235 | 0 |
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text

    @unittest.skip("MGP-STR always lower cases letters.")
    def test_added_tokens_do_lower_case(self):
        pass

    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)

                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)

                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)

                self.assertEqual(text_2.replace(" ", ""), output_text)

    @unittest.skip("MGP-STR tokenizer only handles one sequence.")
    def test_maximum_encoding_length_pair_input(self):
        pass

    @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer")
    def test_pretokenized_inputs(self):
        pass
| 687 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    "configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neo"] = [
        "GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoForCausalLM",
        "GPTNeoForQuestionAnswering",
        "GPTNeoForSequenceClassification",
        "GPTNeoForTokenClassification",
        "GPTNeoModel",
        "GPTNeoPreTrainedModel",
        "load_tf_weights_in_gpt_neo",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
        "FlaxGPTNeoForCausalLM",
        "FlaxGPTNeoModel",
        "FlaxGPTNeoPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 687 | 1 |
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003


def rabin_karp(pattern: str, text: str) -> bool:
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")
if __name__ == "__main__":
test_rabin_karp()
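# Editor's worked example of the rolling-hash step above: dropping the leading
# character and appending the next one reproduces the hash computed from scratch.
def _demo_rolling_hash(text: str = "abcd", p_len: int = 3) -> None:
    def window_hash(s: str) -> int:
        h = 0
        for ch in s:
            h = (ord(ch) + h * alphabet_size) % modulus
        return h

    modulus_power = pow(alphabet_size, p_len - 1, modulus)
    h = window_hash(text[:p_len])
    rolled = ((h - ord(text[0]) * modulus_power) * alphabet_size + ord(text[p_len])) % modulus
    assert rolled == window_hash(text[1 : p_len + 1])


_demo_rolling_hash()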
| 718 |
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class FillMaskPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING
    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        if is_torch_available():
            import torch

            torch.cuda.empty_cache()
@require_tf
    def test_small_model_tf(self):
SCREAMING_SNAKE_CASE_ = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , top_k=2 , framework='''tf''' )
SCREAMING_SNAKE_CASE_ = unmasker('''My name is <mask>''' )
self.assertEqual(
nested_simplify(_A , decimals=6 ) , [
{'''sequence''': '''My name is grouped''', '''score''': 2.1E-05, '''token''': 38015, '''token_str''': ''' grouped'''},
{'''sequence''': '''My name is accuser''', '''score''': 2.1E-05, '''token''': 25506, '''token_str''': ''' accuser'''},
] , )
SCREAMING_SNAKE_CASE_ = unmasker('''The largest city in France is <mask>''' )
self.assertEqual(
nested_simplify(_A , decimals=6 ) , [
{
'''sequence''': '''The largest city in France is grouped''',
'''score''': 2.1E-05,
'''token''': 38015,
'''token_str''': ''' grouped''',
},
{
'''sequence''': '''The largest city in France is accuser''',
'''score''': 2.1E-05,
'''token''': 25506,
'''token_str''': ''' accuser''',
},
] , )
SCREAMING_SNAKE_CASE_ = unmasker('''My name is <mask>''' , targets=[''' Patrick''', ''' Clara''', ''' Teven'''] , top_k=3 )
self.assertEqual(
nested_simplify(_A , decimals=6 ) , [
{'''sequence''': '''My name is Clara''', '''score''': 2E-05, '''token''': 13606, '''token_str''': ''' Clara'''},
{'''sequence''': '''My name is Patrick''', '''score''': 2E-05, '''token''': 3499, '''token_str''': ''' Patrick'''},
{'''sequence''': '''My name is Te''', '''score''': 1.9E-05, '''token''': 2941, '''token_str''': ''' Te'''},
] , )
@require_torch
    def test_small_model_pt(self):
SCREAMING_SNAKE_CASE_ = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , top_k=2 , framework='''pt''' )
SCREAMING_SNAKE_CASE_ = unmasker('''My name is <mask>''' )
self.assertEqual(
nested_simplify(_A , decimals=6 ) , [
{'''sequence''': '''My name is Maul''', '''score''': 2.2E-05, '''token''': 35676, '''token_str''': ''' Maul'''},
{'''sequence''': '''My name isELS''', '''score''': 2.2E-05, '''token''': 16416, '''token_str''': '''ELS'''},
] , )
SCREAMING_SNAKE_CASE_ = unmasker('''The largest city in France is <mask>''' )
self.assertEqual(
nested_simplify(_A , decimals=6 ) , [
{
'''sequence''': '''The largest city in France is Maul''',
'''score''': 2.2E-05,
'''token''': 35676,
'''token_str''': ''' Maul''',
},
{'''sequence''': '''The largest city in France isELS''', '''score''': 2.2E-05, '''token''': 16416, '''token_str''': '''ELS'''},
] , )
SCREAMING_SNAKE_CASE_ = unmasker('''My name is <mask>''' , targets=[''' Patrick''', ''' Clara''', ''' Teven'''] , top_k=3 )
self.assertEqual(
nested_simplify(_A , decimals=6 ) , [
{'''sequence''': '''My name is Patrick''', '''score''': 2.1E-05, '''token''': 3499, '''token_str''': ''' Patrick'''},
{'''sequence''': '''My name is Te''', '''score''': 2E-05, '''token''': 2941, '''token_str''': ''' Te'''},
{'''sequence''': '''My name is Clara''', '''score''': 2E-05, '''token''': 13606, '''token_str''': ''' Clara'''},
] , )
SCREAMING_SNAKE_CASE_ = unmasker('''My name is <mask> <mask>''' , top_k=2 )
self.assertEqual(
nested_simplify(_A , decimals=6 ) , [
[
{
'''score''': 2.2E-05,
'''token''': 35676,
'''token_str''': ''' Maul''',
'''sequence''': '''<s>My name is Maul<mask></s>''',
},
{'''score''': 2.2E-05, '''token''': 16416, '''token_str''': '''ELS''', '''sequence''': '''<s>My name isELS<mask></s>'''},
],
[
{
'''score''': 2.2E-05,
'''token''': 35676,
'''token_str''': ''' Maul''',
'''sequence''': '''<s>My name is<mask> Maul</s>''',
},
{'''score''': 2.2E-05, '''token''': 16416, '''token_str''': '''ELS''', '''sequence''': '''<s>My name is<mask>ELS</s>'''},
],
] , )
@require_torch_gpu
    def test_fp16_casting(self):
SCREAMING_SNAKE_CASE_ = pipeline('''fill-mask''' , model='''hf-internal-testing/tiny-random-distilbert''' , device=0 , framework='''pt''' )
# convert model to fp16
pipe.model.half()
SCREAMING_SNAKE_CASE_ = pipe('''Paris is the [MASK] of France.''' )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
self.assertIsInstance(_A , _A )
@slow
@require_torch
    def test_large_model_pt(self):
SCREAMING_SNAKE_CASE_ = pipeline(task='''fill-mask''' , model='''distilroberta-base''' , top_k=2 , framework='''pt''' )
self.run_large_test(_A )
@slow
@require_tf
    def test_large_model_tf(self):
SCREAMING_SNAKE_CASE_ = pipeline(task='''fill-mask''' , model='''distilroberta-base''' , top_k=2 , framework='''tf''' )
self.run_large_test(_A )
    def run_large_test(self, unmasker):
SCREAMING_SNAKE_CASE_ = unmasker('''My name is <mask>''' )
self.assertEqual(
nested_simplify(_A ) , [
{'''sequence''': '''My name is John''', '''score''': 0.008, '''token''': 610, '''token_str''': ''' John'''},
{'''sequence''': '''My name is Chris''', '''score''': 0.007, '''token''': 1573, '''token_str''': ''' Chris'''},
] , )
SCREAMING_SNAKE_CASE_ = unmasker('''The largest city in France is <mask>''' )
self.assertEqual(
nested_simplify(_A ) , [
{
'''sequence''': '''The largest city in France is Paris''',
'''score''': 0.251,
'''token''': 2201,
'''token_str''': ''' Paris''',
},
{
'''sequence''': '''The largest city in France is Lyon''',
'''score''': 0.214,
'''token''': 12790,
'''token_str''': ''' Lyon''',
},
] , )
SCREAMING_SNAKE_CASE_ = unmasker('''My name is <mask>''' , targets=[''' Patrick''', ''' Clara''', ''' Teven'''] , top_k=3 )
self.assertEqual(
nested_simplify(_A ) , [
{'''sequence''': '''My name is Patrick''', '''score''': 0.005, '''token''': 3499, '''token_str''': ''' Patrick'''},
{'''sequence''': '''My name is Clara''', '''score''': 0.000, '''token''': 13606, '''token_str''': ''' Clara'''},
{'''sequence''': '''My name is Te''', '''score''': 0.000, '''token''': 2941, '''token_str''': ''' Te'''},
] , )
@require_torch
    def test_model_no_pad_pt(self):
SCREAMING_SNAKE_CASE_ = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , framework='''pt''' )
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
self.run_pipeline_test(_A , [] )
@require_tf
def _UpperCamelCase ( self ) -> Tuple:
SCREAMING_SNAKE_CASE_ = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , framework='''tf''' )
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
self.run_pipeline_test(_A , [] )
    def get_test_pipeline(self, model, tokenizer, processor):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        examples = [
            f"This is another {tokenizer.mask_token} test",
        ]
        return fill_masker, examples
    def run_pipeline_test(self, fill_masker, examples):
        tokenizer = fill_masker.tokenizer
        model = fill_masker.model
SCREAMING_SNAKE_CASE_ = fill_masker(
F'''This is a {tokenizer.mask_token}''' , )
self.assertEqual(
_A , [
{'''sequence''': ANY(_A ), '''score''': ANY(_A ), '''token''': ANY(_A ), '''token_str''': ANY(_A )},
{'''sequence''': ANY(_A ), '''score''': ANY(_A ), '''token''': ANY(_A ), '''token_str''': ANY(_A )},
{'''sequence''': ANY(_A ), '''score''': ANY(_A ), '''token''': ANY(_A ), '''token_str''': ANY(_A )},
{'''sequence''': ANY(_A ), '''score''': ANY(_A ), '''token''': ANY(_A ), '''token_str''': ANY(_A )},
{'''sequence''': ANY(_A ), '''score''': ANY(_A ), '''token''': ANY(_A ), '''token_str''': ANY(_A )},
] , )
SCREAMING_SNAKE_CASE_ = fill_masker([F'''This is a {tokenizer.mask_token}'''] )
self.assertEqual(
_A , [
{'''sequence''': ANY(_A ), '''score''': ANY(_A ), '''token''': ANY(_A ), '''token_str''': ANY(_A )},
{'''sequence''': ANY(_A ), '''score''': ANY(_A ), '''token''': ANY(_A ), '''token_str''': ANY(_A )},
{'''sequence''': ANY(_A ), '''score''': ANY(_A ), '''token''': ANY(_A ), '''token_str''': ANY(_A )},
{'''sequence''': ANY(_A ), '''score''': ANY(_A ), '''token''': ANY(_A ), '''token_str''': ANY(_A )},
{'''sequence''': ANY(_A ), '''score''': ANY(_A ), '''token''': ANY(_A ), '''token_str''': ANY(_A )},
] , )
SCREAMING_SNAKE_CASE_ = fill_masker([F'''This is a {tokenizer.mask_token}''', F'''Another {tokenizer.mask_token} great test.'''] )
self.assertEqual(
_A , [
[
{'''sequence''': ANY(_A ), '''score''': ANY(_A ), '''token''': ANY(_A ), '''token_str''': ANY(_A )},
{'''sequence''': ANY(_A ), '''score''': ANY(_A ), '''token''': ANY(_A ), '''token_str''': ANY(_A )},
{'''sequence''': ANY(_A ), '''score''': ANY(_A ), '''token''': ANY(_A ), '''token_str''': ANY(_A )},
{'''sequence''': ANY(_A ), '''score''': ANY(_A ), '''token''': ANY(_A ), '''token_str''': ANY(_A )},
{'''sequence''': ANY(_A ), '''score''': ANY(_A ), '''token''': ANY(_A ), '''token_str''': ANY(_A )},
],
[
{'''sequence''': ANY(_A ), '''score''': ANY(_A ), '''token''': ANY(_A ), '''token_str''': ANY(_A )},
{'''sequence''': ANY(_A ), '''score''': ANY(_A ), '''token''': ANY(_A ), '''token_str''': ANY(_A )},
{'''sequence''': ANY(_A ), '''score''': ANY(_A ), '''token''': ANY(_A ), '''token_str''': ANY(_A )},
{'''sequence''': ANY(_A ), '''score''': ANY(_A ), '''token''': ANY(_A ), '''token_str''': ANY(_A )},
{'''sequence''': ANY(_A ), '''score''': ANY(_A ), '''token''': ANY(_A ), '''token_str''': ANY(_A )},
],
] , )
with self.assertRaises(_A ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(_A ):
fill_masker('''This is''' )
self.run_test_top_k(_A , _A )
self.run_test_targets(_A , _A )
self.run_test_top_k_targets(_A , _A )
self.fill_mask_with_duplicate_targets_and_top_k(_A , _A )
self.fill_mask_with_multiple_masks(_A , _A )
    def run_test_targets(self, model, tokenizer):
SCREAMING_SNAKE_CASE_ = tokenizer.get_vocab()
SCREAMING_SNAKE_CASE_ = sorted(vocab.keys() )[:2]
# Pipeline argument
SCREAMING_SNAKE_CASE_ = FillMaskPipeline(model=_A , tokenizer=_A , targets=_A )
SCREAMING_SNAKE_CASE_ = fill_masker(F'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
_A , [
{'''sequence''': ANY(_A ), '''score''': ANY(_A ), '''token''': ANY(_A ), '''token_str''': ANY(_A )},
{'''sequence''': ANY(_A ), '''score''': ANY(_A ), '''token''': ANY(_A ), '''token_str''': ANY(_A )},
] , )
SCREAMING_SNAKE_CASE_ = {vocab[el] for el in targets}
self.assertEqual({el['''token'''] for el in outputs} , _A )
SCREAMING_SNAKE_CASE_ = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['''token_str'''] for el in outputs} , set(_A ) )
# Call argument
SCREAMING_SNAKE_CASE_ = FillMaskPipeline(model=_A , tokenizer=_A )
SCREAMING_SNAKE_CASE_ = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=_A )
self.assertEqual(
_A , [
{'''sequence''': ANY(_A ), '''score''': ANY(_A ), '''token''': ANY(_A ), '''token_str''': ANY(_A )},
{'''sequence''': ANY(_A ), '''score''': ANY(_A ), '''token''': ANY(_A ), '''token_str''': ANY(_A )},
] , )
SCREAMING_SNAKE_CASE_ = {vocab[el] for el in targets}
self.assertEqual({el['''token'''] for el in outputs} , _A )
SCREAMING_SNAKE_CASE_ = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['''token_str'''] for el in outputs} , set(_A ) )
# Score equivalence
SCREAMING_SNAKE_CASE_ = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=_A )
SCREAMING_SNAKE_CASE_ = [top_mask['''token_str'''] for top_mask in outputs]
SCREAMING_SNAKE_CASE_ = [top_mask['''score'''] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_A ) == set(_A ):
SCREAMING_SNAKE_CASE_ = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=_A )
SCREAMING_SNAKE_CASE_ = [top_mask['''score'''] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(_A ) , nested_simplify(_A ) )
# Raises with invalid
with self.assertRaises(_A ):
SCREAMING_SNAKE_CASE_ = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(_A ):
SCREAMING_SNAKE_CASE_ = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=[''''''] )
with self.assertRaises(_A ):
SCREAMING_SNAKE_CASE_ = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets='''''' )
    def run_test_top_k(self, model, tokenizer):
SCREAMING_SNAKE_CASE_ = FillMaskPipeline(model=_A , tokenizer=_A , top_k=2 )
SCREAMING_SNAKE_CASE_ = fill_masker(F'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
_A , [
{'''sequence''': ANY(_A ), '''score''': ANY(_A ), '''token''': ANY(_A ), '''token_str''': ANY(_A )},
{'''sequence''': ANY(_A ), '''score''': ANY(_A ), '''token''': ANY(_A ), '''token_str''': ANY(_A )},
] , )
SCREAMING_SNAKE_CASE_ = FillMaskPipeline(model=_A , tokenizer=_A )
SCREAMING_SNAKE_CASE_ = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=2 )
self.assertEqual(
_A , [
{'''sequence''': ANY(_A ), '''score''': ANY(_A ), '''token''': ANY(_A ), '''token_str''': ANY(_A )},
{'''sequence''': ANY(_A ), '''score''': ANY(_A ), '''token''': ANY(_A ), '''token_str''': ANY(_A )},
] , )
self.assertEqual(nested_simplify(_A ) , nested_simplify(_A ) )
    def run_test_top_k_targets(self, model, tokenizer):
SCREAMING_SNAKE_CASE_ = tokenizer.get_vocab()
SCREAMING_SNAKE_CASE_ = FillMaskPipeline(model=_A , tokenizer=_A )
# top_k=2, ntargets=3
SCREAMING_SNAKE_CASE_ = sorted(vocab.keys() )[:3]
SCREAMING_SNAKE_CASE_ = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=2 , targets=_A )
# If we use the most probably targets, and filter differently, we should still
# have the same results
SCREAMING_SNAKE_CASE_ = [el['''token_str'''] for el in sorted(_A , key=lambda _A : x["score"] , reverse=_A )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_A ).issubset(_A ):
SCREAMING_SNAKE_CASE_ = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=3 , targets=_A )
# They should yield exactly the same result
self.assertEqual(nested_simplify(_A ) , nested_simplify(_A ) )
    def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer):
SCREAMING_SNAKE_CASE_ = FillMaskPipeline(model=_A , tokenizer=_A )
SCREAMING_SNAKE_CASE_ = tokenizer.get_vocab()
# String duplicates + id duplicates
SCREAMING_SNAKE_CASE_ = sorted(vocab.keys() )[:3]
SCREAMING_SNAKE_CASE_ = [targets[0], targets[1], targets[0], targets[2], targets[1]]
SCREAMING_SNAKE_CASE_ = fill_masker(F'''My name is {tokenizer.mask_token}''' , targets=_A , top_k=10 )
# The target list contains duplicates, so we can't output more
# than them
self.assertEqual(len(_A ) , 3 )
    def fill_mask_with_multiple_masks(self, model, tokenizer):
SCREAMING_SNAKE_CASE_ = FillMaskPipeline(model=_A , tokenizer=_A )
SCREAMING_SNAKE_CASE_ = fill_masker(
F'''This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}''' , top_k=2 )
self.assertEqual(
_A , [
[
{'''sequence''': ANY(_A ), '''score''': ANY(_A ), '''token''': ANY(_A ), '''token_str''': ANY(_A )},
{'''sequence''': ANY(_A ), '''score''': ANY(_A ), '''token''': ANY(_A ), '''token_str''': ANY(_A )},
],
[
{'''sequence''': ANY(_A ), '''score''': ANY(_A ), '''token''': ANY(_A ), '''token_str''': ANY(_A )},
{'''sequence''': ANY(_A ), '''score''': ANY(_A ), '''token''': ANY(_A ), '''token_str''': ANY(_A )},
],
[
{'''sequence''': ANY(_A ), '''score''': ANY(_A ), '''token''': ANY(_A ), '''token_str''': ANY(_A )},
{'''sequence''': ANY(_A ), '''score''': ANY(_A ), '''token''': ANY(_A ), '''token_str''': ANY(_A )},
],
] , )
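# Editor's note: outside the test harness, the pipeline exercised above reduces
# to a few lines (network access assumed; the completions mirror the slow test):
#
#   from transformers import pipeline
#
#   unmasker = pipeline("fill-mask", model="distilroberta-base", top_k=2)
#   unmasker("The largest city in France is <mask>.")
#   # -> [{"token_str": " Paris", ...}, {"token_str": " Lyon", ...}]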
| 597 | 0 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
TOKENIZER_CHECKPOINTS = ["bert-base-uncased", "bert-base-cased"]
TINY_MODEL_CHECKPOINT = "hf-internal-testing/tiny-bert-tf-only"
if is_tf_available():
    class ModelToSave(tf.keras.Model):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.bert = TFAutoModel.from_config(config)

        def call(self, inputs):
            tokenized = self.tokenizer(inputs)
            out = self.bert(**tokenized)
            return out["pooler_output"]
@require_tf
@require_tensorflow_text
class _a (unittest.TestCase ):
'''simple docstring'''
def __A ( self ):
super().setUp()
A__ : List[Any] = [
BertTokenizer.from_pretrained(A__ ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
] # repeat for when fast_bert_tokenizer=false
A__ : int = [TFBertTokenizer.from_pretrained(A__ ) for checkpoint in TOKENIZER_CHECKPOINTS] + [
TFBertTokenizer.from_pretrained(A__ , use_fast_bert_tokenizer=A__ )
for checkpoint in TOKENIZER_CHECKPOINTS
]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
A__ : str = [
"""This is a straightforward English test sentence.""",
"""This one has some weird characters\rto\nsee\r\nif those\u00E9break things.""",
"""Now we're going to add some Chinese: 一 二 三 一二三""",
"""And some much more rare Chinese: 齉 堃 齉堃""",
"""Je vais aussi écrire en français pour tester les accents""",
"""Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ""",
]
A__ : Optional[int] = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in (self.test_sentences, self.paired_sentences):
                python_outputs = tokenizer(test_inputs, return_tensors="tf", padding="longest")
                tf_outputs = tf_tokenizer(test_inputs)

                for key in python_outputs.keys():
                    self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key], tf.int64) == tf_outputs[key]))

    @slow
    def test_different_pairing_styles(self):
        for tf_tokenizer in self.tf_tokenizers:
            merged_outputs = tf_tokenizer(self.paired_sentences)
            separated_outputs = tf_tokenizer(
                text=[sentence[0] for sentence in self.paired_sentences],
                text_pair=[sentence[1] for sentence in self.paired_sentences],
            )
            for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key], tf.int64) == separated_outputs[key]))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in (self.test_sentences, self.paired_sentences):
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor(self.test_sentences)
            out = model(test_inputs)  # Build model with some sample inputs

            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                model.save(save_path)
                loaded_model = tf.keras.models.load_model(save_path)
                loaded_output = loaded_model(test_inputs)

            # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
            self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)), 1e-5)
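# Editor's note: a minimal, self-contained usage sketch of the in-graph tokenizer the
# tests above exercise (not part of the original test file; the checkpoint name is an
# assumption and tensorflow_text must be installed).
import tensorflow as tf

from transformers import TFBertTokenizer

tf_tokenizer = TFBertTokenizer.from_pretrained("bert-base-uncased")


@tf.function(input_signature=[tf.TensorSpec(shape=(None,), dtype=tf.string)])
def tokenize(texts):
    # Tokenization happens inside the TF graph, so this function can be saved/exported.
    return tf_tokenizer(texts)


print(tokenize(tf.constant(["Hello world!", "TF tokenizers run in-graph."])))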
| 456 |
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
'wmt19-ru-en': {'length_penalty': 1.1},
'wmt19-en-ru': {'length_penalty': 1.15},
'wmt19-en-de': {'length_penalty': 1.0},
'wmt19-de-en': {'length_penalty': 1.1},
# allenai:
'wmt16-en-de-dist-12-1': {'length_penalty': 0.6},
'wmt16-en-de-dist-6-1': {'length_penalty': 0.6},
'wmt16-en-de-12-1': {'length_penalty': 0.8},
'wmt19-de-en-6-6-base': {'length_penalty': 0.6},
'wmt19-de-en-6-6-big': {'length_penalty': 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = "facebook"
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
    org_names[m] = "allenai"
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
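# Editor's note: a small worked example of the rewrite above (not in the original script).
# BPE continuation markers are stripped, word-final tokens get a "</w>" suffix, and the
# four special tokens are restored verbatim:
#
#   rewrite_dict_keys({"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "le@@": 5, "er": 7})
#   -> {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "le": 5, "er</w>": 7}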
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    assert os.path.exists(fsmt_checkpoint_path)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = basename(fsmt_checkpoint_path)
    fsmt_folder_path = dirname(fsmt_checkpoint_path)

    cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    models = cls.hub_models()
    kwargs = {"bpe": "fastbpe", "tokenizer": "moses"}
    data_name_or_path = "."
    # note: since the model dump is old, fairseq has upgraded its model some
    # time later, and it does a whole lot of rewrites and splits on the saved
    # weights, therefore we can't use torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
    print(f"using checkpoint {checkpoint_file}")
    chkpt = hub_utils.from_pretrained(
        fsmt_folder_path, checkpoint_file, data_name_or_path, archive_map=models, **kwargs
    )

    args = vars(chkpt["args"]["model"])

    src_lang = args["source_lang"]
    tgt_lang = args["target_lang"]

    data_root = dirname(fsmt_folder_path)
    model_dir = basename(fsmt_folder_path)
    # dicts
    src_dict_file = os.path.join(fsmt_folder_path, f"dict.{src_lang}.txt")
    tgt_dict_file = os.path.join(fsmt_folder_path, f"dict.{tgt_lang}.txt")

    src_dict = Dictionary.load(src_dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-src.json")
    print(f"Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # detect whether this is a do_lower_case situation, which can be derived by checking whether we
    # have at least one uppercase letter in the source vocab
    do_lower_case = True
    for k in src_vocab.keys():
        if not k.islower():
            do_lower_case = False
            break

    tgt_dict = Dictionary.load(tgt_dict_file)
    tgt_vocab = rewrite_dict_keys(tgt_dict.indices)
    tgt_vocab_size = len(tgt_vocab)
    tgt_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-tgt.json")
    print(f"Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records")
    with open(tgt_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tgt_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    for fn in ["bpecodes", "code"]:  # older fairseq called the merges file "code"
        fsmt_merges_file = os.path.join(fsmt_folder_path, fn)
        if os.path.exists(fsmt_merges_file):
            break
    with open(fsmt_merges_file, encoding="utf-8") as fin:
        merges = fin.read()
    merges = re.sub(r" \d+$", "", merges, 0, re.M)  # remove frequency number
    print(f"Generating {merges_file}")
    with open(merges_file, "w", encoding="utf-8") as fout:
        fout.write(merges)
    # model config
    fsmt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
    # may have to modify the tokenizer if a different type is used by a future model
    assert args["bpe"] == "fastbpe", f"need to extend tokenizer to support bpe={args['bpe']}"
    assert args["tokenizer"] == "moses", f"need to extend tokenizer to support tokenizer={args['tokenizer']}"

    model_conf = {
        "architectures": ["FSMTForConditionalGeneration"],
        "model_type": "fsmt",
        "activation_dropout": args["activation_dropout"],
        "activation_function": "relu",
        "attention_dropout": args["attention_dropout"],
        "d_model": args["decoder_embed_dim"],
        "dropout": args["dropout"],
        "init_std": 0.02,
        "max_position_embeddings": args["max_source_positions"],
        "num_hidden_layers": args["encoder_layers"],
        "src_vocab_size": src_vocab_size,
        "tgt_vocab_size": tgt_vocab_size,
        "langs": [src_lang, tgt_lang],
        "encoder_attention_heads": args["encoder_attention_heads"],
        "encoder_ffn_dim": args["encoder_ffn_embed_dim"],
        "encoder_layerdrop": args["encoder_layerdrop"],
        "encoder_layers": args["encoder_layers"],
        "decoder_attention_heads": args["decoder_attention_heads"],
        "decoder_ffn_dim": args["decoder_ffn_embed_dim"],
        "decoder_layerdrop": args["decoder_layerdrop"],
        "decoder_layers": args["decoder_layers"],
        "bos_token_id": 0,
        "pad_token_id": 1,
        "eos_token_id": 2,
        "is_encoder_decoder": True,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_all_embeddings"],
    }

    # good hparam defaults to start with
    model_conf["num_beams"] = 5
    model_conf["early_stopping"] = False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        model_conf["length_penalty"] = best_score_hparams[model_dir]["length_penalty"]
    else:
        model_conf["length_penalty"] = 1.0

    print(f"Generating {fsmt_model_config_file}")
    with open(fsmt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "langs": [src_lang, tgt_lang],
        "model_max_length": 1024,
        "do_lower_case": do_lower_case,
    }

    print(f"Generating {fsmt_tokenizer_config_file}")
    with open(fsmt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))
    # model
    model = chkpt["models"][0]
    model_state_dict = model.state_dict()

    # rename keys to start with 'model.'
    model_state_dict = OrderedDict(("model." + k, v) for k, v in model_state_dict.items())

    # remove unneeded keys
    ignore_keys = [
        "model.model",
        "model.encoder.version",
        "model.decoder.version",
        "model.encoder_embed_tokens.weight",
        "model.decoder_embed_tokens.weight",
        "model.encoder.embed_positions._float_tensor",
        "model.decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    config = FSMTConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = FSMTForConditionalGeneration(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict, strict=False)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
    print("\nLast step is to upload the files to s3")
    print(f"cd {data_root}")
    print(f"transformers-cli upload {model_dir}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fsmt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
args = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
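# Editor's note: an illustrative invocation of the converter above (not from the original
# file; the script name, paths, and checkpoint filename are placeholders):
#
#   python convert_fsmt_original_pytorch_checkpoint_to_pytorch.py \
#       --fsmt_checkpoint_path data/wmt19-ru-en/model4.pt \
#       --pytorch_dump_folder_path data/wmt19-ru-en-converted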
| 456 | 1 |
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxControlNetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up memory after each test
        super().tearDown()
        gc.collect()
    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids, image=processed_image, params=p_params, prng_seed=rng,
            num_inference_steps=50, jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
        )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids, image=processed_image, params=p_params, prng_seed=rng,
            num_inference_steps=50, jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
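# Editor's note: a self-contained sketch (not from the test file) of the replicate/shard
# data-parallel pattern the tests above rely on: `replicate` copies parameters to every
# device, `shard` splits the batch across devices, and `jax.pmap` runs the computation.
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard

params = {"w": jnp.ones(4)}
batch = jnp.arange(jax.device_count() * 4, dtype=jnp.float32).reshape(-1, 4)

p_params = replicate(params)  # one copy of the params per device
sharded_batch = shard(batch)  # leading axis becomes (n_devices, batch_per_device, ...)

out = jax.pmap(lambda p, x: x @ p["w"])(p_params, sharded_batch)
print(out.shape)  # (n_devices, batch_per_device)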
| 713 |
from __future__ import annotations
class Node:
    """A Node has a data variable and pointers to its left and right child nodes."""

    def __init__(self, data) -> None:
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None


def display(tree: Node | None) -> None:  # In Order traversal of the tree
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)


def depth_of_tree(tree: Node | None) -> int:
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0


def is_full_binary_tree(tree: Node) -> bool:
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        return not tree.left and not tree.right


def main() -> None:  # Main function for testing.
    # The exact attachment points of the sample tree were lost in extraction;
    # this rebuilds a representative nine-node tree.
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.left.right.right = Node(9)

    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print("Tree is: ")
    display(tree)


if __name__ == "__main__":
    main()
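# Editor's note: a quick illustration (not in the original module) of the predicate above:
# a node with exactly one child makes the tree not "full".
#
#   lopsided = Node(1)
#   lopsided.left = Node(2)          # right child missing
#   is_full_binary_tree(lopsided)    # -> False
#   depth_of_tree(lopsided)          # -> 2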
| 336 | 0 |
"""simple docstring"""
import logging
from transformers.configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    """A class replicating BertConfig with an additional pruning configuration."""

    model_type = "masked_bert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, pruning_method="topK", mask_init="constant", mask_scale=0.0, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
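# Editor's note: a minimal usage sketch (not part of the original module); the pruning
# fields ride along with the standard BERT hyperparameters.
#
#   config = MaskedBertConfig(pruning_method="topK", mask_scale=0.0)
#   print(config.hidden_size, config.pruning_method)  # 768 topK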
| 76 |
"""simple docstring"""
import numpy as np
import datasets
_DESCRIPTION = '\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n'
_CITATION = '\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n'
_KWARGS_DESCRIPTION = '\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric("mahalanobis")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {\'mahalanobis\': array([0.5])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mahalanobis(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "X": datasets.Sequence(datasets.Value("float", id="sequence"), id="X"),
                }
            ),
        )

    def _compute(self, X, reference_distribution):
        # convert to numpy arrays
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)

        # assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError("Expected `X` to be a 2D vector")
        if len(reference_distribution.shape) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D vector")
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension"
            )

        # get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()

        return {"mahalanobis": mahal_dist}
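# Editor's note: a standalone NumPy check (not part of the metric module) that mirrors
# the computation in `_compute` above for a tiny reference distribution.
import numpy as np

X = np.array([[0.0, 1.0]])
ref = np.array([[0.0, 1.0], [1.0, 0.0]])

delta = X - np.mean(ref)
inv_cov = np.linalg.pinv(np.cov(ref.T))  # pinv: this 2-point covariance is singular
print(np.dot(np.dot(delta, inv_cov), delta.T).diagonal())  # [0.5], matching the docstring example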
| 76 | 1 |
def excel_title_to_column(column_title: str) -> int:
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
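# Editor's note: the inverse conversion, added here as a hedged sketch (it is not part of
# the original module): turn a 1-based column number back into its Excel title.
def column_to_excel_title(column_number: int) -> str:
    assert column_number > 0
    title = ""
    while column_number > 0:
        column_number, remainder = divmod(column_number - 1, 26)
        title = chr(65 + remainder) + title
    return title


assert column_to_excel_title(28) == "AB"  # and excel_title_to_column("AB") == 28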
| 701 |
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)
    return "".join(translated)


if __name__ == "__main__":
    main()
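# Editor's note: a non-interactive round-trip check (not in the original script):
# decrypting an encrypted message with the same key recovers the plaintext, since the
# letter shifts are inverses mod 26 and non-letters pass through unchanged.
#
#   ciphertext = encrypt_message("HELLO", "Attack at dawn!")
#   assert decrypt_message("HELLO", ciphertext) == "Attack at dawn!"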
| 391 | 0 |
import re

from filelock import FileLock


try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Re-split text into one sentence per line (used to match published rougeL scores)."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char; the original discarded this result
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
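# Editor's note: illustrative call (not in the original helper); requires the punkt data
# downloaded above.
#
#   add_newline_to_end_of_each_sentence("First sentence. Second one.")
#   -> "First sentence.\nSecond one."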
| 209 |
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class TFLayoutLMvaModelTester:
def __init__( self : Optional[Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Tuple=2 , UpperCamelCase_ : List[Any]=3 , UpperCamelCase_ : Union[str, Any]=4 , UpperCamelCase_ : int=2 , UpperCamelCase_ : List[str]=7 , UpperCamelCase_ : List[str]=True , UpperCamelCase_ : List[Any]=True , UpperCamelCase_ : List[str]=True , UpperCamelCase_ : int=True , UpperCamelCase_ : List[str]=99 , UpperCamelCase_ : Dict=36 , UpperCamelCase_ : int=2 , UpperCamelCase_ : Union[str, Any]=4 , UpperCamelCase_ : str=37 , UpperCamelCase_ : List[Any]="gelu" , UpperCamelCase_ : Optional[int]=0.1 , UpperCamelCase_ : Optional[int]=0.1 , UpperCamelCase_ : int=5_12 , UpperCamelCase_ : List[Any]=16 , UpperCamelCase_ : Optional[Any]=2 , UpperCamelCase_ : Union[str, Any]=0.02 , UpperCamelCase_ : str=6 , UpperCamelCase_ : int=6 , UpperCamelCase_ : List[str]=3 , UpperCamelCase_ : Optional[Any]=4 , UpperCamelCase_ : Any=None , UpperCamelCase_ : Union[str, Any]=10_00 , ) -> int:
SCREAMING_SNAKE_CASE__ :int = parent
SCREAMING_SNAKE_CASE__ :str = batch_size
SCREAMING_SNAKE_CASE__ :Dict = num_channels
SCREAMING_SNAKE_CASE__ :Any = image_size
SCREAMING_SNAKE_CASE__ :Optional[Any] = patch_size
SCREAMING_SNAKE_CASE__ :List[Any] = is_training
SCREAMING_SNAKE_CASE__ :Tuple = use_input_mask
SCREAMING_SNAKE_CASE__ :Union[str, Any] = use_token_type_ids
SCREAMING_SNAKE_CASE__ :Optional[Any] = use_labels
SCREAMING_SNAKE_CASE__ :Tuple = vocab_size
SCREAMING_SNAKE_CASE__ :List[Any] = hidden_size
SCREAMING_SNAKE_CASE__ :int = num_hidden_layers
SCREAMING_SNAKE_CASE__ :Optional[Any] = num_attention_heads
SCREAMING_SNAKE_CASE__ :Any = intermediate_size
SCREAMING_SNAKE_CASE__ :Tuple = hidden_act
SCREAMING_SNAKE_CASE__ :Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ :Any = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ :List[str] = max_position_embeddings
SCREAMING_SNAKE_CASE__ :Tuple = type_vocab_size
SCREAMING_SNAKE_CASE__ :List[str] = type_sequence_label_size
SCREAMING_SNAKE_CASE__ :Any = initializer_range
SCREAMING_SNAKE_CASE__ :List[Any] = coordinate_size
SCREAMING_SNAKE_CASE__ :List[Any] = shape_size
SCREAMING_SNAKE_CASE__ :str = num_labels
SCREAMING_SNAKE_CASE__ :Any = num_choices
SCREAMING_SNAKE_CASE__ :Union[str, Any] = scope
SCREAMING_SNAKE_CASE__ :Union[str, Any] = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
SCREAMING_SNAKE_CASE__ :str = text_seq_length
SCREAMING_SNAKE_CASE__ :int = (image_size // patch_size) ** 2 + 1
SCREAMING_SNAKE_CASE__ :Union[str, Any] = self.text_seq_length + self.image_seq_length
def __lowerCamelCase ( self : Optional[int] ) -> int:
SCREAMING_SNAKE_CASE__ :Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ :List[str] = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
SCREAMING_SNAKE_CASE__ :Any = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
SCREAMING_SNAKE_CASE__ :str = bbox[i, j, 3]
SCREAMING_SNAKE_CASE__ :str = bbox[i, j, 1]
SCREAMING_SNAKE_CASE__ :Dict = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
SCREAMING_SNAKE_CASE__ :Optional[int] = bbox[i, j, 2]
SCREAMING_SNAKE_CASE__ :Dict = bbox[i, j, 0]
SCREAMING_SNAKE_CASE__ :Any = tmp_coordinate
SCREAMING_SNAKE_CASE__ :Tuple = tf.constant(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ :Union[str, Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE__ :Tuple = random_attention_mask([self.batch_size, self.text_seq_length] )
SCREAMING_SNAKE_CASE__ :Optional[Any] = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE__ :Union[str, Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE__ :Optional[Any] = None
SCREAMING_SNAKE_CASE__ :Any = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ :List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ :Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE__ :Optional[int] = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def __lowerCamelCase ( self : Optional[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : int , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : str , UpperCamelCase_ : str ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ :Dict = TFLayoutLMvaModel(config=UpperCamelCase_ )
# text + image
SCREAMING_SNAKE_CASE__ :int = model(UpperCamelCase_ , pixel_values=UpperCamelCase_ , training=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :int = model(
UpperCamelCase_ , bbox=UpperCamelCase_ , pixel_values=UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , training=UpperCamelCase_ , )
SCREAMING_SNAKE_CASE__ :str = model(UpperCamelCase_ , bbox=UpperCamelCase_ , pixel_values=UpperCamelCase_ , training=UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
SCREAMING_SNAKE_CASE__ :List[Any] = model(UpperCamelCase_ , training=UpperCamelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
SCREAMING_SNAKE_CASE__ :Optional[int] = model({'pixel_values': pixel_values} , training=UpperCamelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def __lowerCamelCase ( self : Union[str, Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : int , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Union[str, Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ :Any = self.num_labels
SCREAMING_SNAKE_CASE__ :Any = TFLayoutLMvaForSequenceClassification(config=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :Tuple = model(
UpperCamelCase_ , bbox=UpperCamelCase_ , pixel_values=UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ , training=UpperCamelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCamelCase ( self : Union[str, Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Tuple ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ :Union[str, Any] = self.num_labels
SCREAMING_SNAKE_CASE__ :Tuple = TFLayoutLMvaForTokenClassification(config=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :Optional[Any] = model(
UpperCamelCase_ , bbox=UpperCamelCase_ , pixel_values=UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ , training=UpperCamelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def __lowerCamelCase ( self : Optional[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Tuple , UpperCamelCase_ : int , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[Any] ) -> int:
SCREAMING_SNAKE_CASE__ :int = 2
SCREAMING_SNAKE_CASE__ :str = TFLayoutLMvaForQuestionAnswering(config=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :List[Any] = model(
UpperCamelCase_ , bbox=UpperCamelCase_ , pixel_values=UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , start_positions=UpperCamelCase_ , end_positions=UpperCamelCase_ , training=UpperCamelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCamelCase ( self : Optional[Any] ) -> int:
SCREAMING_SNAKE_CASE__ :Tuple = self.prepare_config_and_inputs()
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels = config_and_inputs
SCREAMING_SNAKE_CASE__ :Tuple = {
'input_ids': input_ids,
'bbox': bbox,
'pixel_values': pixel_values,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_tf
class TFLayoutLMvaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{'document-question-answering': TFLayoutLMvaForQuestionAnswering, 'feature-extraction': TFLayoutLMvaModel}
if is_tf_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> dict:
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32
                )

        return inputs_dict
    def setUp(self):
        self.model_tester = TFLayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
def __lowerCamelCase ( self : int ) -> List[str]:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ :str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ :int = model_class(UpperCamelCase_ )
if getattr(UpperCamelCase_ , 'hf_compute_loss' , UpperCamelCase_ ):
# The number of elements in the loss should be the same as the number of elements in the label
SCREAMING_SNAKE_CASE__ :List[Any] = self._prepare_for_class(inputs_dict.copy() , UpperCamelCase_ , return_labels=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :Optional[Any] = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=UpperCamelCase_ )[0]
]
SCREAMING_SNAKE_CASE__ :List[Any] = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
SCREAMING_SNAKE_CASE__ :List[Any] = self._prepare_for_class(inputs_dict.copy() , UpperCamelCase_ , return_labels=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :List[Any] = prepared_for_class.pop('input_ids' )
SCREAMING_SNAKE_CASE__ :Optional[int] = model(UpperCamelCase_ , **UpperCamelCase_ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
SCREAMING_SNAKE_CASE__ :Tuple = self._prepare_for_class(inputs_dict.copy() , UpperCamelCase_ , return_labels=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :str = prepared_for_class.pop('input_ids' )
if "labels" in prepared_for_class:
SCREAMING_SNAKE_CASE__ :Optional[int] = prepared_for_class['labels'].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
SCREAMING_SNAKE_CASE__ :str = -1_00
SCREAMING_SNAKE_CASE__ :int = tf.convert_to_tensor(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :Tuple = model(UpperCamelCase_ , **UpperCamelCase_ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
SCREAMING_SNAKE_CASE__ :Optional[int] = self._prepare_for_class(inputs_dict.copy() , UpperCamelCase_ , return_labels=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :Any = model(UpperCamelCase_ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
SCREAMING_SNAKE_CASE__ :Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , UpperCamelCase_ , return_labels=UpperCamelCase_ )
# Get keys that were added with the _prepare_for_class function
SCREAMING_SNAKE_CASE__ :int = prepared_for_class.keys() - inputs_dict.keys()
SCREAMING_SNAKE_CASE__ :Union[str, Any] = inspect.signature(model.call ).parameters
SCREAMING_SNAKE_CASE__ :Optional[Any] = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
SCREAMING_SNAKE_CASE__ :List[str] = {0: 'input_ids'}
for label_key in label_keys:
SCREAMING_SNAKE_CASE__ :Tuple = signature_names.index(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :Union[str, Any] = label_key
SCREAMING_SNAKE_CASE__ :Any = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
SCREAMING_SNAKE_CASE__ :List[str] = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
SCREAMING_SNAKE_CASE__ :List[str] = prepared_for_class[value]
SCREAMING_SNAKE_CASE__ :List[str] = tuple(UpperCamelCase_ )
# Send to model
SCREAMING_SNAKE_CASE__ :List[str] = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
    def test_model(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
    def test_model_various_embeddings(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
    def test_for_sequence_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )
    def test_for_token_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
        )
    def test_for_question_answering(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
class TFLayoutLMvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base")

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="tf").pixel_values

        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        )

        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
| 209 | 1 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
    AutoModelForSeq2SeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})


@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    task: Optional[str] = field(
        default="summarization",
        metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"},
    )
    max_source_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    val_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        },
    )
    test_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."},
    )
def handle_metrics(split, metrics, output_dir):
    """
    Log and save metrics

    Args:
    - split: one of train, val, test
    - metrics: metrics dict
    - output_dir: where to save the metrics
    """

    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED),
        training_args.fp16,
    )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path,
        from_tf=".ckpt" in model_args.model_name_or_path,
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # use task specific params
    use_task_specific_params(model, data_args.task)

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)

    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())

    dataset_class = Seq2SeqDataset
    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer, type_path="train", data_dir=data_args.data_dir, n_obs=data_args.n_train,
            max_target_length=data_args.max_target_length, max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer, type_path="val", data_dir=data_args.data_dir, n_obs=data_args.n_val,
            max_target_length=data_args.val_max_target_length, max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer, type_path="test", data_dir=data_args.data_dir, n_obs=data_args.n_test,
            max_target_length=data_args.test_max_target_length, max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model,
        args=training_args,
        data_args=data_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=Seq2SeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores
        ),
        compute_metrics=compute_metrics_fn,
        tokenizer=tokenizer,
    )
    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***")

        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train

        trainer.save_model()  # this also saves the tokenizer

        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)

        if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")

        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test")
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test

        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True
                )
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))

    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))

    return all_metrics


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
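# Editor's note: an illustrative invocation (paths, model name, and hyperparameters are
# placeholders, not from the original file); the flag names come from the dataclasses above.
#
#   python finetune_trainer.py \
#       --model_name_or_path sshleifer/student_marian_en_ro_6_1 \
#       --data_dir wmt_en_ro \
#       --output_dir marian_en_ro --overwrite_output_dir \
#       --do_train --do_eval --predict_with_generate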
| 719 |
'''simple docstring'''
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast(BertTokenizerFast):
    slow_tokenizer_class = CustomTokenizer
    pass
| 331 | 0 |
from __future__ import annotations


def kmp(pattern: str, text: str) -> bool:
    """
    The Knuth-Morris-Pratt algorithm for finding a pattern within a piece of text
    with complexity O(n + m).

    1) Preprocess the pattern to identify any suffixes that are identical to prefixes.
       This tells us where to continue from if we get a mismatch between a character
       in the pattern and the text.
    2) Step through the text one character at a time, comparing it to a character in
       the pattern and updating our location within the pattern as necessary.
    """
    # 1) Construct the failure array
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1

        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    """Calculates the new index we should go to if we fail a comparison."""
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure


if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
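# Editor's note: a hedged variant (not in the original module) that returns the start
# index of the first match instead of a bool, reusing the same failure array.
def kmp_index(pattern: str, text: str) -> int:
    failure = get_failure_array(pattern)
    i, j = 0, 0
    while i < len(text):
        if pattern[j] == text[i]:
            if j == len(pattern) - 1:
                return i - j  # start index of the match
            j += 1
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return -1  # no match


assert kmp_index("abc", "xxabcxx") == 2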
| 464 |
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class LDMPipeline(DiffusionPipeline):
    """Unconditional image generation with a VQ-VAE decoder and a DDIM-driven UNet."""

    def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: DDIMScheduler) -> None:
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        # sample gaussian noise to begin the denoising loop
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())

        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
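# Usage sketch (checkpoint id assumed -- any repo bundling a compatible
# vqvae/unet/scheduler trio should work, e.g. the CompVis LDM CelebA-HQ weights):
#
#     from diffusers import LDMPipeline
#
#     pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
#     image = pipe(num_inference_steps=50).images[0]
#     image.save("sample.png")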
| 666 | 0 |
'''simple docstring'''
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('''At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training''')
# TF training parameters
USE_XLA = False
USE_AMP = False


def train_command_factory(args: Namespace):
    """Factory function used to instantiate the training command from provided command line arguments."""
    return TrainCommand(args)


class TrainCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register this command to argparse so it's available for the transformers CLI."""
        train_parser = parser.add_parser("train", help="CLI tool to train a model on a task.")

        train_parser.add_argument(
            "--train_data",
            type=str,
            required=True,
            help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.",
        )
        train_parser.add_argument(
            "--column_label", type=int, default=0, help="Column of the dataset csv file with example labels."
        )
        train_parser.add_argument(
            "--column_text", type=int, default=1, help="Column of the dataset csv file with example texts."
        )
        train_parser.add_argument(
            "--column_id", type=int, default=2, help="Column of the dataset csv file with example ids."
        )
        train_parser.add_argument(
            "--skip_first_row", action="store_true", help="Skip the first row of the csv file (headers)."
        )
        train_parser.add_argument("--validation_data", type=str, default="", help="path to validation dataset.")
        train_parser.add_argument(
            "--validation_split",
            type=float,
            default=0.1,
            help="if validation dataset is not provided, fraction of train dataset to use as validation dataset.",
        )
        train_parser.add_argument("--output", type=str, default="./", help="path to saved the trained model.")
        train_parser.add_argument(
            "--task", type=str, default="text_classification", help="Task to train the model on."
        )
        train_parser.add_argument(
            "--model", type=str, default="bert-base-uncased", help="Model's name or path to stored model."
        )
        train_parser.add_argument("--train_batch_size", type=int, default=32, help="Batch size for training.")
        train_parser.add_argument("--valid_batch_size", type=int, default=64, help="Batch size for validation.")
        train_parser.add_argument("--learning_rate", type=float, default=3e-5, help="Learning rate.")
        train_parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon for Adam optimizer.")
        train_parser.set_defaults(func=train_command_factory)

    def __init__(self, args: Namespace):
        self.logger = logging.get_logger("transformers-cli/training")

        self.framework = "tf" if is_tf_available() else "torch"

        os.makedirs(args.output, exist_ok=True)
        self.output = args.output

        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id

        self.logger.info(f"Loading {args.task} pipeline for {args.model}")
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError

        self.logger.info(f"Loading dataset from {args.train_data}")
        self.train_dataset = Processor.create_from_csv(
            args.train_data,
            column_label=args.column_label,
            column_text=args.column_text,
            column_id=args.column_id,
            skip_first_row=args.skip_first_row,
        )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"Loading validation dataset from {args.validation_data}")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data,
                column_label=args.column_label,
                column_text=args.column_text,
                column_id=args.column_id,
                skip_first_row=args.skip_first_row,
            )

        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon

    def run(self):
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch(self):
        raise NotImplementedError

    def run_tf(self):
        self.pipeline.fit(
            self.train_dataset,
            validation_data=self.valid_dataset,
            validation_split=self.validation_split,
            learning_rate=self.learning_rate,
            adam_epsilon=self.adam_epsilon,
            train_batch_size=self.train_batch_size,
            valid_batch_size=self.valid_batch_size,
        )

        # Save trained pipeline
        self.pipeline.save_pretrained(self.output)
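# Usage sketch (assuming the `transformers-cli` entry point is installed; the
# dataset path is hypothetical):
#
#     transformers-cli train \
#         --train_data ./data/train.tsv \
#         --task text_classification \
#         --model bert-base-uncased \
#         --output ./trained_model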
| 56 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
torch.backends.cuda.matmul.allow_tf32 = False
class VQDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def num_embed(self):
        return 12

    @property
    def num_embeds_ada_norm(self):
        return 12

    @property
    def text_embedder_hidden_size(self):
        return 32
    @property
    def dummy_vqvae(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
            num_vq_embeddings=self.num_embed,
            vq_embed_dim=3,
        )
        return model

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_transformer(self):
        torch.manual_seed(0)
        height = 12
        width = 12

        model_kwargs = {
            "attention_bias": True,
            "cross_attention_dim": 32,
            "attention_head_dim": height * width,
            "num_attention_heads": 1,
            "num_vector_embeds": self.num_embed,
            "num_embeds_ada_norm": self.num_embeds_ada_norm,
            "norm_num_groups": 32,
            "sample_size": width,
            "activation_fn": "geglu-approximate",
        }

        model = Transformer2DModel(**model_kwargs)
        return model
    def test_vq_diffusion(self):
        device = "cpu"

        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(learnable=False)

        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_vq_diffusion_classifier_free_sampling(self):
        device = "cpu"

        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(
            learnable=True, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length
        )

        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class VQDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_vq_diffusion_classifier_free_sampling(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy"
        )

        pipeline = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        generator = torch.Generator(device=torch_device).manual_seed(0)
        output = pipeline(
            "teddy bear playing in the pool",
            num_images_per_prompt=1,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image).max() < 2.0
| 56 | 1 |
"""simple docstring"""
from jiwer import compute_measures
import datasets
_CITATION = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
_DESCRIPTION = '''\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
'''
_KWARGS_DESCRIPTION = '''
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> wer = datasets.load_metric("wer")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class WER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
            ],
        )

    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
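# Worked example of the WER formula above, using the docstring's inputs
# (the alignment shown is one plausible minimum-edit alignment):
#   pair 1: "this is the prediction" vs "this is the reference"
#           S=1 ("reference" -> "prediction"), D=0, I=0, C=3, N=4
#   pair 2: "there is an other sample" vs "there is another one"
#           S=2 ("another" -> "an", "one" -> "other"), I=1 ("sample"), D=0, C=2, N=4
#   pooled: WER = (S + D + I) / N = (1 + 0 + 0 + 2 + 0 + 1) / (4 + 4) = 4 / 8 = 0.5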
| 260 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"],
    "feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"],
    "processing_wav2vec2": ["Wav2Vec2Processor"],
    "tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wav2vec2"] = [
        "WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Wav2Vec2ForAudioFrameClassification",
        "Wav2Vec2ForCTC",
        "Wav2Vec2ForMaskedLM",
        "Wav2Vec2ForPreTraining",
        "Wav2Vec2ForSequenceClassification",
        "Wav2Vec2ForXVector",
        "Wav2Vec2Model",
        "Wav2Vec2PreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
        "TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFWav2Vec2ForCTC",
        "TFWav2Vec2Model",
        "TFWav2Vec2PreTrainedModel",
        "TFWav2Vec2ForSequenceClassification",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
        "FlaxWav2Vec2ForCTC",
        "FlaxWav2Vec2ForPreTraining",
        "FlaxWav2Vec2Model",
        "FlaxWav2Vec2PreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
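# A brief sketch of the lazy-import pattern used above: the module object is
# replaced by a _LazyModule, so importing the package stays cheap and a submodule
# is only loaded when one of its symbols is actually requested, e.g.:
#
#     from transformers.models.wav2vec2 import Wav2Vec2Config  # triggers the lazy import
#     config = Wav2Vec2Config()  # only now is configuration_wav2vec2 fully loaded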
| 260 | 1 |
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
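# Behavior sketch: because DummyObject is the metaclass, any use of the class
# without the listed backends installed raises an ImportError naming the missing
# pip packages (error text paraphrased, not verbatim):
#
#     processor = MidiProcessor()
#     # -> ImportError: MidiProcessor requires the note_seq library ...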
| 701 |
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8) -> str:
    """Generate a random password of the given length from letters, digits and punctuation."""
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


def alternative_password_generator(chars_incl: str, i: int) -> str:
    # Password Generator = full boot with random_number, random_letters, and
    # random_character FUNCTIONS
    # Put your code here...
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(i))


def random_number(chars_incl, i):
    pass  # Put your code here...


def random_letters(chars_incl, i):
    pass  # Put your code here...


def random_characters(chars_incl, i):
    pass  # Put your code here...


def is_strong_password(password: str, min_length: int = 8) -> bool:
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False

    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)

    return upper and lower and num and spec_char
    # Passwords should contain UPPERCASE, lowercase,
    # numbers, and special characters


def main():
    max_length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input("Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(max_length))
    print("Alternative Password generated:", alternative_password_generator(chars_incl, max_length))
    print("[If you are thinking of using this password, You better save it.]")


if __name__ == "__main__":
    main()
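# Quick demonstration of the strength check (inputs chosen for illustration):
#
#     >>> is_strong_password("Hwea7$2!")   # upper + lower + digit + symbol, length 8
#     True
#     >>> is_strong_password("abcdefgh")   # no uppercase, digit or symbol
#     False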
| 207 | 0 |
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "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"

CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
CI_HUB_TOKEN_PATH = Path("~/.huggingface/hub_ci_token").expanduser()


@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
    monkeypatch.setattr(
        "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE
    )


@pytest.fixture
def ci_hub_config(monkeypatch):
    monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
    monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)


@pytest.fixture
def ci_hub_token_path(monkeypatch):
    monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", CI_HUB_TOKEN_PATH)


@pytest.fixture
def set_ci_hub_access_token(ci_hub_config, ci_hub_token_path):
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield
    HfFolder.delete_token()


@pytest.fixture(scope="session")
def hf_api():
    return HfApi(endpoint=CI_HUB_ENDPOINT)


@pytest.fixture(scope="session")
def hf_token():
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token)


@pytest.fixture
def cleanup_repo(hf_api):
    def _cleanup_repo(repo_id):
        hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")

    return _cleanup_repo


@pytest.fixture
def temporary_repo(cleanup_repo):
    @contextmanager
    def _temporary_repo(repo_id):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id)

    return _temporary_repo
@pytest.fixture(scope="session")
def hf_private_dataset_repo_txt_data_(hf_api: HfApi, hf_token, text_file):
    repo_name = f"repo_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(text_file),
        path_in_repo="data/text_data.txt",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_txt_data_(hf_api: HfApi, hf_token, zip_csv_with_dir_path):
    repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_csv_with_dir_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(
    hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url
):
    return hf_private_dataset_repo_zipped_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_img_data_(hf_api: HfApi, hf_token, zip_image_path):
    repo_name = f"repo_zipped_img_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_image_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(
    hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url
):
    return hf_private_dataset_repo_zipped_img_data_
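# Usage sketch in a test module (fixture names resolved by pytest; the fixture
# yields the private repo id, which the test then loads from the CI hub):
#
#     def test_load_private_txt(hf_private_dataset_repo_txt_data, hf_token):
#         from datasets import load_dataset
#         # `token=` assumed for recent datasets versions; older ones used `use_auth_token=`
#         ds = load_dataset(hf_private_dataset_repo_txt_data, token=hf_token)
#         assert "train" in ds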
| 9 |
edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]


def topological_sort(start, visited, sort):
    """Perform a depth-first topological sort on the module-level graph."""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort


if __name__ == "__main__":
    sort = topological_sort("a", [], [])
    print(sort)  # expected: ['c', 'd', 'e', 'b', 'a'] -- dependencies listed before dependents
| 458 | 0 |
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2",
            revision="bf16",
            dtype=jnp.bfloat16,
        )

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_stable_diffusion_dpm_flax(self):
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id,
            scheduler=scheduler,
            revision="bf16",
            dtype=jnp.bfloat16,
        )
        params["scheduler"] = scheduler_params

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 703 |
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_3": "float64", "col_1": "string", "col_2": "int64"},
    ],
)
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path):
    # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
    _check_json_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
    if split:
        path = {split: jsonl_path}
    else:
        split = "train"
        path = {"train": jsonl_path, "test": jsonl_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def load_json(buffer):
    return json.load(buffer)


def load_json_lines(buffer):
    return [json.loads(line) for line in buffer]
class TestJsonDatasetWriter:
    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    def test_dataset_to_json_orient_invalidproc(self, dataset):
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)

    @pytest.mark.parametrize("compression, extension", [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")])
    def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
        path = tmp_path_factory.mktemp("data") / f"test.json.{extension}"
        original_path = str(shared_datadir / f"test_file.json.{extension}")
        JsonDatasetWriter(dataset, path, compression=compression).write()

        with fsspec.open(path, "rb", compression="infer") as f:
            exported_content = f.read()
        with fsspec.open(original_path, "rb", compression="infer") as f:
            original_content = f.read()
        assert exported_content == original_content
| 142 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}


class MegatronBertConfig(PretrainedConfig):
    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
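# Usage sketch: the defaults mirror the released Megatron-BERT-345M checkpoint
# (hidden_size=1024, 24 layers, 16 heads):
#
#     config = MegatronBertConfig()
#     config.num_hidden_layers  # -> 24
#     small = MegatronBertConfig(hidden_size=256, num_hidden_layers=4)  # a toy variant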
| 4 |
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            use_stable_embedding=True,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    @unittest.skip("Open-Llama buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
| 4 | 1 |
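The scaling test above flips `rope_scaling` between "linear" and "dynamic" and checks whether short-input outputs drift. Below is a minimal NumPy sketch of what linear RoPE scaling does to the rotary angles, independent of any transformers model; the function and argument names are illustrative only, not library API.

import numpy as np

def rope_angles(positions, dim, base=10000.0, linear_factor=1.0):
    # Linear scaling divides every position index by `linear_factor`, so a
    # model trained up to length L can address positions up to factor * L.
    positions = np.asarray(positions, dtype=np.float64) / linear_factor
    inv_freq = 1.0 / (base ** (np.arange(0, dim, 2) / dim))
    return np.outer(positions, inv_freq)  # shape (len(positions), dim // 2)

short = rope_angles(range(10), dim=64)
scaled = rope_angles(range(10), dim=64, linear_factor=10.0)
# Linear scaling changes the angles even for short inputs, which is exactly
# why the test above expects the short-input outputs to differ.
assert not np.allclose(short, scaled)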
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_blenderbot_small''': [
'''BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlenderbotSmallConfig''',
'''BlenderbotSmallOnnxConfig''',
],
'''tokenization_blenderbot_small''': ['''BlenderbotSmallTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_blenderbot_small_fast'''] = ['''BlenderbotSmallTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_blenderbot_small'''] = [
'''BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlenderbotSmallForCausalLM''',
'''BlenderbotSmallForConditionalGeneration''',
'''BlenderbotSmallModel''',
'''BlenderbotSmallPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_blenderbot_small'''] = [
'''TFBlenderbotSmallForConditionalGeneration''',
'''TFBlenderbotSmallModel''',
'''TFBlenderbotSmallPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_blenderbot_small'''] = [
'''FlaxBlenderbotSmallForConditionalGeneration''',
'''FlaxBlenderbotSmallModel''',
'''FlaxBlenderbotSmallPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 719 |
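The pattern above registers an import structure and hands it to `_LazyModule` so heavy submodules load only on first attribute access. Here is a stripped-down sketch of that mechanism using only the standard library; the class and dictionary keys are made up for illustration and are not the transformers implementation.

import importlib
import types

class LazyModule(types.ModuleType):
    # import_structure maps submodule name -> list of symbols it provides.
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, name):
        if name not in self._symbol_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {name!r}")
        module = importlib.import_module("." + self._symbol_to_module[name], self.__name__)
        value = getattr(module, name)
        setattr(self, name, value)  # cache so the submodule import happens once
        return value

Assigning such an object to sys.modules[__name__], as the fixed line above does, is what keeps `from package import HeavySymbol` cheap until the symbol is actually touched.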
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
lowercase_ : Union[str, Any] = logging.get_logger(__name__)
class UpperCamelCase ( LayoutLMvaImageProcessor ):
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
| 295 | 0 |
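Deprecating a class by subclassing its replacement, as above, keeps old imports working while steering users toward the new name. A self-contained sketch of the same pattern; both class names below are invented for the example.

import warnings

class NewProcessor:
    def __init__(self, size=224):
        self.size = size

class OldFeatureExtractor(NewProcessor):
    # Kept only for backwards compatibility: behaves exactly like the new
    # class but emits a FutureWarning on every instantiation.
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated, use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)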
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class A ( DiffusionPipeline ):
    unet: UNetaDModel
    scheduler: KarrasVeScheduler
def __init__( self: int , _lowerCAmelCase: UNetaDModel , _lowerCAmelCase: KarrasVeScheduler ) -> int:
'''simple docstring'''
super().__init__()
self.register_modules(unet=_lowerCAmelCase , scheduler=_lowerCAmelCase )
@torch.no_grad()
def __call__( self: int , _lowerCAmelCase: int = 1 , _lowerCAmelCase: int = 50 , _lowerCAmelCase: Optional[Union[torch.Generator, List[torch.Generator]]] = None , _lowerCAmelCase: Optional[str] = "pil" , _lowerCAmelCase: bool = True , **_lowerCAmelCase: Union[str, Any] , ) -> Union[Tuple, ImagePipelineOutput]:
'''simple docstring'''
UpperCAmelCase_ =self.unet.config.sample_size
UpperCAmelCase_ =(batch_size, 3, img_size, img_size)
UpperCAmelCase_ =self.unet
# sample x_0 ~ N(0, sigma_0^2 * I)
UpperCAmelCase_ =randn_tensor(_lowerCAmelCase , generator=_lowerCAmelCase , device=self.device ) * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(_lowerCAmelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# here sigma_t == t_i from the paper
UpperCAmelCase_ =self.scheduler.schedule[t]
UpperCAmelCase_ =self.scheduler.schedule[t - 1] if t > 0 else 0
# 1. Select temporarily increased noise level sigma_hat
# 2. Add new noise to move from sample_i to sample_hat
UpperCAmelCase_ , UpperCAmelCase_ =self.scheduler.add_noise_to_input(_lowerCAmelCase , _lowerCAmelCase , generator=_lowerCAmelCase )
# 3. Predict the noise residual given the noise magnitude `sigma_hat`
# The model inputs and output are adjusted by following eq. (213) in [1].
UpperCAmelCase_ =(sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample
# 4. Evaluate dx/dt at sigma_hat
# 5. Take Euler step from sigma to sigma_prev
UpperCAmelCase_ =self.scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if sigma_prev != 0:
# 6. Apply 2nd order correction
# The model inputs and output are adjusted by following eq. (213) in [1].
UpperCAmelCase_ =(sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample
UpperCAmelCase_ =self.scheduler.step_correct(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , step_output.prev_sample , step_output["derivative"] , )
UpperCAmelCase_ =step_output.prev_sample
UpperCAmelCase_ =(sample / 2 + 0.5).clamp(0 , 1 )
UpperCAmelCase_ =sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCAmelCase_ =self.numpy_to_pil(_lowerCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_lowerCAmelCase )
| 54 |
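The sampling loop above adds noise, takes an Euler step along dx/dsigma, then applies a second-order correction. Below is a toy sketch of that step on a 1-D array with a stand-in denoiser instead of a U-Net; all names are illustrative, and the real pipeline delegates both updates to the scheduler.

import numpy as np

def toy_denoiser(x, sigma):
    # Stand-in for the U-Net: pretends the clean signal is x shrunk toward 0.
    return x / (1.0 + sigma)

def karras_step(sample, sigma, sigma_prev, denoise):
    d = (sample - denoise(sample, sigma)) / sigma            # dx/dsigma at sigma
    sample_prev = sample + (sigma_prev - sigma) * d          # Euler step
    if sigma_prev != 0:
        d_prime = (sample_prev - denoise(sample_prev, sigma_prev)) / sigma_prev
        # Second-order (Heun-style) correction: average the two slope estimates.
        sample_prev = sample + (sigma_prev - sigma) * 0.5 * (d + d_prime)
    return sample_prev

x = np.array([1.0, -2.0, 0.5])
x = karras_step(x, sigma=10.0, sigma_prev=5.0, denoise=toy_denoiser)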
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_bloom''': ['''BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BloomConfig''', '''BloomOnnxConfig'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_bloom_fast'''] = ['''BloomTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_bloom'''] = [
'''BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BloomForCausalLM''',
'''BloomModel''',
'''BloomPreTrainedModel''',
'''BloomForSequenceClassification''',
'''BloomForTokenClassification''',
'''BloomForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 105 | 0 |
import math
def A ( __UpperCAmelCase ) -> int:
'''simple docstring'''
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
UpperCAmelCase_ = f"Input value of [number={number}] must be an integer"
raise TypeError(__UpperCAmelCase )
if number < 1:
UpperCAmelCase_ = f"Input value of [number={number}] must be > 0"
raise ValueError(__UpperCAmelCase )
elif number == 1:
return 3
elif number == 2:
return 5
else:
UpperCAmelCase_ = int(math.log(number // 3 , 2 ) ) + 2
UpperCAmelCase_ = [3, 5]
UpperCAmelCase_ = 2
UpperCAmelCase_ = 3
for block in range(1 , __UpperCAmelCase ):
for _ in range(__UpperCAmelCase ):
proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] )
proth_index += 1
increment *= 2
return proth_list[number - 1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(11):
UpperCamelCase_ = 0
try:
UpperCamelCase_ = proth(number)
except ValueError:
print(f"ValueError: there is no {number}th Proth number")
continue
print(f"The {number}th Proth number: {value}")
| 561 |
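A Proth number has the form k * 2**n + 1 with k odd and k < 2**n; checking candidates directly against that definition is a cheap sanity test for the generator above. The is_proth helper below is written for this example and is not part of the snippet.

def is_proth(candidate):
    # N is a Proth number iff N = k * 2**n + 1 with k odd, n > 0 and k < 2**n.
    k = candidate - 1
    n = 0
    while k % 2 == 0:
        k //= 2
        n += 1
    return n > 0 and k < 2 ** n

assert all(is_proth(v) for v in (3, 5, 9, 13, 17, 25, 33))
assert not is_proth(7)  # 7 - 1 = 3 * 2, but 3 >= 2**1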
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def A ( ) -> Optional[int]:
'''simple docstring'''
raise RuntimeError('''CUDA out of memory.''' )
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class a_ ( unittest.TestCase ):
def __a ( self :Any) -> int:
UpperCAmelCase_ = []
@find_executable_batch_size(starting_batch_size=128)
def mock_training_loop_function(_lowercase :List[str]):
nonlocal batch_sizes
batch_sizes.append(_lowercase)
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(_lowercase , [128, 64, 32, 16, 8])
def __a ( self :Union[str, Any]) -> Union[str, Any]:
UpperCAmelCase_ = []
@find_executable_batch_size(starting_batch_size=128)
def mock_training_loop_function(_lowercase :Optional[int] , _lowercase :str):
nonlocal batch_sizes
batch_sizes.append(_lowercase)
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
UpperCAmelCase_ , UpperCAmelCase_ = mock_training_loop_function('''hello''')
self.assertListEqual(_lowercase , [128, 64, 32, 16, 8])
self.assertListEqual([bs, arga] , [8, '''hello'''])
def __a ( self :Optional[Any]) -> str:
@find_executable_batch_size(starting_batch_size=0)
def mock_training_loop_function(_lowercase :Optional[Any]):
pass
with self.assertRaises(_lowercase) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0])
def __a ( self :Any) -> Optional[Any]:
@find_executable_batch_size(starting_batch_size=16)
def mock_training_loop_function(_lowercase :Tuple):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(_lowercase) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0])
def __a ( self :str) -> Dict:
@find_executable_batch_size(starting_batch_size=128)
def mock_training_loop_function(_lowercase :List[Any] , _lowercase :Union[str, Any] , _lowercase :Tuple):
if batch_size != 8:
                raise_fake_out_of_memory()
with self.assertRaises(_lowercase) as cm:
mock_training_loop_function(128 , '''hello''' , '''world''')
self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0])
self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0])
def __a ( self :Optional[int]) -> Any:
@find_executable_batch_size(starting_batch_size=16)
def mock_training_loop_function(_lowercase :List[str]):
raise ValueError('''Oops, we had an error!''')
with self.assertRaises(_lowercase) as cm:
mock_training_loop_function()
self.assertIn('''Oops, we had an error!''' , cm.exception.args[0])
@require_cuda
def __a ( self :List[Any]) -> Union[str, Any]:
UpperCAmelCase_ = torch.cuda.memory_allocated()
UpperCAmelCase_ = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , _lowercase)
UpperCAmelCase_ = release_memory(_lowercase)
self.assertEqual(torch.cuda.memory_allocated() , _lowercase)
| 561 | 1 |
"""simple docstring"""
def get_demo_graph(index ):
'''simple docstring'''
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges(graph ):
    '''simple docstring'''
    id_ = 0
    n = len(graph )  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at , parent , bridges , id_ ):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to , at , bridges , id_ )
                low[at] = min(low[at] , low[to] )
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at) )
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at] , low[to] )

    bridges = []
    for i in range(n ):
        if not visited[i]:
            dfs(i , -1 , bridges , id_ )
    return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
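A quick check of the bridge finder on a graph small enough to verify by eye, using the compute_bridges name from the cleaned-up function above:

# A triangle {0, 1, 2} with a tail 2-3: the only bridge is the tail edge,
# since removing any triangle edge leaves the graph connected.
assert compute_bridges({0: [1, 2], 1: [0, 2], 2: [0, 1, 3], 3: [2]}) == [(2, 3)]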
| 682 |
'''simple docstring'''
from typing import List
from .keymap import KEYMAP, get_character
def lowerCamelCase__ ( A_ ):
def decorator(A_ ):
UpperCAmelCase_ = getattr(A_ , "handle_key" , [] )
handle += [key]
setattr(A_ , "handle_key" , A_ )
return func
return decorator
def lowerCamelCase__ ( *A_ ):
def decorator(A_ ):
UpperCAmelCase_ = getattr(A_ , "handle_key" , [] )
handle += keys
setattr(A_ , "handle_key" , A_ )
return func
return decorator
class lowercase_ ( _A ):
def __new__( cls , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ = super().__new__(cls , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if not hasattr(UpperCamelCase__ , "key_handler" ):
setattr(UpperCamelCase__ , "key_handler" , {} )
setattr(UpperCamelCase__ , "handle_input" , KeyHandler.handle_input )
for value in attrs.values():
UpperCAmelCase_ = getattr(UpperCamelCase__ , "handle_key" , [] )
for key in handled_keys:
UpperCAmelCase_ = value
return new_cls
@staticmethod
def lowerCamelCase_ ( cls ) -> str:
"""simple docstring"""
UpperCAmelCase_ = get_character()
if char != KEYMAP["undefined"]:
UpperCAmelCase_ = ord(UpperCamelCase__ )
UpperCAmelCase_ = cls.key_handler.get(UpperCamelCase__ )
if handler:
UpperCAmelCase_ = char
return handler(cls )
else:
return None
def lowerCamelCase__ ( cls ):
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
| 660 | 0 |
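A compact sketch of the decorator-plus-metaclass registration idea above, with plain return values instead of terminal input so it runs anywhere; all names below are invented for the example.

def handle(key):
    def decorator(func):
        # Accumulate keys on the function itself; the metaclass collects them.
        func.handle_key = getattr(func, "handle_key", []) + [key]
        return func
    return decorator

class KeyHandlerMeta(type):
    def __new__(mcs, name, bases, attrs):
        cls = super().__new__(mcs, name, bases, attrs)
        cls.key_handler = {}
        for value in attrs.values():
            for key in getattr(value, "handle_key", []):
                cls.key_handler[key] = value
        return cls

class Menu(metaclass=KeyHandlerMeta):
    @handle("q")
    def quit(self):
        return "quit"

    @handle("j")
    @handle("k")
    def move(self):
        return "move"

menu = Menu()
assert menu.key_handler["q"](menu) == "quit"
assert menu.key_handler["k"](menu) == "move"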
import logging
import os
from .state import PartialState
class a ( logging.LoggerAdapter ):
@staticmethod
def _UpperCAmelCase ( A_ ):
'''simple docstring'''
_UpperCAmelCase : Tuple = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def _UpperCAmelCase ( self , A_ , A_ , *A_ , **A_ ):
'''simple docstring'''
if PartialState._shared_state == {}:
raise RuntimeError(
"You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility." )
_UpperCAmelCase : Tuple = kwargs.pop("main_process_only" , A_ )
_UpperCAmelCase : int = kwargs.pop("in_order" , A_ )
if self.isEnabledFor(A_ ):
if self._should_log(A_ ):
_UpperCAmelCase : Optional[int] = self.process(A_ , A_ )
self.logger.log(A_ , A_ , *A_ , **A_ )
elif in_order:
_UpperCAmelCase : Dict = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
_UpperCAmelCase : Union[str, Any] = self.process(A_ , A_ )
self.logger.log(A_ , A_ , *A_ , **A_ )
state.wait_for_everyone()
def get_logger( name: str , log_level: str = None ) -> MultiProcessAdapter:
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL" , None )
    logger = logging.getLogger(name )
    if log_level is not None:
        logger.setLevel(log_level.upper() )
        logger.root.setLevel(log_level.upper() )
    return MultiProcessAdapter(logger , {} )
| 721 |
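Typical use of such an adapter in a training script; this sketch assumes the public accelerate.logging.get_logger helper, which mirrors the function defined above, and requires the accelerate state to be initialized before any log call.

from accelerate import Accelerator
from accelerate.logging import get_logger

logger = get_logger(__name__, log_level="INFO")

accelerator = Accelerator()
logger.info("starting training")  # default: main process only
# Every rank reports, one at a time, when in_order is requested.
logger.info("per-rank stats", main_process_only=False, in_order=True)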
from __future__ import annotations
import numpy as np
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: list[float] ) -> np.ndarray:
return np.maximum(0 , lowerCAmelCase )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 467 | 0 |
from __future__ import annotations
def max_sum_in_array( array , k ):
    """simple docstring"""
    if len(array ) < k or k < 0:
        raise ValueError("Invalid Input" )
    current_sum = sum(array[:k] )
    max_sum = current_sum
    for i in range(len(array ) - k ):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum , current_sum )
    return max_sum
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
lowerCAmelCase__ = [randint(-1_0_0_0, 1_0_0_0) for i in range(1_0_0)]
lowerCAmelCase__ = randint(0, 1_1_0)
print(f'''The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}''')
| 496 |
def abbr( a , b ):
    """simple docstring"""
    n = len(a )
    m = len(b )
    dp = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
    dp[0][0] = True
    for i in range(n ):
        for j in range(m + 1 ):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 534 | 0 |
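A concrete check of the abbreviation DP above, using the classic example: "daBcd" reaches "ABC" by capitalizing d and c and deleting the trailing lowercase d, while "dBcd" has no way to produce the leading "A".

assert abbr("daBcd", "ABC") is True
assert abbr("dBcd", "ABC") is False  # no 'a' before 'B', so 'A' can never appear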
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function( z ):
    '''simple docstring'''
    return 1 / (1 + np.exp(-z ))


def cost_function( h , y ):
    '''simple docstring'''
    return (-y * np.log(h ) - (1 - y) * np.log(1 - h )).mean()


def log_likelihood( x , y , weights ):
    '''simple docstring'''
    scores = np.dot(x , weights )
    return np.sum(y * scores - np.log(1 + np.exp(scores ) ) )


# here alpha is the learning rate, x the feature matrix and y the target vector
def logistic_reg( alpha , x , y , max_iterations=7_0_0_0_0 ):
    '''simple docstring'''
    theta = np.zeros(x.shape[1] )
    for iterations in range(max_iterations ):
        z = np.dot(x , theta )
        h = sigmoid_function(z )
        gradient = np.dot(x.T , h - y ) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x , theta )
        h = sigmoid_function(z )
        j = cost_function(h , y )
        if iterations % 1_0_0 == 0:
            print(F'''loss: {j} \t''' )  # printing the loss after every 100 iterations
    return theta


# In[68]:
if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=7_0_0_0_0)
    print("""theta: """, theta)  # printing the theta i.e our weights vector

    def predict_prob( x ):
        '''simple docstring'''
        return sigmoid_function(
            np.dot(x , theta ) )  # predicting the value of probability from the logistic regression algorithm

    plt.figure(figsize=(1_0, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="""b""", label="""0""")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="""r""", label="""1""")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="""black""")
    plt.legend()
    plt.show()
| 648 |
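One hand-checkable gradient step of the update rule above, theta <- theta - alpha * x^T (h - y) / m, on a two-point toy dataset:

import numpy as np

x = np.array([[1.0], [2.0]])
y = np.array([0.0, 1.0])
theta = np.zeros(1)

h = 1 / (1 + np.exp(-x @ theta))   # both predictions start at 0.5
gradient = x.T @ (h - y) / y.size  # = (1 * 0.5 + 2 * (-0.5)) / 2 = -0.25
theta = theta - 0.1 * gradient     # moves theta to +0.025
assert np.isclose(theta[0], 0.025)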
def heaps( arr : list ) -> list:
    '''simple docstring'''
    if len(arr ) <= 1:
        return [tuple(arr )]
    res = []

    def generate(n : int , arr : list ):
        c = [0] * n
        res.append(tuple(arr ) )
        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr ) )
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(arr ) , arr )
    return res
if __name__ == "__main__":
lowerCAmelCase__ = input("""Enter numbers separated by a comma:\n""").strip()
lowerCAmelCase__ = [int(item) for item in user_input.split(""",""")]
print(heaps(arr))
| 648 | 1 |
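Heap's algorithm produces each successive permutation with a single swap; comparing against itertools confirms the enumeration is complete and duplicate-free.

from itertools import permutations

result = heaps([1, 2, 3])
assert len(result) == 6  # 3! permutations
assert set(result) == set(permutations([1, 2, 3]))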
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _snake_case ( lowerCamelCase ,lowerCamelCase ,lowerCamelCase ):
"""simple docstring"""
@register_to_config
def __init__( self , a , a , a , a , a , a , a , a , a , a = False , ) -> Tuple:
"""simple docstring"""
super().__init__()
_A = nn.Embedding(a , a )
_A = nn.Embedding(a , a )
_A = False
_A = nn.Dropout(p=a )
_A = TaConfig(
vocab_size=a , d_model=a , num_heads=a , d_kv=a , d_ff=a , dropout_rate=a , feed_forward_proj=a , is_decoder=a , is_encoder_decoder=a , )
_A = nn.ModuleList()
for lyr_num in range(a ):
_A = TaBlock(a )
self.encoders.append(a )
_A = TaLayerNorm(a )
_A = nn.Dropout(p=a )
def lowercase_ ( self , a , a ) -> Tuple:
"""simple docstring"""
_A = self.token_embedder(a )
_A = encoder_input_tokens.shape[1]
_A = torch.arange(a , device=encoder_input_tokens.device )
x += self.position_encoding(a )
_A = self.dropout_pre(a )
# inverted the attention mask
_A = encoder_input_tokens.size()
_A = self.get_extended_attention_mask(a , a )
for lyr in self.encoders:
_A = lyr(a , a )[0]
_A = self.layer_norm(a )
        return self.dropout_post(a ), encoder_inputs_mask
| 317 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_xlm_roberta_xl""": [
"""XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XLMRobertaXLConfig""",
"""XLMRobertaXLOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_xlm_roberta_xl"""] = [
"""XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMRobertaXLForCausalLM""",
"""XLMRobertaXLForMaskedLM""",
"""XLMRobertaXLForMultipleChoice""",
"""XLMRobertaXLForQuestionAnswering""",
"""XLMRobertaXLForSequenceClassification""",
"""XLMRobertaXLForTokenClassification""",
"""XLMRobertaXLModel""",
"""XLMRobertaXLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 317 | 1 |
from __future__ import annotations
def kmp( pattern , text ):
    failure = get_failure_array(pattern )

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text ):
        if pattern[j] == text[i]:
            if j == (len(pattern ) - 1):
                return True
            j += 1

        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array( pattern ):
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern ):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i )
    return failure
if __name__ == "__main__":
# Test 1)
_snake_case = "abc1abc12"
_snake_case = "alskfjaldsabc1abc1abc12k23adsfabcabc"
_snake_case = "alskfjaldsk23adsfabcabc"
assert kmp(pattern, texta) and not kmp(pattern, texta)
# Test 2)
_snake_case = "ABABX"
_snake_case = "ABABZABABYABABX"
assert kmp(pattern, text)
# Test 3)
_snake_case = "AAAB"
_snake_case = "ABAAAAAB"
assert kmp(pattern, text)
# Test 4)
_snake_case = "abcdabcy"
_snake_case = "abcxabcdabxabcdabcdabcy"
assert kmp(pattern, text)
# Test 5)
_snake_case = "aabaabaaa"
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 707 |
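The failure array is what lets the matcher skip re-comparisons. Tracing it for the "ABABX" test pattern shows why: entry j is the length of the longest proper prefix that is also a suffix ending at j.

assert get_failure_array("ABABX") == [0, 0, 1, 2, 0]
# After matching "ABAB" and hitting a mismatch, the search resumes at pattern
# index failure[3] = 2, because the suffix "AB" is also a prefix of the pattern.
assert kmp("ABABX", "ABABZABABYABABX")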
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class lowercase ( unittest.TestCase ):
def __init__( self , _a , _a=7 , _a=3 , _a=18 , _a=30 , _a=400 , _a=True , _a=None , _a=True , _a=[0.5, 0.5, 0.5] , _a=[0.5, 0.5, 0.5] , ) -> Tuple:
_A : Any = size if size is not None else {"""height""": 18, """width""": 18}
_A : Optional[Any] = parent
_A : Union[str, Any] = batch_size
_A : List[Any] = num_channels
_A : List[str] = image_size
_A : Optional[Any] = min_resolution
_A : List[Any] = max_resolution
_A : Optional[Any] = do_resize
_A : str = size
_A : List[str] = do_normalize
_A : Dict = image_mean
_A : int = image_std
def a__ ( self ) -> Any:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = DPTImageProcessor if is_vision_available() else None
def a__ ( self ) -> Optional[int]:
_A : Optional[Any] = DPTImageProcessingTester(self )
@property
def a__ ( self ) -> Tuple:
return self.image_processor_tester.prepare_image_processor_dict()
def a__ ( self ) -> Any:
_A : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , """image_mean""" ) )
self.assertTrue(hasattr(_a , """image_std""" ) )
self.assertTrue(hasattr(_a , """do_normalize""" ) )
self.assertTrue(hasattr(_a , """do_resize""" ) )
self.assertTrue(hasattr(_a , """size""" ) )
def a__ ( self ) -> Any:
_A : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
_A : str = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def a__ ( self ) -> List[Any]:
# Initialize image_processing
_A : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_A : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
_A : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_A : int = image_processing(_a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def a__ ( self ) -> Union[str, Any]:
# Initialize image_processing
_A : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_A : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , numpify=_a )
for image in image_inputs:
self.assertIsInstance(_a , np.ndarray )
# Test not batched input
_A : Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_A : Any = image_processing(_a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def a__ ( self ) -> List[str]:
# Initialize image_processing
_A : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_A : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , torchify=_a )
for image in image_inputs:
self.assertIsInstance(_a , torch.Tensor )
# Test not batched input
_A : Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_A : int = image_processing(_a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
| 54 | 0 |
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def A__ ( SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[Any] ) -> str:
"""simple docstring"""
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
_UpperCAmelCase = flax_key_tuple[:-1] + ('''weight''',)
_UpperCAmelCase = torch.permute(SCREAMING_SNAKE_CASE_ , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(SCREAMING_SNAKE_CASE_ ):
# linear layer
_UpperCAmelCase = flax_key_tuple[:-1] + ('''weight''',)
_UpperCAmelCase = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
_UpperCAmelCase = flax_key_tuple[:-1] + ('''weight''',)
return flax_key_tuple, flax_tensor
def A__ ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[Any] ) -> List[Any]:
"""simple docstring"""
if "metadata" in layer:
_UpperCAmelCase = layer.split('''metadata''' )
_UpperCAmelCase = ''''''.join(split_layer[0] )[:-1]
_UpperCAmelCase = [tuple(('''metadata''' + split_layer[1]).split('''/''' ) )]
elif "kvstore" in layer:
_UpperCAmelCase = layer.split('''kvstore''' )
_UpperCAmelCase = ''''''.join(split_layer[0] )[:-1]
_UpperCAmelCase = [tuple(('''kvstore''' + split_layer[1]).split('''/''' ) )]
else:
_UpperCAmelCase = layer.split('''/''' )
_UpperCAmelCase = '''/'''.join(split_layer[:-1] )
_UpperCAmelCase = (split_layer[-1],)
if "kvstore/path" in layer:
_UpperCAmelCase = F'''{switch_checkpoint_path}/{checkpoint_info[layer]}'''
elif "kvstore/driver" in layer:
_UpperCAmelCase = '''file'''
else:
_UpperCAmelCase = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def A__ ( SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Any:
"""simple docstring"""
_UpperCAmelCase = rename_keys(SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = {}
for k, v in current_block.items():
_UpperCAmelCase = v
_UpperCAmelCase = new_current_block
torch.save(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def A__ ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : str = WEIGHTS_NAME ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = convert_file_size_to_int(SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = []
_UpperCAmelCase = {}
_UpperCAmelCase = 0
_UpperCAmelCase = 0
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
with gfile.GFile(switch_checkpoint_path + '''/checkpoint''' , '''rb''' ) as fp:
_UpperCAmelCase = serialization.msgpack_restore(fp.read() )['''optimizer''']['''target''']
_UpperCAmelCase = flatten_dict(SCREAMING_SNAKE_CASE_ , sep='''/''' )
_UpperCAmelCase = {}
for layer in checkpoint_info.keys():
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = get_key_and_tensorstore_dict(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if curr_real_layer_name in all_layers:
_UpperCAmelCase = content
else:
_UpperCAmelCase = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
_UpperCAmelCase = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
_UpperCAmelCase = torch.tensor(SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
_UpperCAmelCase , _UpperCAmelCase = rename_base_flax_keys(tuple(key.split('''/''' ) ) , SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = '''/'''.join(SCREAMING_SNAKE_CASE_ )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
_UpperCAmelCase = os.path.join(
SCREAMING_SNAKE_CASE_ , weights_name.replace('''.bin''' , F'''-{len(SCREAMING_SNAKE_CASE_ )+1:05d}-of-???.bin''' ) )
rename_and_save_block(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
sharded_state_dicts.append(current_block.keys() )
del current_block
_UpperCAmelCase = {}
_UpperCAmelCase = 0
_UpperCAmelCase = raw_weights.to(getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
_UpperCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , weights_name.replace('''.bin''' , F'''-{len(SCREAMING_SNAKE_CASE_ )+1:05d}-of-???.bin''' ) )
rename_and_save_block(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(SCREAMING_SNAKE_CASE_ ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
_UpperCAmelCase = {}
_UpperCAmelCase = {}
for idx, shard in enumerate(SCREAMING_SNAKE_CASE_ ):
_UpperCAmelCase = weights_name.replace(
'''.bin''' , F'''-{idx+1:05d}-of-{len(SCREAMING_SNAKE_CASE_ ):05d}.bin''' ) # len(sharded_state_dicts):05d}
_UpperCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , weights_name.replace('''.bin''' , F'''-{idx+1:05d}-of-???.bin''' ) )
os.rename(SCREAMING_SNAKE_CASE_ , os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
_UpperCAmelCase = shard
for key in shard:
_UpperCAmelCase = shard_file
# Add the metadata
_UpperCAmelCase = {'''total_size''': total_size}
_UpperCAmelCase = {'''metadata''': metadata, '''weight_map''': weight_map}
with open(os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , '''w''' , encoding='''utf-8''' ) as f:
_UpperCAmelCase = json.dumps(SCREAMING_SNAKE_CASE_ , indent=2 , sort_keys=SCREAMING_SNAKE_CASE_ ) + '''\n'''
f.write(SCREAMING_SNAKE_CASE_ )
return metadata, index
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
type=str,
required=False,
help="Path to the output pytorch model.",
)
UpperCAmelCase_ = parser.parse_args()
shard_on_the_fly(
args.switch_tax_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def A__ ( ) -> Dict:
"""simple docstring"""
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
_UpperCAmelCase = SwitchTransformersConfig.from_pretrained('''google/switch-base-8''' )
config.save_pretrained('''/home/arthur_huggingface_co/transformers/switch_converted''' )
_UpperCAmelCase = SwitchTransformersForConditionalGeneration.from_pretrained(
'''/home/arthur_huggingface_co/transformers/switch_converted''' , device_map='''auto''' )
_UpperCAmelCase = TaTokenizer.from_pretrained('''t5-small''' )
_UpperCAmelCase = '''A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.'''
_UpperCAmelCase = tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).input_ids
_UpperCAmelCase = model.generate(SCREAMING_SNAKE_CASE_ , decoder_start_token_id=0 )
    print(tokenizer.decode(out[0] ) )
| 32 |
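A minimal sketch of size-based sharding for a plain dict of tensors, following the same idea as the converter above: start a new shard whenever the running byte count would exceed the limit, then record a weight-to-shard-file index. The file-name format below is illustrative, not the transformers convention.

import torch

def shard_state_dict(state_dict, max_shard_bytes):
    shards, current, current_bytes = [], {}, 0
    for name, tensor in state_dict.items():
        size = tensor.numel() * tensor.element_size()
        if current and current_bytes + size > max_shard_bytes:
            shards.append(current)          # this shard is full, start a new one
            current, current_bytes = {}, 0
        current[name] = tensor
        current_bytes += size
    if current:
        shards.append(current)
    # weight_map is the index: which shard file holds each weight.
    weight_map = {name: f"shard-{i:05d}.bin" for i, shard in enumerate(shards) for name in shard}
    return shards, weight_map

shards, index = shard_state_dict(
    {"a": torch.zeros(1000), "b": torch.zeros(1000), "c": torch.zeros(10)},
    max_shard_bytes=5000,
)
assert len(shards) == 2 and index["c"] == "shard-00001.bin"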
'''simple docstring'''
import numpy as np
import datasets
__A ='\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n'
__A ='\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n'
__A ='\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric("mahalanobis")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {\'mahalanobis\': array([0.5])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _snake_case ( datasets.Metric ):
def snake_case__ ( self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""X""": datasets.Sequence(datasets.Value("""float""" , id="""sequence""") , id="""X"""),
}) , )
    def snake_case__ ( self , X , reference_distribution):
        # convert to numpy arrays
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)

        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError("""Expected `X` to be a 2D vector""")
        if len(reference_distribution.shape) != 2:
            raise ValueError("""Expected `reference_distribution` to be a 2D vector""")
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                """Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension""")

        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution , axis=0)  # per-feature mean of the reference
        covariance = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(covariance)
        except np.linalg.LinAlgError:
            inv_covmat = np.linalg.pinv(covariance)
        left_term = np.dot(X_minus_mu , inv_covmat)
        mahal_dist = np.dot(left_term , X_minus_mu.T).diagonal()

        return {"mahalanobis": mahal_dist}
| 407 | 0 |
'''simple docstring'''
import math
import random
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = False ):
if deriv:
return value * (1 - value)
return 1 / (1 + math.exp(-value ))
# Initial Value
snake_case__ : Optional[Any] = 0.0_2
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowercase = float(2 * (random.randint(1 , 1_0_0 )) - 1 )
for _ in range(_SCREAMING_SNAKE_CASE ):
# Forward propagation
__lowercase = sigmoid_function(INITIAL_VALUE * weight )
# How much did we miss?
__lowercase = (expected / 1_0_0) - layer_a
# Error delta
__lowercase = layer_1_error * sigmoid_function(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Update weight
weight += INITIAL_VALUE * layer_1_delta
return layer_a * 1_0_0
if __name__ == "__main__":
import doctest
doctest.testmod()
snake_case__ : Union[str, Any] = int(input("""Expected value: """))
snake_case__ : Optional[int] = int(input("""Number of propagations: """))
print(forward_propagation(expected, number_propagations))
| 710 |
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
snake_case__ : List[str] = logging.get_logger(__name__)
snake_case__ : Optional[Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
# See all LED models at https://huggingface.co/models?filter=LED
snake_case__ : Optional[Any] = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
snake_case__ : List[str] = {
"""allenai/led-base-16384""": 1_63_84,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def snake_case_ ( ):
__lowercase = (
list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
)
__lowercase = bs[:]
__lowercase = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_SCREAMING_SNAKE_CASE )
cs.append(2**8 + n )
n += 1
__lowercase = [chr(_SCREAMING_SNAKE_CASE ) for n in cs]
return dict(zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
def snake_case_ ( _SCREAMING_SNAKE_CASE ):
__lowercase = set()
__lowercase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__lowercase = char
return pairs
class _A ( _lowercase ):
'''simple docstring'''
_snake_case : List[str] = VOCAB_FILES_NAMES
_snake_case : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
_snake_case : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case : Union[str, Any] = ["""input_ids""", """attention_mask"""]
def __init__( self : List[str] , lowerCamelCase : Tuple , lowerCamelCase : Tuple , lowerCamelCase : Optional[int]="replace" , lowerCamelCase : Dict="<s>" , lowerCamelCase : Dict="</s>" , lowerCamelCase : Optional[Any]="</s>" , lowerCamelCase : Any="<s>" , lowerCamelCase : List[str]="<unk>" , lowerCamelCase : Union[str, Any]="<pad>" , lowerCamelCase : Any="<mask>" , lowerCamelCase : str=False , **lowerCamelCase : Optional[Any] , ):
'''simple docstring'''
__lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else bos_token
__lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else eos_token
__lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else sep_token
__lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else cls_token
__lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else unk_token
__lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__lowercase = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else mask_token
super().__init__(
errors=lowerCamelCase , bos_token=lowerCamelCase , eos_token=lowerCamelCase , unk_token=lowerCamelCase , sep_token=lowerCamelCase , cls_token=lowerCamelCase , pad_token=lowerCamelCase , mask_token=lowerCamelCase , add_prefix_space=lowerCamelCase , **lowerCamelCase , )
with open(lowerCamelCase , encoding="utf-8" ) as vocab_handle:
__lowercase = json.load(lowerCamelCase )
__lowercase = {v: k for k, v in self.encoder.items()}
__lowercase = errors # how to handle errors in decoding
__lowercase = bytes_to_unicode()
__lowercase = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCamelCase , encoding="utf-8" ) as merges_handle:
__lowercase = merges_handle.read().split("\n" )[1:-1]
__lowercase = [tuple(merge.split() ) for merge in bpe_merges]
__lowercase = dict(zip(lowerCamelCase , range(len(lowerCamelCase ) ) ) )
__lowercase = {}
__lowercase = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__lowercase = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
return len(self.encoder )
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def _snake_case ( self : List[Any] , lowerCamelCase : str ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
__lowercase = tuple(lowerCamelCase )
__lowercase = get_pairs(lowerCamelCase )
if not pairs:
return token
while True:
__lowercase = min(lowerCamelCase , key=lambda lowerCamelCase : self.bpe_ranks.get(lowerCamelCase , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
__lowercase , __lowercase = bigram
__lowercase = []
__lowercase = 0
while i < len(lowerCamelCase ):
try:
__lowercase = word.index(lowerCamelCase , lowerCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__lowercase = j
if word[i] == first and i < len(lowerCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__lowercase = tuple(lowerCamelCase )
__lowercase = new_word
if len(lowerCamelCase ) == 1:
break
else:
__lowercase = get_pairs(lowerCamelCase )
__lowercase = " ".join(lowerCamelCase )
__lowercase = word
return word
def _snake_case ( self : List[Any] , lowerCamelCase : Tuple ):
'''simple docstring'''
__lowercase = []
for token in re.findall(self.pat , lowerCamelCase ):
__lowercase = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase ).split(" " ) )
return bpe_tokens
def _snake_case ( self : Dict , lowerCamelCase : Optional[int] ):
'''simple docstring'''
return self.encoder.get(lowerCamelCase , self.encoder.get(self.unk_token ) )
def _snake_case ( self : str , lowerCamelCase : Optional[Any] ):
'''simple docstring'''
return self.decoder.get(lowerCamelCase )
def _snake_case ( self : Union[str, Any] , lowerCamelCase : int ):
'''simple docstring'''
__lowercase = "".join(lowerCamelCase )
__lowercase = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def _snake_case ( self : Optional[Any] , lowerCamelCase : str , lowerCamelCase : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(lowerCamelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__lowercase = os.path.join(
lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
__lowercase = os.path.join(
lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCamelCase , ensure_ascii=lowerCamelCase ) + "\n" )
__lowercase = 0
with open(lowerCamelCase , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCamelCase : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
" Please check that the tokenizer is not corrupted!" )
__lowercase = token_index
writer.write(" ".join(lowerCamelCase ) + "\n" )
index += 1
return vocab_file, merge_file
def _snake_case ( self : Tuple , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__lowercase = [self.cls_token_id]
__lowercase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _snake_case ( self : str , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None , lowerCamelCase : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase , token_ids_a=lowerCamelCase , already_has_special_tokens=lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase )) + [1]
return [1] + ([0] * len(lowerCamelCase )) + [1, 1] + ([0] * len(lowerCamelCase )) + [1]
def _snake_case ( self : int , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None ):
'''simple docstring'''
__lowercase = [self.sep_token_id]
__lowercase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _snake_case ( self : Dict , lowerCamelCase : Any , lowerCamelCase : Tuple=False , **lowerCamelCase : Any ):
'''simple docstring'''
__lowercase = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase ) > 0 and not text[0].isspace()):
__lowercase = " " + text
return (text, kwargs)
def _snake_case ( self : List[Any] , lowerCamelCase : Union[Dict[str, EncodedInput], BatchEncoding] , lowerCamelCase : Optional[int] = None , lowerCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowerCamelCase : Optional[int] = None , lowerCamelCase : Optional[bool] = None , ):
'''simple docstring'''
__lowercase = super()._pad(
encoded_inputs=lowerCamelCase , max_length=lowerCamelCase , padding_strategy=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , return_attention_mask=lowerCamelCase , )
# Load from model defaults
if return_attention_mask is None:
__lowercase = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
__lowercase = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
__lowercase = len(encoded_inputs["global_attention_mask"] ) != len(lowerCamelCase )
if needs_to_be_padded:
__lowercase = len(lowerCamelCase ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
__lowercase = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
__lowercase = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return encoded_inputs
| 655 | 0 |
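The bpe() loop above repeatedly merges the lowest-ranked adjacent symbol pair until no learned merge applies. Below is a hand-runnable sketch of that core loop with a two-entry toy merge table; it deliberately drops the caching and byte-level mapping of the real tokenizer.

def bpe(word, ranks):
    # word is an iterable of symbols; ranks maps a pair to its merge priority.
    word = tuple(word)
    while len(word) > 1:
        pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
        bigram = min(pairs, key=lambda p: ranks.get(p, float("inf")))
        if bigram not in ranks:
            break  # no learned merge applies to any adjacent pair
        first, second = bigram
        merged, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == bigram:
                merged.append(first + second)
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = tuple(merged)
    return word

ranks = {("l", "o"): 0, ("lo", "w"): 1}
assert bpe("low", ranks) == ("low",)   # l+o, then lo+w
assert bpe("lot", ranks) == ("lo", "t")  # only the first merge applies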
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class a ( UpperCAmelCase , unittest.TestCase ):
_lowercase = PriorTransformer
_lowercase = "hidden_states"
@property
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Tuple = 4
_UpperCAmelCase : List[Any] = 8
_UpperCAmelCase : Optional[int] = 7
_UpperCAmelCase : List[str] = floats_tensor((batch_size, embedding_dim) ).to(A_ )
_UpperCAmelCase : Tuple = floats_tensor((batch_size, embedding_dim) ).to(A_ )
_UpperCAmelCase : str = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(A_ )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def _UpperCAmelCase ( self , A_=0 ):
'''simple docstring'''
torch.manual_seed(A_ )
_UpperCAmelCase : int = 4
_UpperCAmelCase : Dict = 8
_UpperCAmelCase : Tuple = 7
_UpperCAmelCase : Optional[int] = torch.randn((batch_size, embedding_dim) ).to(A_ )
_UpperCAmelCase : Tuple = torch.randn((batch_size, embedding_dim) ).to(A_ )
_UpperCAmelCase : Tuple = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(A_ )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
@property
def _UpperCAmelCase ( self ):
'''simple docstring'''
return (4, 8)
@property
def _UpperCAmelCase ( self ):
'''simple docstring'''
return (4, 8)
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = {
"num_attention_heads": 2,
"attention_head_dim": 4,
"num_layers": 2,
"embedding_dim": 8,
"num_embeddings": 7,
"additional_embeddings": 4,
}
_UpperCAmelCase : Optional[int] = self.dummy_input
return init_dict, inputs_dict
    def test_from_pretrained_hub( self ):
        '''simple docstring'''
        model , loading_info = PriorTransformer.from_pretrained(
            "hf-internal-testing/prior-dummy" , output_loading_info=True )
        self.assertIsNotNone(model )
        self.assertEqual(len(loading_info["missing_keys"] ) , 0 )

        model.to(torch_device )
        hidden_states = model(**self.dummy_input )[0]

        assert hidden_states is not None, "Make sure output is not None"

    def test_forward_signature( self ):
        '''simple docstring'''
        init_dict , inputs_dict = self.prepare_init_args_and_inputs_for_common()

        model = self.model_class(**init_dict )
        signature = inspect.signature(model.forward )
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]

        expected_arg_names = ["hidden_states", "timestep"]
        self.assertListEqual(arg_names[:2] , expected_arg_names )
    def test_output_pretrained( self ):
        '''simple docstring'''
        model = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy" )
        model = model.to(torch_device )

        if hasattr(model , "set_default_attn_processor" ):
            model.set_default_attn_processor()

        input = self.get_dummy_seed_input()

        with torch.no_grad():
            output = model(**input )[0]

        output_slice = output[0, :5].flatten().cpu()
        print(output_slice )
        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        expected_output_slice = torch.tensor([-1.34_36, -0.28_70, 0.75_38, 0.43_68, -0.02_39] )
        self.assertTrue(torch_all_close(output_slice , expected_output_slice , rtol=1e-2 ) )
@slow
class a ( unittest.TestCase ):
    def get_dummy_seed_input( self , batch_size=1 , embedding_dim=768 , num_embeddings=77 , seed=0 ):
        '''simple docstring'''
        torch.manual_seed(seed )

        hidden_states = torch.randn((batch_size, embedding_dim) ).to(torch_device )
        proj_embedding = torch.randn((batch_size, embedding_dim) ).to(torch_device )
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(torch_device )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
    def tearDown( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[13, [-0.58_61, 0.12_83, -0.09_31, 0.08_82, 0.44_76, 0.13_29, -0.04_98, 0.06_40]],
[37, [-0.49_13, 0.01_10, -0.04_83, 0.05_41, 0.49_54, -0.01_70, 0.03_54, 0.16_51]],
# fmt: on
] )
    def test_kandinsky_prior( self , seed , expected_slice ):
        '''simple docstring'''
        model = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior" , subfolder="prior" )
        model.to(torch_device )
        input = self.get_dummy_seed_input(seed=seed )

        with torch.no_grad():
            sample = model(**input )[0]

        assert list(sample.shape ) == [1, 768]

        output_slice = sample[0, :8].flatten().cpu()
        print(output_slice )
        expected_output_slice = torch.tensor(expected_slice )
        assert torch_all_close(output_slice , expected_output_slice , atol=1e-3 )
| 300 |
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class Conversation :
    def __init__( self , text = None , conversation_id = None , past_user_inputs=None , generated_responses=None ):
        '''simple docstring'''
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid : uuid.UUID = conversation_id
        self.past_user_inputs : List[str] = past_user_inputs
        self.generated_responses : List[str] = generated_responses
        self.new_user_input : Optional[str] = text
    def __eq__( self , other ):
        '''simple docstring'''
        if not isinstance(other , Conversation ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
    def add_user_input( self , text , overwrite = False ):
        '''simple docstring'''
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".' )
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input' )
        else:
            self.new_user_input = text
    def mark_processed( self ):
        '''simple docstring'''
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input )
        self.new_user_input = None

    def append_response( self , response ):
        '''simple docstring'''
        self.generated_responses.append(response )
    def iter_texts( self ):
'''simple docstring'''
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self ):
'''simple docstring'''
        output = f'Conversation id: {self.uuid} \n'
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
output += f'{name} >> {text} \n'
return output
@add_end_docstrings(
    PIPELINE_INIT_ARGS , r"\n    min_length_for_response (`int`, *optional*, defaults to 32):\n        The minimum length (in number of tokens) for a response.\n    minimum_tokens (`int`, *optional*, defaults to 10):\n        The minimum length of tokens to leave for a response.\n " , )
class ConversationalPipeline ( Pipeline ):
def __init__( self , *A_ , **A_ ):
'''simple docstring'''
super().__init__(*A_ , **A_ )
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token
    def _sanitize_parameters( self , min_length_for_response=None , minimum_tokens=None , clean_up_tokenization_spaces=None , **generate_kwargs ):
        '''simple docstring'''
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}

        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens

        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if generate_kwargs:
            forward_params.update(generate_kwargs )
        return preprocess_params, forward_params, postprocess_params
    def __call__( self , conversations , num_workers=0 , **kwargs ):
        '''simple docstring'''
        outputs = super().__call__(conversations , num_workers=num_workers , **kwargs )
        if isinstance(outputs , list ) and len(outputs ) == 1:
            return outputs[0]
        return outputs
    def preprocess( self , conversation , min_length_for_response=32 ):
        '''simple docstring'''
        if not isinstance(conversation , Conversation ):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs" )
        if conversation.new_user_input is None:
            raise ValueError(
                f'Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. '
                "Add user inputs with the conversation's `add_user_input` method" )
        if hasattr(self.tokenizer , "_build_conversation_input_ids" ):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation )
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation )

        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids] )
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids] )
        return {"input_ids": input_ids, "conversation": conversation}
    def _forward( self , model_inputs , minimum_tokens=10 , **generate_kwargs ):
        '''simple docstring'''
        max_length = generate_kwargs.get("max_length" , self.model.config.max_length )

        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f'Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})' )
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation" )
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs , **generate_kwargs )
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
    def postprocess( self , model_outputs , clean_up_tokenization_spaces=True ):
        '''simple docstring'''
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0] , skip_special_tokens=True , clean_up_tokenization_spaces=clean_up_tokenization_spaces , )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer )
        return conversation

    def _legacy_parse_and_tokenize( self , conversation ):
        '''simple docstring'''
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text , add_special_tokens=False ) + [eos_token_id] )
            else:
                input_ids.extend(self.tokenizer.encode(text , add_special_tokens=False ) )

        if len(input_ids ) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
| 300 | 1 |
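# Minimal usage sketch for the Conversation / ConversationalPipeline pair above.
# The checkpoint name is only an illustration; any conversational checkpoint
# registered with the pipeline factory would work the same way.
from transformers import Conversation, pipeline

chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")
conversation = Conversation("Is it a good day for a walk?")
conversation = chatbot(conversation)         # runs preprocess -> _forward -> postprocess
print(conversation.generated_responses[-1])  # latest bot reply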
"""simple docstring"""
def fibonacci(n):
    if n == 1 or not isinstance(n , int ):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2 , n + 1 ):
            sequence.append(sequence[i - 1] + sequence[i - 2] )

        return sequence[n]


def fibonacci_digits_index(n):
    digits = 0
    index = 2

    while digits < n:
        index += 1
        digits = len(str(fibonacci(index ) ) )

    return index


def solution(n = 1_000):
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
| 480 |
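# Closed-form sketch (assumes the standard Binet approximation F(k) ≈ φ^k / √5):
# F(k) first reaches n digits once k * log10(φ) - log10(5) / 2 >= n - 1, so the
# index can be solved directly instead of generating the whole sequence; it agrees
# with the iterative version above (4782 for n = 1000).
import math


def fibonacci_digits_index_closed_form(n: int) -> int:
    phi = (1 + math.sqrt(5)) / 2
    return math.ceil((n - 1 + math.log10(5) / 2) / math.log10(phi))


assert fibonacci_digits_index_closed_form(3) == 12  # F(12) = 144 is the first 3-digit term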
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
def create_rename_keys(config , base_model=False):
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(("""cls_token""", """vit.embeddings.cls_token""") )
rename_keys.append(("""pos_embed""", """vit.embeddings.position_embeddings""") )
rename_keys.append(("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias""") )
# backbone
rename_keys.append(("""patch_embed.backbone.stem.conv.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.bias""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias""") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight""") )
rename_keys.append((F"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias""", F"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias""") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
# fmt: on
return rename_keys
def read_in_q_k_v(state_dict , config , base_model=False):
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = """"""
        else:
            prefix = """vit."""
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
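# Shape sketch (illustrative): timm stores the attention projections fused as one
# (3 * hidden_size, hidden_size) matrix, and the slices above peel it apart:
#   in_proj_weight[: hidden_size]                 -> query.weight
#   in_proj_weight[hidden_size : 2 * hidden_size] -> key.weight
#   in_proj_weight[-hidden_size :]                -> value.weight
# with the bias vector split the same way.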
def remove_classification_head_(state_dict):
    ignore_keys = ["""head.weight""", """head.bias"""]
    for k in ignore_keys:
        state_dict.pop(k , None )


def rename_key(dct , old , new):
    val = dct.pop(old )
    dct[new] = val


def prepare_img():
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name , pytorch_dump_folder_path , push_to_hub=False):
    backbone_config = BitConfig(
        global_padding="""same""" , layer_type="""bottleneck""" , depths=(3, 4, 9) , out_features=["""stage3"""] , embedding_dynamic_padding=True , )
    config = ViTHybridConfig(backbone_config=backbone_config , image_size=384 , num_labels=1_000 )
    base_model = False

    # load original model from timm
    timm_model = timm.create_model(vit_name , pretrained=True )
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict )
    rename_keys = create_rename_keys(config , base_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model )

    repo_id = """huggingface/label-files"""
    filename = """imagenet-1k-id2label.json"""
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config ).eval()
    else:
        model = ViTHybridForImageClassification(config ).eval()
    model.load_state_dict(state_dict )

    # create image processor
    transform = create_transform(**resolve_data_config({} , model=timm_model ) )
    timm_transforms = transform.transforms

    pillow_resamplings = {
        """bilinear""": PILImageResampling.BILINEAR,
        """bicubic""": PILImageResampling.BICUBIC,
        """nearest""": PILImageResampling.NEAREST,
    }

    processor = ViTHybridImageProcessor(
        do_resize=True , size={"""shortest_edge""": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=True , crop_size={"""height""": timm_transforms[1].size[0], """width""": timm_transforms[1].size[1]} , do_normalize=True , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )

    image = prepare_img()
    timm_pixel_values = transform(image ).unsqueeze(0 )
    pixel_values = processor(image , return_tensors="""pt""" ).pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values , pixel_values )
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values )
    logits = outputs.logits

    print("""Predicted class:""" , logits.argmax(-1 ).item() )
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values )
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output , outputs.pooler_output , atol=1E-3 )
    else:
        timm_logits = timm_model(pixel_values )
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits , outputs.logits , atol=1E-3 )
    print("""Looks ok!""" )

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(F"""Saving model {vit_name} to {pytorch_dump_folder_path}""" )
        model.save_pretrained(pytorch_dump_folder_path )
        print(F"""Saving processor to {pytorch_dump_folder_path}""" )
        processor.save_pretrained(pytorch_dump_folder_path )

    if push_to_hub:
        print(F"""Pushing model and processor to the hub {vit_name}""" )
        model.push_to_hub(F"""ybelkada/{vit_name}""" )
        processor.push_to_hub(F"""ybelkada/{vit_name}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_r50_s16_384",
type=str,
help="Name of the hybrid ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 480 | 1 |
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ ( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''

    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self ):
super().setUp()
# fmt: off
        vocab = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
        # fmt: on
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )

        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '''\n''' )

    def get_tokenizer(self , **kwargs ):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname , **kwargs )

    def get_input_output_texts(self , tokenizer ):
        input_text = '''tester'''
        output_text = '''tester'''
        return input_text, output_text
@unittest.skip('''MGP-STR always lower cases letters.''' )
def lowerCAmelCase_ (self ) -> Dict:
pass
    def test_add_special_tokens(self ):
        tokenizers = self.get_tokenizers(do_lower_case=False )
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                special_token = '''[SPECIAL_TOKEN]'''

                tokenizer.add_special_tokens({'''cls_token''': special_token} )
                encoded_special_token = tokenizer.encode([special_token] , add_special_tokens=False )
                self.assertEqual(len(encoded_special_token ) , 1 )

                decoded = tokenizer.decode(encoded_special_token , skip_special_tokens=True )
                self.assertTrue(special_token not in decoded )

    def test_internal_consistency(self ):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                input_text , output_text = self.get_input_output_texts(tokenizer )

                tokens = tokenizer.tokenize(input_text )
                ids = tokenizer.convert_tokens_to_ids(tokens )
                ids_2 = tokenizer.encode(input_text , add_special_tokens=False )
                self.assertListEqual(ids , ids_2 )

                tokens_2 = tokenizer.convert_ids_to_tokens(ids )
                self.assertNotEqual(len(tokens_2 ) , 0 )
                text_2 = tokenizer.decode(ids )
                self.assertIsInstance(text_2 , str )

                self.assertEqual(text_2.replace(''' ''' , '''''' ) , output_text )
@unittest.skip('''MGP-STR tokenizer only handles one sequence.''' )
def lowerCAmelCase_ (self ) -> List[Any]:
pass
@unittest.skip('''inputs cannot be pretokenized in MgpstrTokenizer''' )
def lowerCAmelCase_ (self ) -> str:
pass
| 303 |
"""simple docstring"""
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester :
    def __init__( self , parent , batch_size=2 , seq_length=8 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=16 , num_hidden_layers=5 , num_attention_heads=2 , intermediate_size=36 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config( self ):
        return MraConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )

    def get_pipeline_config( self ):
        config = self.get_config()
        config.vocab_size = 300
        return config
    def prepare_config_and_inputs_for_decoder( self ):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = MraModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_model_as_decoder( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        config.add_cross_attention = True
        model = MraModel(config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , )
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , encoder_hidden_states=encoder_hidden_states , )
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = MraForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = MraForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def create_and_check_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class MraModelTest ( ModelTesterMixin , unittest.TestCase):
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )

    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()

    def setUp( self ):
        self.model_tester = MraModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MraConfig , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()

    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_model_various_embeddings( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )

    def test_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )

    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )

    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )

    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )

    @slow
    def test_model_from_pretrained( self ):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name )
            self.assertIsNotNone(model )

    @unittest.skip(reason='''MRA does not output attentions''' )
    def test_attention_outputs( self ):
        return
@require_torch
class MraModelIntegrationTest ( unittest.TestCase):
    @slow
    def test_inference_no_head( self ):
        model = MraModel.from_pretrained('''uw-madison/mra-base-512-4''' )
        input_ids = torch.arange(256 ).unsqueeze(0 )

        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 256, 768) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.01_40, 0.08_30, -0.03_81], [0.15_46, 0.14_02, 0.02_20], [0.11_62, 0.08_51, 0.01_65]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )

    @slow
    def test_inference_masked_lm( self ):
        model = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-512-4''' )
        input_ids = torch.arange(256 ).unsqueeze(0 )

        with torch.no_grad():
            output = model(input_ids )[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[9.25_95, -3.60_38, 11.88_19], [9.38_69, -3.26_93, 11.09_56], [11.85_24, -3.49_38, 13.12_10]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )

    @slow
    def test_inference_masked_lm_long_input( self ):
        model = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-4096-8-d3''' )
        input_ids = torch.arange(4096 ).unsqueeze(0 )

        with torch.no_grad():
            output = model(input_ids )[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[5.47_89, -2.35_64, 7.50_64], [7.90_67, -1.33_69, 9.96_68], [9.07_12, -1.81_06, 7.03_80]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
| 123 | 0 |
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r'\s+')


def get_hash(example):
    return {"hash": hashlib.md5(re.sub(PATTERN , '' , example['content'] ).encode('utf-8' ) ).hexdigest()}


def line_stats(example):
    line_lengths = [len(line ) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths ), "line_max": max(line_lengths )}


def alpha_stats(example):
    alpha_frac = np.mean([c.isalnum() for c in example['content']] )
    return {"alpha_frac": alpha_frac}


def check_uniques(example , uniques):
    if example["hash"] in uniques:
        uniques.remove(example['hash'] )
        return True
    else:
        return False


def is_autogenerated(example , scan_width=5):
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width ) , lines ):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}


def is_config_or_test(example , scan_width=5 , coeff=0.05):
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width ) , lines ):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example["content"].count('\n' )
    threshold = int(coeff * nlines )
    for line in lines:
        count_config += line.lower().count('config' )
        count_test += line.lower().count('test' )
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}


def has_no_keywords(example):
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}


def has_few_assignments(example , minimum=4):
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count('=' )
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}


def char_token_ratio(example):
    input_ids = tokenizer(example['content'] , truncation=False )["input_ids"]
    ratio = len(example['content'] ) / len(input_ids )
    return {"ratio": ratio}


def preprocess(example):
    results = {}
    results.update(get_hash(example ) )
    results.update(line_stats(example ) )
    results.update(alpha_stats(example ) )
    results.update(char_token_ratio(example ) )
    results.update(is_autogenerated(example ) )
    results.update(is_config_or_test(example ) )
    results.update(has_no_keywords(example ) )
    results.update(has_few_assignments(example ) )
    return results


def filter(example , uniques , args):
    if not check_uniques(example , uniques ):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True


def compress_file(file_path):
    with open(file_path , 'rb' ) as f_in:
        with gzip.open(str(file_path ) + '.gz' , 'wb' , compresslevel=6 ) as f_out:
            shutil.copyfileobj(f_in , f_out )
    os.unlink(file_path )
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split='train')
print(F"Time to load dataset: {time.time()-t_start:.2f}")

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(F"Time to preprocess dataset: {time.time()-t_start:.2f}")

# Deduplicate hashes
uniques = set(ds.unique('hash'))
frac = len(uniques) / len(ds)
print(F"Fraction of duplicates: {1-frac:.2%}")

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={'uniques': uniques, 'args': args})
print(F"Time to filter dataset: {time.time()-t_start:.2f}")
print(F"Size of filtered dataset: {len(ds_filter)}")

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(F"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(F"Size of deduplicate dataset: {len(ds_filter)}")

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place to save it
if args.near_deduplication:
    with open(output_dir / 'duplicate_clusters.json', 'w') as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / "data"
data_dir.mkdir(exist_ok=True)

t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / F"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(F"Time to save dataset: {time.time()-t_start:.2f}")
| 713 |
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
    def __init__( self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , num_labels=3 , mask_ratio=0.6 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )

        config = self.get_config()

        return config, pixel_values, labels

    def get_config( self ):
        return ViTMAEConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def lowercase_ ( self : Dict , __snake_case : Dict , __snake_case : List[Any] , __snake_case : Union[str, Any] ):
a : Union[str, Any] = ViTMAEModel(config=__snake_case )
model.to(__snake_case )
model.eval()
a : Optional[Any] = model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase_ ( self : Optional[Any] , __snake_case : List[str] , __snake_case : Optional[int] , __snake_case : Dict ):
a : Dict = ViTMAEForPreTraining(__snake_case )
model.to(__snake_case )
model.eval()
a : Dict = model(__snake_case )
a : str = (self.image_size // self.patch_size) ** 2
a : int = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
a : Any = 1
a : Optional[int] = ViTMAEForPreTraining(__snake_case )
model.to(__snake_case )
model.eval()
a : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
a : Optional[Any] = model(__snake_case )
a : Dict = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class a__( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"""feature-extraction""": ViTMAEModel} if is_torch_available() else {}
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp( self ):
        self.model_tester = ViTMAEModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTMAEConfig , has_text_modality=False , hidden_size=37 )
def lowercase_ ( self : List[str] ):
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMAE does not use inputs_embeds' )
def lowercase_ ( self : List[str] ):
pass
def lowercase_ ( self : Dict ):
a , a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a : Optional[Any] = model_class(__snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
a : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__snake_case , nn.Linear ) )
def lowercase_ ( self : List[str] ):
a , a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a : Any = model_class(__snake_case )
a : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a : int = [*signature.parameters.keys()]
a : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , __snake_case )
def lowercase_ ( self : Any ):
a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
def lowercase_ ( self : List[Any] ):
a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__snake_case )
def lowercase_ ( self : List[str] , __snake_case : Optional[Any] , __snake_case : Union[str, Any] , __snake_case : Tuple ):
# make masks reproducible
np.random.seed(2 )
a : Union[str, Any] = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
a : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
a : List[Any] = torch.from_numpy(__snake_case )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
a : List[str] = pt_noise
super().check_pt_tf_models(__snake_case , __snake_case , __snake_case )
def lowercase_ ( self : Optional[int] ):
a , a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a : Any = model_class(__snake_case )
model.to(__snake_case )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
a : Tuple = model(**self._prepare_for_class(__snake_case , __snake_case ) )
a : List[str] = outputs[0].cpu().numpy()
a : str = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__snake_case )
a : Dict = model_class.from_pretrained(__snake_case )
model.to(__snake_case )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
a : int = model(**self._prepare_for_class(__snake_case , __snake_case ) )
# Make sure we don't have nans
a : List[Any] = after_outputs[0].cpu().numpy()
a : Any = 0
a : Any = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__snake_case , 1e-5 )
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
def lowercase_ ( self : List[Any] ):
pass
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
def lowercase_ ( self : Any ):
pass
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
def lowercase_ ( self : int ):
pass
@unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load' )
def lowercase_ ( self : int ):
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowercase_ ( self : Any ):
pass
@slow
def lowercase_ ( self : Dict ):
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : str = ViTMAEModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class a__( unittest.TestCase ):
@cached_property
def lowercase_ ( self : Union[str, Any] ):
return ViTImageProcessor.from_pretrained('facebook/vit-mae-base' ) if is_vision_available() else None
@slow
def lowercase_ ( self : Union[str, Any] ):
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
a : Union[str, Any] = ViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base' ).to(__snake_case )
a : str = self.default_image_processor
a : Dict = prepare_img()
a : Union[str, Any] = image_processor(images=__snake_case , return_tensors='pt' ).to(__snake_case )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
a : Tuple = ViTMAEConfig()
a : Optional[Any] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
a : List[str] = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
a : Tuple = model(**__snake_case , noise=torch.from_numpy(__snake_case ).to(device=__snake_case ) )
# verify the logits
a : int = torch.Size((1, 1_96, 7_68) )
self.assertEqual(outputs.logits.shape , __snake_case )
a : Optional[Any] = torch.tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(torch_device ) , atol=1e-4 ) )
| 195 | 0 |
'''simple docstring'''
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SentencePieceUnigramTokenizer ( BaseTokenizer ):
    def __init__( self , replacement : str = "▁" , add_prefix_space : bool = True , unk_token : Union[str, AddedToken] = "<unk>" , eos_token : Union[str, AddedToken] = "</s>" , pad_token : Union[str, AddedToken] = "<pad>" , ):
        self.special_tokens = {
            'pad': {'id': 0, 'token': pad_token},
            'eos': {'id': 1, 'token': eos_token},
            'unk': {'id': 2, 'token': unk_token},
        }

        self.special_tokens_list = [None] * len(self.special_tokens )
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict['id']] = token_dict['token']

        tokenizer = Tokenizer(Unigram() )

        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(' {2,}' ) , ' ' ),
                normalizers.Lowercase(),
            ] )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement , add_prefix_space=add_prefix_space ),
                pre_tokenizers.Digits(individual_digits=True ),
                pre_tokenizers.Punctuation(),
            ] )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement , add_prefix_space=add_prefix_space )

        tokenizer.post_processor = TemplateProcessing(
            single=f'$A {self.special_tokens["eos"]["token"]}' , special_tokens=[(self.special_tokens['eos']['token'], self.special_tokens['eos']['id'])] , )

        parameters = {
            'model': 'SentencePieceUnigram',
            'replacement': replacement,
            'add_prefix_space': add_prefix_space,
        }

        super().__init__(tokenizer , parameters )
    def train( self , files : Union[str, List[str]] , vocab_size : int = 8000 , show_progress : bool = True , ):
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size , special_tokens=self.special_tokens_list , show_progress=show_progress , )

        if isinstance(files , str ):
            files = [files]
        self._tokenizer.train(files , trainer=trainer )

        self.add_unk_id()

    def train_from_iterator( self , iterator : Union[Iterator[str], Iterator[Iterator[str]]] , vocab_size : int = 8000 , show_progress : bool = True , ):
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size , special_tokens=self.special_tokens_list , show_progress=show_progress , )

        self._tokenizer.train_from_iterator(iterator , trainer=trainer )

        self.add_unk_id()
def a ( self : Dict ) -> str:
__snake_case = json.loads(self._tokenizer.to_str() )
__snake_case = self.special_tokens['unk']['id']
__snake_case = Tokenizer.from_str(json.dumps(SCREAMING_SNAKE_CASE_ ) )
| 56 |
'''simple docstring'''
from typing import Any
class Node:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self) -> None:
        self.head = None

    def print_list(self) -> None:
        temp = self.head
        while temp is not None:
            print(temp.data, end=' ')
            temp = temp.next
        print()

    # adding nodes at the head of the list
    def push(self, new_data: Any) -> None:
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    # swapping the payloads of the nodes holding the two given values
    def swap_nodes(self, node_data_1, node_data_2) -> None:
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next
        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next
        if node_1 is None or node_2 is None:
            return
        node_1.data, node_2.data = node_2.data, node_1.data


if __name__ == "__main__":
    ll = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print("After swapping")
ll.print_list()
| 56 | 1 |
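A quick check of what the demo above prints, assuming the fixed implementation: `push()` prepends, so pushing 5..1 yields 1→2→3→4→5, and `swap_nodes(1, 4)` exchanges payloads rather than relinking nodes:

checker = LinkedList()
for value in range(5, 0, -1):
    checker.push(value)
checker.print_list()       # 1 2 3 4 5
checker.swap_nodes(1, 4)
checker.print_list()       # 4 2 3 1 5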
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["VivitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vivit"] = [
"VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"VivitModel",
"VivitPreTrainedModel",
"VivitForVideoClassification",
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 719 |
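The module above defers heavy imports until first attribute access. A stripped-down sketch of the idea, not the actual `_LazyModule` implementation (whose real code lives in `transformers.utils`):

import importlib
import types

class LazyNamespace(types.ModuleType):
    # maps attribute name -> relative module that defines it, e.g. {"VivitModel": ".modeling_vivit"}
    def __init__(self, name, attr_to_module, package):
        super().__init__(name)
        self._attr_to_module = attr_to_module
        self._package = package

    def __getattr__(self, attr):
        module = importlib.import_module(self._attr_to_module[attr], package=self._package)
        return getattr(module, attr)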
'''simple docstring'''
def solution(n: int = 4_000_000) -> int:
    '''simple docstring'''
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
return total
if __name__ == "__main__":
print(f'''{solution() = }''')
| 539 | 0 |
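An equivalent constant-memory sketch: every third Fibonacci number is even, so the even terms obey E(k) = 4*E(k-1) + E(k-2) with E(1)=2, E(2)=8, and the list can be skipped entirely:

def solution_recurrence(n: int = 4_000_000) -> int:
    even, nxt = 2, 8
    total = 0
    while even <= n:
        total += even
        even, nxt = nxt, 4 * nxt + even
    return total

assert solution_recurrence() == solution() == 4_613_732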
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )


@dataclass
class DataTrainingArguments:
    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome.")

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1), training_args.fp16, )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train, )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev, )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, data_collator=data_collator, )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
                results.update(result)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 660 |
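A typical command line for a script like this, sketched with illustrative paths and hyper-parameters (the file name `run_multiple_choice.py` and the `swag` task follow the legacy multiple-choice example this appears to be based on; adjust to your checkout):

# python run_multiple_choice.py \
#     --model_name_or_path bert-base-uncased \
#     --task_name swag \
#     --data_dir ./data/swag \
#     --output_dir ./out/swag \
#     --max_seq_length 128 \
#     --do_train --do_eval \
#     --overwrite_output_dir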
"""simple docstring"""
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework != "pt":
            raise ValueError(F'The {self.__class__} is only available in PyTorch.')
        # No specific FOR_XXX available yet

    def __call__(self, audios: Union[np.ndarray, bytes, str], **kwargs):
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith('http://') or audio.startswith('https://'):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, 'rb') as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError('We expect a numpy ndarray as input')
        if len(audio.shape) != 1:
            raise ValueError('We expect a single channel audio input for ZeroShotAudioClassificationPipeline')

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors='pt')
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop('candidate_labels')
        text_inputs = model_inputs.pop('text_inputs')
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            'candidate_labels': candidate_labels,
            'logits': outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop('candidate_labels')
        logits = model_outputs['logits'][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError('`tf` framework not supported.')

        result = [
            {'score': score, 'label': candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
| 510 | 0 |
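How the pipeline above is reached from user code, as a sketch (the CLAP checkpoint id and the audio file name are illustrative and assume network access):

from transformers import pipeline

classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
predictions = classifier("dog_bark.wav", candidate_labels=["dog barking", "vacuum cleaner"])
print(predictions)  # e.g. [{"score": 0.99, "label": "dog barking"}, {"score": 0.01, "label": "vacuum cleaner"}]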
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_pix2struct""": [
"""PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Pix2StructConfig""",
"""Pix2StructTextConfig""",
"""Pix2StructVisionConfig""",
],
"""processing_pix2struct""": ["""Pix2StructProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[str] = ["""Pix2StructImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pix2struct"] = [
"""PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Pix2StructPreTrainedModel""",
"""Pix2StructForConditionalGeneration""",
"""Pix2StructVisionModel""",
"""Pix2StructTextModel""",
]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_pix2struct import Pix2StructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 710 |
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
@cached_property
def _lowercase( self ) -> int:
return PegasusTokenizer.from_pretrained("""google/pegasus-large""" )
def _lowercase( self , **A ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **A )
def _lowercase( self , A ) -> List[str]:
return ("This is a test", "This is a test")
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Optional[int] = """</s>"""
UpperCAmelCase : Optional[int] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) , A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) , A )
def _lowercase( self ) -> Tuple:
UpperCAmelCase : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """</s>""" )
self.assertEqual(vocab_keys[-1] , """v""" )
self.assertEqual(len(A ) , 1103 )
def _lowercase( self ) -> Optional[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def _lowercase( self ) -> int:
UpperCAmelCase : Any = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
UpperCAmelCase : Optional[Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
UpperCAmelCase : Union[str, Any] = (
"""Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"""
""" </s> <pad> <pad> <pad>"""
)
UpperCAmelCase : Optional[Any] = rust_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
UpperCAmelCase : Optional[Any] = py_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
self.assertListEqual(A , A )
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : List[Any] = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
UpperCAmelCase : Any = """<mask_1> To ensure a <mask_2> flow of bank resolutions."""
UpperCAmelCase : Optional[Any] = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
UpperCAmelCase : Optional[Any] = tokenizer([raw_input_str] , return_tensors=A ).input_ids[0]
self.assertListEqual(A , A )
def _lowercase( self ) -> int:
UpperCAmelCase : str = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 96103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
UpperCAmelCase : List[Any] = """To ensure a smooth flow of bank resolutions."""
UpperCAmelCase : Optional[int] = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
UpperCAmelCase : Union[str, Any] = tokenizer([raw_input_str] , return_tensors=A ).input_ids[0]
self.assertListEqual(A , A )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def _lowercase( self ) -> Any:
UpperCAmelCase : int = ["""This is going to be way too long.""" * 150, """short example"""]
UpperCAmelCase : Optional[int] = ["""not super long but more than 5 tokens""", """tiny"""]
UpperCAmelCase : Tuple = self._large_tokenizer(A , padding=A , truncation=A , return_tensors="""pt""" )
UpperCAmelCase : List[Any] = self._large_tokenizer(
text_target=A , max_length=5 , padding=A , truncation=A , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(A ) == 2 # input_ids, attention_mask.
@slow
def _lowercase( self ) -> List[str]:
# fmt: off
UpperCAmelCase : List[str] = {"""input_ids""": [[38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787, 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304, 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143, 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787, 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1], [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="[MASK]")
        tokenizer.save_pretrained(self.tmpdirname)
@cached_property
def _lowercase( self ) -> Optional[Any]:
return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""" )
def _lowercase( self , **A ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **A )
def _lowercase( self , A ) -> str:
return ("This is a test", "This is a test")
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : Dict = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
UpperCAmelCase : Any = self.tokenizer_class.from_pretrained(self.tmpdirname )
UpperCAmelCase : str = (
"""Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"""
""" <pad> <pad> <pad>"""
)
UpperCAmelCase : List[str] = rust_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
UpperCAmelCase : str = py_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
self.assertListEqual(A , A )
@require_torch
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : Union[str, Any] = ["""This is going to be way too long.""" * 1000, """short example"""]
UpperCAmelCase : Any = ["""not super long but more than 5 tokens""", """tiny"""]
UpperCAmelCase : int = self._large_tokenizer(A , padding=A , truncation=A , return_tensors="""pt""" )
UpperCAmelCase : Optional[int] = self._large_tokenizer(
text_target=A , max_length=5 , padding=A , truncation=A , return_tensors="""pt""" )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(A ) == 2 # input_ids, attention_mask.
def _lowercase( self ) -> int:
UpperCAmelCase : Union[str, Any] = (
"""This is an example string that is used to test the original TF implementation against the HF"""
""" implementation"""
)
UpperCAmelCase : Optional[Any] = self._large_tokenizer(A ).input_ids
self.assertListEqual(
A , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1] , )
| 672 | 0 |
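A sketch of the offset scheme those assertions pin down: PegasusTokenizer reserves the first `offset` ids (103 by default) for pad/eos/mask-style specials and shifts every underlying SentencePiece id upward, which is why `unk_token_id == offset + 2` above (an illustrative helper, not part of the test file):

def sp_to_hf_id(sp_id: int, offset: int = 103) -> int:
    # Pegasus maps a SentencePiece id to its HF vocab id by shifting past
    # the reserved special-token block
    return sp_id + offset

assert sp_to_hf_id(2) == 105  # matches tokenizer.unk_token_id == tokenizer.offset + 2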
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()

DEVICE_MAPPING: Optional[dict] = None
class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
"""simple docstring"""
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        """simple docstring"""
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"""Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` """
                'is not serializable neither with `pickle` nor with `dill`. Instead you can surround '
                'the device with `str()` to get its string identifier that will be internally mapped '
                'to the actual `jaxlib.xla_extension.Device`.')
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"""Device with string identifier {self.device} not listed among the available """
                f"""devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default """
                f"""device: {str(jax.devices()[0])}.""")
            self.device = str(jax.devices()[0])

        self.jnp_array_kwargs = jnp_array_kwargs
    @staticmethod
    def _map_devices_to_str():
        """simple docstring"""
        import jax

        return {str(device): device for device in jax.devices()}
    def _consolidate(self, column):
        """simple docstring"""
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column):
                return jnp.stack(column, axis=0)
        return column
    def _tensorize(self, value):
        """simple docstring"""
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {'dtype': jnp.int64}
            else:
                default_dtype = {'dtype': jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {'dtype': jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})
    def _recursive_tensorize(self, data_struct):
        """simple docstring"""
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, '__array__') and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)
    def recursive_tensorize(self, data_struct: dict):
        """simple docstring"""
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)
    def format_row(self, pa_table: pa.Table) -> Mapping:
        """simple docstring"""
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)
    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        """simple docstring"""
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column
    def format_batch(self, pa_table: pa.Table) -> Mapping:
        """simple docstring"""
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
| 187 |
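How this formatter is selected in practice, as a sketch (the dataset contents are illustrative):

from datasets import Dataset

ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]], "y": [0, 1]})
ds = ds.with_format("jax")   # routes row/column/batch access through JaxFormatter
batch = ds[:2]               # values come back as jax.Array objects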
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
"""simple docstring"""
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=4_00, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 2_55, do_pad=True, ):
        """simple docstring"""
        # by default, also resize to a fixed shortest edge and pad to the longest edge
        size = size if size is not None else {'shortest_edge': 18, 'longest_edge': 13_33}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        """simple docstring"""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w)
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h)
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """simple docstring"""

    image_processing_class = DetaImageProcessor if is_vision_available() else None
    def setUp(self):
        """simple docstring"""
        self.image_processor_tester = DetaImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCamelCase , 'image_mean' ) )
self.assertTrue(hasattr(__UpperCamelCase , 'image_std' ) )
self.assertTrue(hasattr(__UpperCamelCase , 'do_normalize' ) )
self.assertTrue(hasattr(__UpperCamelCase , 'do_resize' ) )
self.assertTrue(hasattr(__UpperCamelCase , 'do_rescale' ) )
self.assertTrue(hasattr(__UpperCamelCase , 'do_pad' ) )
self.assertTrue(hasattr(__UpperCamelCase , 'size' ) )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 13_33} )
self.assertEqual(image_processor.do_pad , __UpperCamelCase )
def __lowerCAmelCase ( self ):
"""simple docstring"""
pass
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , Image.Image )
# Test not batched input
snake_case_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
snake_case_ , snake_case_ = self.image_processor_tester.get_expected_values(__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case_ , snake_case_ = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase )
snake_case_ = image_processing(__UpperCamelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , numpify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , np.ndarray )
# Test not batched input
snake_case_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
snake_case_ , snake_case_ = self.image_processor_tester.get_expected_values(__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case_ = image_processing(__UpperCamelCase , return_tensors='pt' ).pixel_values
snake_case_ , snake_case_ = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , torchify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , torch.Tensor )
# Test not batched input
snake_case_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
snake_case_ , snake_case_ = self.image_processor_tester.get_expected_values(__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case_ = image_processing(__UpperCamelCase , return_tensors='pt' ).pixel_values
snake_case_ , snake_case_ = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
snake_case_ = json.loads(f.read() )
snake_case_ = {'image_id': 3_97_69, 'annotations': target}
# encode them
snake_case_ = DetaImageProcessor()
snake_case_ = image_processing(images=__UpperCamelCase , annotations=__UpperCamelCase , return_tensors='pt' )
# verify pixel values
snake_case_ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['pixel_values'].shape , __UpperCamelCase )
snake_case_ = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , __UpperCamelCase , atol=1E-4 ) )
# verify area
snake_case_ = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , __UpperCamelCase ) )
# verify boxes
snake_case_ = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , __UpperCamelCase )
snake_case_ = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , __UpperCamelCase , atol=1E-3 ) )
# verify image_id
snake_case_ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , __UpperCamelCase ) )
# verify is_crowd
snake_case_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , __UpperCamelCase ) )
# verify class_labels
snake_case_ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , __UpperCamelCase ) )
# verify orig_size
snake_case_ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , __UpperCamelCase ) )
# verify size
snake_case_ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , __UpperCamelCase ) )
@slow
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
snake_case_ = json.loads(f.read() )
snake_case_ = {'file_name': '000000039769.png', 'image_id': 3_97_69, 'segments_info': target}
snake_case_ = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
snake_case_ = DetaImageProcessor(format='coco_panoptic' )
snake_case_ = image_processing(images=__UpperCamelCase , annotations=__UpperCamelCase , masks_path=__UpperCamelCase , return_tensors='pt' )
# verify pixel values
snake_case_ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['pixel_values'].shape , __UpperCamelCase )
snake_case_ = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , __UpperCamelCase , atol=1E-4 ) )
# verify area
snake_case_ = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , __UpperCamelCase ) )
# verify boxes
snake_case_ = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , __UpperCamelCase )
snake_case_ = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , __UpperCamelCase , atol=1E-3 ) )
# verify image_id
snake_case_ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , __UpperCamelCase ) )
# verify is_crowd
snake_case_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , __UpperCamelCase ) )
# verify class_labels
snake_case_ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , __UpperCamelCase ) )
# verify masks
snake_case_ = 82_28_73
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , __UpperCamelCase )
# verify orig_size
snake_case_ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , __UpperCamelCase ) )
# verify size
snake_case_ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , __UpperCamelCase ) )
| 187 | 1 |
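The shape of the COCO-style detection payload those integration tests feed the processor, sketched with made-up numbers (only `image_id` plus `bbox`/`category_id`/`area`/`iscrowd` entries are shown; `image` stands for a PIL image as in the tests):

target = {
    "image_id": 39769,
    "annotations": [
        {"bbox": [13.0, 22.7, 283.9, 217.1], "category_id": 75, "area": 61_600.0, "iscrowd": 0},
    ],
}
encoding = DetaImageProcessor()(images=image, annotations=target, return_tensors="pt")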
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'''stable diffusion controlnet''',
'''0.22.0''',
'''Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.''',
standard_warn=False,
stacklevel=3,
)
| 710 |
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = "scheduler_config.json"
class KarrasDiffusionSchedulers(Enum):
    DDIMScheduler = 1
    DDPMScheduler = 2
    PNDMScheduler = 3
    LMSDiscreteScheduler = 4
    EulerDiscreteScheduler = 5
    EulerAncestralDiscreteScheduler = 6
    DPMSolverMultistepScheduler = 7
    HeunDiscreteScheduler = 8
    DPMSolverSinglestepScheduler = 9
    KDPM2DiscreteScheduler = 10
    KDPM2AncestralDiscreteScheduler = 11
    DEISMultistepScheduler = 12
    UniPCMultistepScheduler = 13
    DPMSolverSDEScheduler = 14
@dataclass
class SchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
class SchedulerMixin:
    config_name = SCHEDULER_CONFIG_NAME
    _compatibles = []
    has_compatibles = True
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Dict[str, Any] = None, subfolder: Optional[str] = None, return_unused_kwargs=False, **kwargs):
        """simple docstring"""
        config, kwargs, commit_hash = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path, subfolder=subfolder, return_unused_kwargs=True, return_commit_hash=True, **kwargs, )
        return cls.from_config(config, return_unused_kwargs=return_unused_kwargs, **kwargs)
    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        """simple docstring"""
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)
    @property
    def compatibles(self):
        """simple docstring"""
        return self._get_compatibles()
    @classmethod
    def _get_compatibles(cls):
        """simple docstring"""
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
| 373 | 0 |
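The loading path those mixin methods provide, sketched against a public checkpoint (repo id illustrative; assumes network access):

from diffusers import DDIMScheduler

scheduler = DDIMScheduler.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="scheduler")
print(scheduler.compatibles)  # scheduler classes that can be built from the same config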
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self):
"""simple docstring"""
torch.manual_seed(0)
        unet = UNet2DConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=3_2 , )
        scheduler = DDIMScheduler(
            beta_start=0.00_085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
torch.manual_seed(0)
        vae = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=3_2, intermediate_size=3_7, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_0_0_0, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        """simple docstring"""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
"""prompt""": """.""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 1.0,
"""sag_scale""": 1.0,
"""output_type""": """numpy""",
}
return inputs
    def test_inference_batch_single_identical(self):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3)
@slow
@require_torch_gpu
class StableDiffusionSAGPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        """simple docstring"""
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_1(self):
        """simple docstring"""
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=2_0, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        expected_slice = np.array([0.1_568, 0.1_738, 0.1_695, 0.1_693, 0.1_507, 0.1_705, 0.1_547, 0.1_751, 0.1_949])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2
    def test_stable_diffusion_2(self):
        """simple docstring"""
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=2_0, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        expected_slice = np.array([0.3_459, 0.2_876, 0.2_537, 0.3_002, 0.2_671, 0.2_160, 0.3_026, 0.2_262, 0.2_371])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2
    def test_stable_diffusion_2_non_square(self):
        """simple docstring"""
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], width=7_6_8, height=5_1_2, generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=2_0, output_type="np", )

        image = output.images

        assert image.shape == (1, 5_1_2, 7_6_8, 3)
| 567 |
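Self-Attention Guidance from user code, as a sketch (prompt and scales are illustrative; `sag_scale=0.0` disables the guidance term):

import torch
from diffusers import StableDiffusionSAGPipeline

pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
pipe = pipe.to("cuda")
image = pipe("a photo of an astronaut riding a horse", sag_scale=0.75, guidance_scale=7.5).images[0]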
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    '''simple docstring'''
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0

        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0

        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 567 | 1 |
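A worked example for `distribute_coins`: a root holding 3 coins with two empty children needs one move per child, so the function returns 2.

example_root = TreeNode(3, TreeNode(0), TreeNode(0))
assert distribute_coins(example_root) == 2  # one coin pushed down each edge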
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768

    # set label information
    num_labels = 150
    repo_id = 'huggingface/label-files'
    filename = 'ade20k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = SwinConfig(
        embed_dim=embed_dim, depths=depths, num_heads=num_heads, window_size=window_size, out_features=['stage1', 'stage2', 'stage3', 'stage4'], )
    config = UperNetConfig(
        backbone_config=backbone_config, auxiliary_in_channels=auxiliary_in_channels, num_labels=num_labels, id2label=id2label, label2id=label2id, )

    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(('backbone.patch_embed.projection.weight', 'backbone.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.patch_embed.projection.bias', 'backbone.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.patch_embed.norm.weight', 'backbone.embeddings.norm.weight') )
rename_keys.append(('backbone.patch_embed.norm.bias', 'backbone.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.norm1.weight", F"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") )
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.norm1.bias", F"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") )
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table", F"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") )
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index", F"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") )
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight", F"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") )
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias", F"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") )
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.norm2.weight", F"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") )
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.norm2.bias", F"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") )
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight", F"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") )
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias", F"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") )
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight", F"backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight") )
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias", F"backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias") )
if i < 3:
rename_keys.append((F"backbone.stages.{i}.downsample.reduction.weight", F"backbone.encoder.layers.{i}.downsample.reduction.weight") )
rename_keys.append((F"backbone.stages.{i}.downsample.norm.weight", F"backbone.encoder.layers.{i}.downsample.norm.weight") )
rename_keys.append((F"backbone.stages.{i}.downsample.norm.bias", F"backbone.encoder.layers.{i}.downsample.norm.bias") )
rename_keys.append((F"backbone.norm{i}.weight", F"backbone.hidden_states_norms.stage{i+1}.weight") )
rename_keys.append((F"backbone.norm{i}.bias", F"backbone.hidden_states_norms.stage{i+1}.bias") )
# decode head
rename_keys.extend(
[
('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        dim = num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(F"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight" )
            in_proj_bias = state_dict.pop(F"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias" )
            # next, add query, keys and values (in that order) to the state dict
            state_dict[F"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[F"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[F"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[
                dim : dim * 2, :
            ]
            state_dict[F"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[
                dim : dim * 2
            ]
            state_dict[F"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[
                -dim:, :
            ]
            state_dict[F"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
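# Sanity sketch for read_in_q_k_v above (not part of the original script): the fused
# qkv matrix stacks query, key and value along dim 0, so a (3*dim, dim) weight
# splits into three (dim, dim) blocks in q/k/v order:
#   fused = torch.randn(3 * 8, 8)
#   q, k, v = fused[:8, :], fused[8:16, :], fused[-8:, :]
#   assert torch.equal(torch.cat([q, k, v]), fused)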
def correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel , 4 , in_channel // 4 )
    x = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(out_channel , in_channel )
    return x


def reverse_correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel , in_channel // 4 , 4 )
    x = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(out_channel , in_channel )
    return x


def correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(4 , in_channel // 4 )
    x = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(in_channel )
    return x


def reverse_correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4 , 4 )
    x = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(in_channel )
    return x
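# The four helpers above undo/redo the channel interleaving of Swin's patch-merging
# ("unfold") layers: reshape into groups of 4, permute with [0, 2, 1, 3] (a
# permutation that is its own inverse), and flatten back. Each "correct_" helper and
# its "reverse_" counterpart are exact inverses, e.g. (sketch, not in the original
# script):
#   x = torch.randn(8, 16)
#   assert torch.equal(
#       reverse_correct_unfold_reduction_order(correct_unfold_reduction_order(x)), x
#   )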
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
'upernet-swin-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth',
'upernet-swin-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth',
'upernet-swin-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth',
'upernet-swin-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth',
}
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='cpu' , file_name=model_name )[
        'state_dict'
    ]
    for name, param in state_dict.items():
        print(name , param.shape )
    config = get_upernet_config(model_name )
    model = UperNetForSemanticSegmentation(config )
    model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        if "bn" in key:
            key = key.replace('bn' , 'batch_norm' )
        state_dict[key] = val
# rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config.backbone_config )
# fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                value = reverse_correct_unfold_reduction_order(value )
            if "norm" in key:
                value = reverse_correct_unfold_norm_order(value )
            state_dict[key] = value
    model.load_state_dict(state_dict )
# verify on image
    url = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'
    image = Image.open(requests.get(url , stream=True ).raw ).convert('RGB' )
    processor = SegformerImageProcessor()
    pixel_values = processor(image , return_tensors='pt' ).pixel_values
    with torch.no_grad():
        outputs = model(pixel_values )
        logits = outputs.logits
print(logits.shape )
print('First values of logits:' , logits[0, 0, :3, :3] )
# assert values
if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
[[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] )
elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
[[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]] )
elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
[[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]] )
elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
[[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]] )
print('Logits:' , outputs.logits[0, 0, :3, :3] )
    assert torch.allclose(outputs.logits[0, 0, :3, :3] , expected_slice , atol=1e-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
        model.save_pretrained(pytorch_dump_folder_path )
print(F"Saving processor to {pytorch_dump_folder_path}" )
        processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
print(F"Pushing model and processor for {model_name} to hub" )
model.push_to_hub(F"openmmlab/{model_name}" )
processor.push_to_hub(F"openmmlab/{model_name}" )
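# Example invocation (script name and paths are illustrative):
#   python convert_swin_upernet_to_pytorch.py --model_name upernet-swin-tiny \
#       --pytorch_dump_folder_path ./upernet-swin-tiny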
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""upernet-swin-tiny""",
type=str,
choices=[f'upernet-swin-{size}' for size in ["""tiny""", """small""", """base""", """large"""]],
help="""Name of the Swin + UperNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 549 |
"""simple docstring"""
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    def __init__( self ):
        self.connections = {}

    def add_node( self , node ):
        self.connections[node] = {}

    def add_transition_probability( self , node1 , node2 , probability ):
        if node1 not in self.connections:
            self.add_node(node1 )
        if node2 not in self.connections:
            self.add_node(node2 )
        self.connections[node1][node2] = probability

    def get_nodes( self ):
        return list(self.connections )

    def transition( self , node ):
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(start , transitions , steps ):
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1 , node2 , probability )
    visited = Counter(graph.get_nodes() )
    node = start
    for _ in range(steps ):
        node = graph.transition(node )
        visited[node] += 1
    return visited
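# Example (a sketch, not part of the original module): a two-state chain where "a"
# mostly loops back to itself. transition() draws the next node by inverse-CDF
# sampling, accumulating edge probabilities until the total exceeds a uniform draw.
#   transitions = [("a", "a", 0.9), ("a", "b", 0.1), ("b", "a", 0.5), ("b", "b", 0.5)]
#   visited = get_transitions("a", transitions, 5000)
# `visited` is a Counter; with these weights "a" is visited far more often than "b".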
if __name__ == "__main__":
import doctest
doctest.testmod() | 549 | 1 |
"""simple docstring"""
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
logger = logging.get_logger(__name__)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
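# `list_field` exists because mutable defaults are not allowed directly on
# dataclass fields: wrapping the list in `default_factory` gives every instance a
# fresh copy, e.g. `batch_sizes: List[int] = list_field(default=[8])` below.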
@dataclass
class BenchmarkArguments:
    models: List[str] = list_field(
        default=[],
        metadata={
            "help": (
                "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
                " of all available models"
            )
        },
    )
    batch_sizes: List[int] = list_field(
        default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"}
    )
    sequence_lengths: List[int] = list_field(
        default=[8, 32, 128, 512],
        metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"},
    )
    inference: bool = field(
        default=True,
        metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."},
    )
    cuda: bool = field(
        default=True,
        metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."},
    )
    tpu: bool = field(
        default=True, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."}
    )
    fp16: bool = field(default=False, metadata={"help": "Use FP16 to accelerate inference."})
    training: bool = field(default=False, metadata={"help": "Benchmark training of model"})
    verbose: bool = field(default=False, metadata={"help": "Verbose memory tracing"})
    speed: bool = field(
        default=True,
        metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."},
    )
    memory: bool = field(
        default=True,
        metadata={
            "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
        },
    )
    trace_memory_line_by_line: bool = field(default=False, metadata={"help": "Trace memory line by line"})
    save_to_csv: bool = field(default=False, metadata={"help": "Save result to a CSV file"})
    log_print: bool = field(default=False, metadata={"help": "Save all print statements in a log file"})
    env_print: bool = field(default=False, metadata={"help": "Whether to print environment information"})
    multi_process: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
                " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
                " for debugging / testing and on TPU."
            )
        },
    )
    inference_time_csv_file: str = field(
        default=f"""inference_time_{round(time() )}.csv""",
        metadata={"help": "CSV filename used if saving time results to csv."},
    )
    inference_memory_csv_file: str = field(
        default=f"""inference_memory_{round(time() )}.csv""",
        metadata={"help": "CSV filename used if saving memory results to csv."},
    )
    train_time_csv_file: str = field(
        default=f"""train_time_{round(time() )}.csv""",
        metadata={"help": "CSV filename used if saving time results to csv for training."},
    )
    train_memory_csv_file: str = field(
        default=f"""train_memory_{round(time() )}.csv""",
        metadata={"help": "CSV filename used if saving memory results to csv for training."},
    )
    env_info_csv_file: str = field(
        default=f"""env_info_{round(time() )}.csv""",
        metadata={"help": "CSV filename used if saving environment information."},
    )
    log_filename: str = field(
        default=f"""log_{round(time() )}.csv""",
        metadata={"help": "Log filename used if print statements are saved in log."},
    )
    repeat: int = field(default=3, metadata={"help": "Times an experiment will be run."})
    only_pretrain_model: bool = field(
        default=False,
        metadata={
            "help": (
                "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
                " model weights."
            )
        },
    )

    def __post_init__( self ):
        warnings.warn(
            F"""The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"""
            " are deprecated in general and it is advised to use external Benchmarking libraries "
            " to benchmark Transformer models." , FutureWarning , )

    def to_json_string( self ):
        return json.dumps(dataclasses.asdict(self ) , indent=2 )

    @property
    def model_names( self ):
        if len(self.models ) <= 0:
            raise ValueError(
                "Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
                " bert-base-cased` or `args.models = ['bert-base-cased']." )
        return self.models

    @property
    def do_multi_processing( self ):
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info("Multiprocessing is currently not possible on TPU." )
            return False
        else:
            return True | 237 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_pix2struct""": [
"""PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Pix2StructConfig""",
"""Pix2StructTextConfig""",
"""Pix2StructVisionConfig""",
],
"""processing_pix2struct""": ["""Pix2StructProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_pix2struct"] = ["""Pix2StructImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pix2struct"] = [
"""PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Pix2StructPreTrainedModel""",
"""Pix2StructForConditionalGeneration""",
"""Pix2StructVisionModel""",
"""Pix2StructTextModel""",
]
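# `_LazyModule` defers the actual torch/vision imports until an attribute is first
# accessed; `_import_structure` above only lists the names, so `import transformers`
# stays cheap even when the heavy backends are installed.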
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 237 | 1 |
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path , big_bird_config_file , pytorch_dump_path , is_trivia_qa ):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file )
    print(F'''Building PyTorch model from configuration: {config}''' )

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config )
    else:
        model = BigBirdForPreTraining(config )

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model , tf_checkpoint_path , is_trivia_qa=is_trivia_qa )

    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''' )
    model.save_pretrained(pytorch_dump_path )
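# Example invocation (script name and paths are illustrative):
#   python convert_bigbird_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./bigbird_ckpt --big_bird_config_file ./config.json \
#       --pytorch_dump_path ./pytorch_model --is_trivia_qa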
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--big_bird_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_trivia_qa""", action="""store_true""", help="""Whether to convert a model with a trivia_qa head."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
| 717 |
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class _UpperCAmelCase ( _A ):
"""simple docstring"""
A = ['''vqvae''']
def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=_lowerCAmelCase , scheduler=_lowerCAmelCase , mel=_lowerCAmelCase , vqvae=_lowerCAmelCase )
def snake_case_ ( self ):
'''simple docstring'''
return 50 if isinstance(self.scheduler , _lowerCAmelCase ) else 1_000
@torch.no_grad()
def __call__( self , _lowerCAmelCase = 1 , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = 0 , _lowerCAmelCase = 0 , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = 0 , _lowerCAmelCase = 0 , _lowerCAmelCase = None , _lowerCAmelCase = 0 , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase=True , ):
'''simple docstring'''
lowerCAmelCase__ :str = steps or self.get_default_steps()
self.scheduler.set_timesteps(_lowerCAmelCase )
lowerCAmelCase__ :Dict = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
lowerCAmelCase__ :Dict = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
lowerCAmelCase__ :Optional[int] = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=_lowerCAmelCase , device=self.device , )
lowerCAmelCase__ :Union[str, Any] = noise
lowerCAmelCase__ :Any = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(_lowerCAmelCase , _lowerCAmelCase )
lowerCAmelCase__ :Dict = self.mel.audio_slice_to_image(_lowerCAmelCase )
lowerCAmelCase__ :List[str] = np.frombuffer(input_image.tobytes() , dtype="uint8" ).reshape(
(input_image.height, input_image.width) )
lowerCAmelCase__ :Tuple = (input_image / 255) * 2 - 1
lowerCAmelCase__ :Any = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
lowerCAmelCase__ :str = self.vqvae.encode(torch.unsqueeze(_lowerCAmelCase , 0 ) ).latent_dist.sample(
generator=_lowerCAmelCase )[0]
lowerCAmelCase__ :Dict = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
lowerCAmelCase__ :Dict = self.scheduler.add_noise(_lowerCAmelCase , _lowerCAmelCase , self.scheduler.timesteps[start_step - 1] )
lowerCAmelCase__ :Optional[Any] = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
lowerCAmelCase__ :Dict = int(mask_start_secs * pixels_per_second )
lowerCAmelCase__ :Tuple = int(mask_end_secs * pixels_per_second )
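            # pixels_per_second converts wall-clock seconds into spectrogram columns:
            # e.g. (illustrative numbers) with a 22050 Hz sample rate, hop_length=512
            # and a sample width equal to x_res, one second covers roughly
            # 22050 / 512, about 43 columns, so mask_start_secs/mask_end_secs become
            # column counts for the masking applied in the denoising loop below.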
lowerCAmelCase__ :str = self.scheduler.add_noise(_lowerCAmelCase , _lowerCAmelCase , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , _lowerCAmelCase ):
lowerCAmelCase__ :Optional[Any] = self.unet(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )["sample"]
else:
lowerCAmelCase__ :Dict = self.unet(_lowerCAmelCase , _lowerCAmelCase )["sample"]
if isinstance(self.scheduler , _lowerCAmelCase ):
lowerCAmelCase__ :Any = self.scheduler.step(
model_output=_lowerCAmelCase , timestep=_lowerCAmelCase , sample=_lowerCAmelCase , eta=_lowerCAmelCase , generator=_lowerCAmelCase , )["prev_sample"]
else:
lowerCAmelCase__ :List[str] = self.scheduler.step(
model_output=_lowerCAmelCase , timestep=_lowerCAmelCase , sample=_lowerCAmelCase , generator=_lowerCAmelCase , )["prev_sample"]
if mask is not None:
if mask_start > 0:
lowerCAmelCase__ :List[Any] = mask[:, step, :, :mask_start]
if mask_end > 0:
lowerCAmelCase__ :Optional[Any] = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
lowerCAmelCase__ :Any = 1 / self.vqvae.config.scaling_factor * images
lowerCAmelCase__ :List[Any] = self.vqvae.decode(_lowerCAmelCase )["sample"]
lowerCAmelCase__ :Dict = (images / 2 + 0.5).clamp(0 , 1 )
lowerCAmelCase__ :Union[str, Any] = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
lowerCAmelCase__ :Optional[int] = (images * 255).round().astype("uint8" )
lowerCAmelCase__ :Optional[int] = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
            else (Image.fromarray(_ , mode="RGB" ).convert("L" ) for _ in images) )
lowerCAmelCase__ :Optional[Any] = [self.mel.image_to_audio(_lowerCAmelCase ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(_lowerCAmelCase )[:, np.newaxis, :] ) , **ImagePipelineOutput(_lowerCAmelCase ) )
@torch.no_grad()
def snake_case_ ( self , _lowerCAmelCase , _lowerCAmelCase = 50 ):
'''simple docstring'''
assert isinstance(self.scheduler , _lowerCAmelCase )
self.scheduler.set_timesteps(_lowerCAmelCase )
lowerCAmelCase__ :Any = np.array(
[np.frombuffer(image.tobytes() , dtype="uint8" ).reshape((1, image.height, image.width) ) for image in images] )
lowerCAmelCase__ :Dict = (sample / 255) * 2 - 1
lowerCAmelCase__ :Optional[Any] = torch.Tensor(_lowerCAmelCase ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
lowerCAmelCase__ :List[Any] = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
lowerCAmelCase__ :Any = self.scheduler.alphas_cumprod[t]
lowerCAmelCase__ :List[Any] = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
lowerCAmelCase__ :List[str] = 1 - alpha_prod_t
lowerCAmelCase__ :List[Any] = self.unet(_lowerCAmelCase , _lowerCAmelCase )["sample"]
lowerCAmelCase__ :int = (1 - alpha_prod_t_prev) ** 0.5 * model_output
lowerCAmelCase__ :List[Any] = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
lowerCAmelCase__ :Union[str, Any] = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
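    # The static method below is spherical linear interpolation (slerp): with theta
    # the angle between x0 and x1,
    #   slerp(x0, x1, alpha) = sin((1 - alpha) * theta) / sin(theta) * x0
    #                        + sin(alpha * theta) / sin(theta) * x1,
    # which follows the great-circle arc between two latents instead of the straight
    # line a plain lerp would take.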
@staticmethod
def snake_case_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = acos(torch.dot(torch.flatten(_lowerCAmelCase ) , torch.flatten(_lowerCAmelCase ) ) / torch.norm(_lowerCAmelCase ) / torch.norm(_lowerCAmelCase ) )
return sin((1 - alpha) * theta ) * xa / sin(_lowerCAmelCase ) + sin(alpha * theta ) * xa / sin(_lowerCAmelCase )
| 111 | 0 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
def relu(vector: list[float] ):
    return np.maximum(0 , vector )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 476 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def remove_ignore_keys_(state_dict ):
    ignore_keys = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
        state_dict.pop(k , None )
def make_linear_from_emb(emb ):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_mamaaa_checkpoint_from_disk(checkpoint_path ):
    mam_aaa = torch.load(checkpoint_path , map_location='''cpu''' )
    args = mam_aaa['''args'''] or mam_aaa['''cfg''']['''model''']
    state_dict = mam_aaa['''model''']
    remove_ignore_keys_(state_dict )
    vocab_size = state_dict['''encoder.embed_tokens.weight'''].shape[0]
    config = MaMaaaConfig(
        vocab_size=vocab_size, max_position_embeddings=1_024, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, encoder_layerdrop=args.encoder_layerdrop, decoder_layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function='''relu''', )
    state_dict['''shared.weight'''] = state_dict['''decoder.embed_tokens.weight''']
    model = MaMaaaForConditionalGeneration(config )
    model.model.load_state_dict(state_dict, strict=False )
    model.lm_head = make_linear_from_emb(model.model.shared )
    return model
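# Example (hypothetical path): convert a fairseq checkpoint and save it in
# Transformers format.
#   model = convert_fairseq_mamaaa_checkpoint_from_disk("./m2m100/model.pt")
#   model.save_pretrained("./m2m100-hf")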
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
    model = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 467 | 0 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
__UpperCAmelCase = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""")
@dataclass
class lowercase__:
'''simple docstring'''
snake_case__ = field(
default='''cifar10''' , metadata={'''help''': '''Name of a dataset from the datasets package'''} )
snake_case__ = field(
default=lowercase__ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
snake_case__ = field(
default=lowercase__ , metadata={'''help''': '''The column name of the images in the files.'''} )
snake_case__ = field(default=lowercase__ , metadata={'''help''': '''A folder containing the training data.'''} )
snake_case__ = field(default=lowercase__ , metadata={'''help''': '''A folder containing the validation data.'''} )
snake_case__ = field(
default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} )
snake_case__ = field(
default=lowercase__ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
snake_case__ = field(
default=lowercase__ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
def UpperCAmelCase ( self) -> Dict:
"""simple docstring"""
UpperCamelCase__ : int ={}
if self.train_dir is not None:
UpperCamelCase__ : str =self.train_dir
if self.validation_dir is not None:
UpperCamelCase__ : str =self.validation_dir
UpperCamelCase__ : Any =data_files if data_files else None
@dataclass
class lowercase__:
'''simple docstring'''
snake_case__ = field(
default=lowercase__ , metadata={
'''help''': (
'''The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'''
)
} , )
snake_case__ = field(
default=lowercase__ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name_or_path'''} )
snake_case__ = field(
default=lowercase__ , metadata={
'''help''': (
'''Override some existing default config settings when a model is trained from scratch. Example: '''
'''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
)
} , )
snake_case__ = field(
default=lowercase__ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from s3'''} )
snake_case__ = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
snake_case__ = field(default=lowercase__ , metadata={'''help''': '''Name or path of preprocessor config.'''} )
snake_case__ = field(
default=lowercase__ , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
snake_case__ = field(
default=0.75 , metadata={'''help''': '''The ratio of the number of masked tokens in the input sequence.'''} )
snake_case__ = field(
default=lowercase__ , metadata={'''help''': '''Whether or not to train with normalized pixel values as target.'''} )
@dataclass
class lowercase__( lowercase__ ):
'''simple docstring'''
snake_case__ = field(
default=1E-3 , metadata={'''help''': '''Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'''} )
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples] )
    return {"pixel_values": pixel_values}
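# The collator only stacks the per-example image tensors into one
# (batch, channels, height, width) tensor; no labels are needed because ViTMAE
# derives its reconstruction target from the pixel values themselves.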
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_mae" , A_ , A_ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCamelCase__ : Union[str, Any] =training_args.get_process_log_level()
logger.setLevel(A_ )
transformers.utils.logging.set_verbosity(A_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
UpperCamelCase__ : Tuple =None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCamelCase__ : List[Any] =get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Initialize our dataset.
UpperCamelCase__ : int =load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
UpperCamelCase__ : Any =None if '''validation''' in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , A_ ) and data_args.train_val_split > 0.0:
UpperCamelCase__ : Optional[Any] =ds['''train'''].train_test_split(data_args.train_val_split )
UpperCamelCase__ : Optional[Any] =split['''train''']
UpperCamelCase__ : Any =split['''test''']
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCamelCase__ : List[str] ={
'''cache_dir''': model_args.cache_dir,
'''revision''': model_args.model_revision,
'''use_auth_token''': True if model_args.use_auth_token else None,
}
if model_args.config_name:
UpperCamelCase__ : List[str] =ViTMAEConfig.from_pretrained(model_args.config_name , **A_ )
elif model_args.model_name_or_path:
UpperCamelCase__ : Optional[Any] =ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **A_ )
else:
UpperCamelCase__ : Optional[int] =ViTMAEConfig()
logger.warning("You are instantiating a new config instance from scratch." )
if model_args.config_overrides is not None:
logger.info(f'''Overriding config: {model_args.config_overrides}''' )
config.update_from_string(model_args.config_overrides )
logger.info(f'''New config: {config}''' )
# adapt config
config.update(
{
"mask_ratio": model_args.mask_ratio,
"norm_pix_loss": model_args.norm_pix_loss,
} )
# create image processor
if model_args.image_processor_name:
UpperCamelCase__ : Optional[int] =ViTImageProcessor.from_pretrained(model_args.image_processor_name , **A_ )
elif model_args.model_name_or_path:
UpperCamelCase__ : Optional[int] =ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **A_ )
else:
UpperCamelCase__ : Dict =ViTImageProcessor()
# create model
if model_args.model_name_or_path:
UpperCamelCase__ : Optional[Any] =ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=A_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("Training new model from scratch" )
UpperCamelCase__ : Dict =ViTMAEForPreTraining(A_ )
if training_args.do_train:
UpperCamelCase__ : Union[str, Any] =ds['''train'''].column_names
else:
UpperCamelCase__ : Any =ds['''validation'''].column_names
if data_args.image_column_name is not None:
UpperCamelCase__ : int =data_args.image_column_name
elif "image" in column_names:
UpperCamelCase__ : int ='''image'''
elif "img" in column_names:
UpperCamelCase__ : List[Any] ='''img'''
else:
UpperCamelCase__ : Any =column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
UpperCamelCase__ : List[str] =image_processor.size['''shortest_edge''']
else:
UpperCamelCase__ : Union[str, Any] =(image_processor.size['''height'''], image_processor.size['''width'''])
UpperCamelCase__ : Union[str, Any] =Compose(
[
Lambda(lambda A_ : img.convert("RGB" ) if img.mode != "RGB" else img ),
RandomResizedCrop(A_ , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(A_ : int ):
UpperCamelCase__ : str =[transforms(A_ ) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("--do_train requires a train dataset" )
if data_args.max_train_samples is not None:
UpperCamelCase__ : Optional[Any] =ds['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(A_ )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("--do_eval requires a validation dataset" )
if data_args.max_eval_samples is not None:
UpperCamelCase__ : Union[str, Any] =(
ds['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(A_ )
# Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256
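        # Linear scaling rule from the MAE paper (illustrative numbers): with
        # base_learning_rate=1.5e-4 and a total train batch size of 4096, the
        # absolute learning rate becomes 1.5e-4 * 4096 / 256 = 2.4e-3.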
# Initialize our trainer
UpperCamelCase__ : Union[str, Any] =Trainer(
model=A_ , args=A_ , train_dataset=ds["train"] if training_args.do_train else None , eval_dataset=ds["validation"] if training_args.do_eval else None , tokenizer=A_ , data_collator=A_ , )
# Training
if training_args.do_train:
UpperCamelCase__ : int =None
if training_args.resume_from_checkpoint is not None:
UpperCamelCase__ : Union[str, Any] =training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCamelCase__ : Optional[int] =last_checkpoint
UpperCamelCase__ : Any =trainer.train(resume_from_checkpoint=A_ )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
UpperCamelCase__ : Optional[Any] =trainer.evaluate()
trainer.log_metrics("eval" , A_ )
trainer.save_metrics("eval" , A_ )
# Write model card and (optionally) push to hub
UpperCamelCase__ : Any ={
'''tasks''': '''masked-auto-encoding''',
'''dataset''': data_args.dataset_name,
'''tags''': ['''masked-auto-encoding'''],
}
if training_args.push_to_hub:
trainer.push_to_hub(**A_ )
else:
trainer.create_model_card(**A_ )
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 700 |
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def get_openlibrary_data(olid: str = "isbn/0140328726" ) -> dict:
    new_olid = olid.strip().strip("/" )  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/" ) != 1:
        msg = f'''{olid} is not a valid Open Library olid'''
        raise ValueError(msg )
    return requests.get(f'''https://openlibrary.org/{new_olid}.json''' ).json()


def summarize_book(ol_book_data: dict ) -> dict:
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [
        get_openlibrary_data(author["key"] )["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value , list ):
            data[key] = ", ".join(value )
    return data
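# Example (performs a live network call): the default olid "isbn/0140328726"
# resolves to Roald Dahl's "Matilda", so
#   summarize_book(get_openlibrary_data())
# returns a dict with keys such as "Title", "Authors" and "ISBN (13)", with any
# list values joined into comma-separated strings.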
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
        isbn = input("""\nEnter the ISBN code to search (or 'quit' to stop): """).strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(F"""Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.""")
continue
print(F"""\nSearching Open Library for ISBN: {isbn}...\n""")
try:
            book_summary = summarize_book(get_openlibrary_data(F"""isbn/{isbn}"""))
print("""\n""".join(F"""{key}: {value}""" for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(F"""Sorry, there are no results for ISBN: {isbn}.""")
| 582 | 0 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowerCAmelCase__ : List[str] = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def _lowerCamelCase ( self : str ,UpperCamelCase : Any ,UpperCamelCase : Union[str, Any] ,UpperCamelCase : Optional[int] ) -> List[Any]:
_lowercase : Optional[Any] = TextaTextGenerationPipeline(model=UpperCamelCase ,tokenizer=UpperCamelCase )
return generator, ["Something to write", "Something else"]
def _lowerCamelCase ( self : Dict ,UpperCamelCase : Optional[Any] ,UpperCamelCase : int ) -> List[Any]:
_lowercase : Union[str, Any] = generator('Something there' )
self.assertEqual(UpperCamelCase ,[{'generated_text': ANY(UpperCamelCase )}] )
# These are encoder decoder, they don't just append to incoming string
self.assertFalse(outputs[0]['generated_text'].startswith('Something there' ) )
_lowercase : List[str] = generator(['This is great !', 'Something else'] ,num_return_sequences=2 ,do_sample=UpperCamelCase )
self.assertEqual(
UpperCamelCase ,[
[{'generated_text': ANY(UpperCamelCase )}, {'generated_text': ANY(UpperCamelCase )}],
[{'generated_text': ANY(UpperCamelCase )}, {'generated_text': ANY(UpperCamelCase )}],
] ,)
_lowercase : Tuple = generator(
['This is great !', 'Something else'] ,num_return_sequences=2 ,batch_size=2 ,do_sample=UpperCamelCase )
self.assertEqual(
UpperCamelCase ,[
[{'generated_text': ANY(UpperCamelCase )}, {'generated_text': ANY(UpperCamelCase )}],
[{'generated_text': ANY(UpperCamelCase )}, {'generated_text': ANY(UpperCamelCase )}],
] ,)
with self.assertRaises(UpperCamelCase ):
generator(4 )
@require_torch
def _lowerCamelCase ( self : str ) -> str:
_lowercase : Dict = pipeline('text2text-generation' ,model='patrickvonplaten/t5-tiny-random' ,framework='pt' )
# do_sample=False necessary for reproducibility
_lowercase : str = generator('Something there' ,do_sample=UpperCamelCase )
self.assertEqual(UpperCamelCase ,[{'generated_text': ''}] )
_lowercase : int = 3
_lowercase : Any = generator(
'Something there' ,num_return_sequences=UpperCamelCase ,num_beams=UpperCamelCase ,)
_lowercase : Dict = [
{'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide Beide'},
{'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide'},
{'generated_text': ''},
]
self.assertEqual(UpperCamelCase ,UpperCamelCase )
_lowercase : Any = generator('This is a test' ,do_sample=UpperCamelCase ,num_return_sequences=2 ,return_tensors=UpperCamelCase )
self.assertEqual(
UpperCamelCase ,[
{'generated_token_ids': ANY(torch.Tensor )},
{'generated_token_ids': ANY(torch.Tensor )},
] ,)
_lowercase : Optional[int] = generator.model.config.eos_token_id
_lowercase : Dict = '<pad>'
_lowercase : str = generator(
['This is a test', 'This is a second test'] ,do_sample=UpperCamelCase ,num_return_sequences=2 ,batch_size=2 ,return_tensors=UpperCamelCase ,)
self.assertEqual(
UpperCamelCase ,[
[
{'generated_token_ids': ANY(torch.Tensor )},
{'generated_token_ids': ANY(torch.Tensor )},
],
[
{'generated_token_ids': ANY(torch.Tensor )},
{'generated_token_ids': ANY(torch.Tensor )},
],
] ,)
@require_tf
def _lowerCamelCase ( self : List[Any] ) -> Any:
_lowercase : Tuple = pipeline('text2text-generation' ,model='patrickvonplaten/t5-tiny-random' ,framework='tf' )
# do_sample=False necessary for reproducibility
_lowercase : List[Any] = generator('Something there' ,do_sample=UpperCamelCase )
self.assertEqual(UpperCamelCase ,[{'generated_text': ''}] ) | 125 |
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json''',
}
class MvpConfig(PretrainedConfig):
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=5_0267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated" , False ):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                F'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '''
                'The config can simply be saved and uploaded again to be fixed.' ) | 125 | 1 |
"""simple docstring"""
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__A : Optional[int] = get_tests_dir('''fixtures/spiece.model''')
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( _A , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : Dict = AlbertTokenizer
SCREAMING_SNAKE_CASE_ : Optional[Any] = AlbertTokenizerFast
SCREAMING_SNAKE_CASE_ : int = True
SCREAMING_SNAKE_CASE_ : Any = True
SCREAMING_SNAKE_CASE_ : Tuple = True
def A ( self : str ) -> Tuple:
super().setUp()
# We have a SentencePiece fixture for testing
lowercase_ : Tuple = AlbertTokenizer(A )
tokenizer.save_pretrained(self.tmpdirname )
def A ( self : List[Any] , A : str ) -> Tuple:
lowercase_ : Optional[int] = '''this is a test'''
lowercase_ : Dict = '''this is a test'''
return input_text, output_text
def A ( self : Optional[int] ) -> Optional[Any]:
lowercase_ : List[Any] = '''<pad>'''
lowercase_ : str = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) , A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) , A )
def A ( self : Any ) -> Tuple:
lowercase_ : int = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''<unk>''' )
self.assertEqual(vocab_keys[-1] , '''▁eloquent''' )
self.assertEqual(len(A ) , 3_00_00 )
def A ( self : List[Any] ) -> Union[str, Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 3_00_00 )
def A ( self : Tuple ) -> Any:
if not self.test_rust_tokenizer:
return
lowercase_ : int = self.get_tokenizer()
lowercase_ : List[Any] = self.get_rust_tokenizer()
lowercase_ : Any = '''I was born in 92000, and this is falsé.'''
lowercase_ : Union[str, Any] = tokenizer.tokenize(A )
lowercase_ : int = rust_tokenizer.tokenize(A )
self.assertListEqual(A , A )
lowercase_ : List[str] = tokenizer.encode(A , add_special_tokens=A )
lowercase_ : Optional[int] = rust_tokenizer.encode(A , add_special_tokens=A )
self.assertListEqual(A , A )
lowercase_ : Any = self.get_rust_tokenizer()
lowercase_ : Union[str, Any] = tokenizer.encode(A )
lowercase_ : Tuple = rust_tokenizer.encode(A )
self.assertListEqual(A , A )
def A ( self : Optional[int] ) -> Optional[int]:
lowercase_ : Union[str, Any] = AlbertTokenizer(A , keep_accents=A )
lowercase_ : str = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(A , ['''▁this''', '''▁is''', '''▁a''', '''▁test'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , [48, 25, 21, 12_89] )
lowercase_ : Optional[int] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
A , ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.'''] )
lowercase_ : Any = tokenizer.convert_tokens_to_ids(A )
self.assertListEqual(A , [31, 23, 3_86, 19, 5_61, 30_50, 15, 17, 48, 25, 82_56, 18, 1, 9] )
lowercase_ : int = tokenizer.convert_ids_to_tokens(A )
self.assertListEqual(
A , ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.'''] , )
def A ( self : Optional[int] ) -> Optional[Any]:
lowercase_ : List[Any] = AlbertTokenizer(A )
lowercase_ : Optional[int] = tokenizer.encode('''sequence builders''' )
lowercase_ : Optional[Any] = tokenizer.encode('''multi-sequence build''' )
lowercase_ : Any = tokenizer.build_inputs_with_special_tokens(A )
lowercase_ : Dict = tokenizer.build_inputs_with_special_tokens(A , A )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def A ( self : str ) -> Optional[int]:
# fmt: off
lowercase_ : Optional[Any] = {'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''input_ids''': [[2, 2_19_70, 13, 5, 60_92, 1_67, 28, 71_03, 21_53, 6_73, 8, 70_28, 1_20_51, 18, 17, 71_03, 21_53, 6_73, 8, 35_15, 1_86_84, 8, 44_61, 6, 19_27, 2_97, 8, 1_20_60, 26_07, 18, 13, 5, 44_61, 15, 1_05_38, 38, 8, 1_35, 15, 8_22, 58, 15, 9_93, 1_03_63, 15, 14_60, 80_05, 44_61, 15, 9_93, 2_55, 23_28, 9, 9, 9, 6, 26, 11_12, 8_16, 32_60, 13, 5, 1_03, 23_77, 6, 17, 11_12, 8_16, 27_82, 13, 5, 1_03, 1_06_41, 6, 29, 84, 25_12, 24_30, 7_82, 1_86_84, 27_61, 19, 8_08, 24_30, 25_56, 17, 8_55, 14_80, 94_77, 40_91, 1_28, 1_17_12, 15, 71_03, 21_53, 6_73, 17, 2_48_83, 99_90, 9, 3], [2, 1_15_02, 25, 10_06, 20, 7_82, 8, 1_18_09, 8_55, 17_32, 1_93_93, 1_86_67, 37, 3_67, 2_10_18, 69, 18_54, 34, 1_18_60, 1_91_24, 27, 1_56, 2_25, 17, 1_93, 41_41, 19, 65, 91_24, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 22_31, 8_86, 23_85, 1_76_59, 84, 14, 1_67_92, 19_52, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A , model_name='''albert-base-v2''' , revision='''6b6560eaf5ff2e250b00c50f380c5389a9c2d82e''' , )
| 721 |
"""simple docstring"""
from math import pow
def backtrack(
    needed_sum: int,
    power: int,
    current_number: int,
    current_sum: int,
    solutions_count: int,
):
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number , power ) )
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum , power , current_number + 1 , current_sum , solutions_count )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum , power , current_number + 1 , current_sum , solutions_count )
    return current_sum, solutions_count


def solve(needed_sum: int , power: int ):
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            '''Invalid input\n'''
            '''needed_sum must be between 1 and 1000, power between 2 and 10.''' )

    return backtrack(needed_sum , power , 1 , 0 , 0 )[1]  # Return the solutions_count
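# For example, solve(13, 2) == 1: the only way to write 13 as a sum of squares of
# distinct natural numbers is 13 = 2**2 + 3**2.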
if __name__ == "__main__":
import doctest
doctest.testmod()
| 141 | 0 |
def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
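# Example: create_ngram("I am", 2) returns the character bigrams ["I ", " a", "am"].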
if __name__ == "__main__":
from doctest import testmod
testmod() | 6 |
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class CanineTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CanineTokenizer
    test_rust_tokenizer = False

    def setUp(self) -> None:
        super().setUp()
        tokenizer = CanineTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def canine_tokenizer(self):
        return CanineTokenizer.from_pretrained("google/canine-s")

    def get_tokenizer(self, **kwargs) -> CanineTokenizer:
        tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
        # Attribute name reconstructed: caps the unicode vocab used by the common tests.
        tokenizer._unicode_vocab_size = 1024
        return tokenizer
    @require_torch
    def test_prepare_batch_integration(self) -> None:
        tokenizer = self.canine_tokenizer
        src_text = ["Life is like a box of chocolates.", "You never know what you're gonna get."]
        # fmt: off
        expected_src_tokens = [57344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57345, 0, 0, 0, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)

        result = list(batch.input_ids.numpy()[0])

        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 39), batch.input_ids.shape)
        self.assertEqual((2, 39), batch.attention_mask.shape)
    @require_torch
    def test_encoding_keys(self) -> None:
        tokenizer = self.canine_tokenizer
        src_text = ["Once there was a man.", "He wrote a test in HuggingFace Tranformers."]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        # check if input_ids, attention_mask and token_type_ids are returned
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertIn("token_type_ids", batch)
    @require_torch
    def test_max_length_integration(self) -> None:
        tokenizer = self.canine_tokenizer
        tgt_text = [
            "What's the weater?",
            "It's about 25 degrees.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors="pt"
        )
        self.assertEqual(32, targets["input_ids"].shape[1])
    def test_save_and_load_tokenizer(self) -> None:
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                additional_special_tokens = tokenizer.additional_special_tokens
                # We can add a new special token for Canine as follows:
                new_additional_special_token = chr(0xE007)
                additional_special_tokens.append(new_additional_special_token)
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn(new_additional_special_token, after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)
    def test_add_special_tokens(self) -> None:
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, ids = self.get_clean_sequence(tokenizer)

                # a special token for Canine can be defined as follows:
                SPECIAL_TOKEN = 0xE005
                special_token = chr(SPECIAL_TOKEN)

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode(special_token, add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                text = tokenizer.decode(ids + encoded_special_token, clean_up_tokenization_spaces=False)
                encoded = tokenizer.encode(text, add_special_tokens=False)

                input_encoded = tokenizer.encode(input_text, add_special_tokens=False)
                special_token_id = tokenizer.encode(special_token, add_special_tokens=False)
                self.assertEqual(encoded, input_encoded + special_token_id)

                decoded = tokenizer.decode(encoded, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)
    def test_tokenize_special_tokens(self) -> None:
        tokenizers = self.get_tokenizers(do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                SPECIAL_TOKEN_1 = chr(0xE005)
                SPECIAL_TOKEN_2 = chr(0xE006)

                # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
                tokenizer.add_tokens([SPECIAL_TOKEN_1], special_tokens=True)
                # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
                # which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
                tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]})

                token_1 = tokenizer.tokenize(SPECIAL_TOKEN_1)
                token_2 = tokenizer.tokenize(SPECIAL_TOKEN_2)

                self.assertEqual(len(token_1), 1)
                self.assertEqual(len(token_2), 1)
                self.assertEqual(token_1[0], SPECIAL_TOKEN_1)
                self.assertEqual(token_2[0], SPECIAL_TOKEN_2)
    @require_tokenizers
    def test_added_token_serializable(self) -> None:
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # a special token for Canine can be defined as follows:
                NEW_TOKEN = 0xE006
                new_token = chr(NEW_TOKEN)

                new_token = AddedToken(new_token, lstrip=True)
                tokenizer.add_special_tokens({"additional_special_tokens": [new_token]})

                with tempfile.TemporaryDirectory() as tmp_dir_name:
                    tokenizer.save_pretrained(tmp_dir_name)
                    tokenizer.from_pretrained(tmp_dir_name)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self) -> None:
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                # a special token for Canine can be defined as follows:
                NEW_TOKEN = 0xE006
                new_token_1 = chr(NEW_TOKEN)

                special_tokens_map["additional_special_tokens"] = [new_token_1]
                tokenizer_config["additional_special_tokens"] = [new_token_1]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(tmp_dir, extra_ids=0)
                self.assertIn(new_token_1, tokenizer_without_change_in_init.additional_special_tokens)
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_1],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_1])
                    ),
                )

                NEW_TOKEN = 0xE007
                new_token_2 = chr(NEW_TOKEN)
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = [AddedToken(new_token_2, lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir, additional_special_tokens=new_added_tokens, extra_ids=0
                )

                self.assertIn(new_token_2, tokenizer.additional_special_tokens)
                # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_2], tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_2]))
                )
    @require_tokenizers
    def test_encode_decode_with_spaces(self) -> None:
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input = "hello world"
                if self.space_between_special_tokens:
                    output = "[CLS] hello world [SEP]"
                else:
                    output = input
                encoded = tokenizer.encode(input, add_special_tokens=False)
                decoded = tokenizer.decode(encoded, spaces_between_special_tokens=self.space_between_special_tokens)
                self.assertIn(decoded, [output, output.lower()])
    def test_tokenizers_common_ids_setters(self) -> None:
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]

                token_to_test_setters = "a"
                token_id_to_test_setters = ord(token_to_test_setters)

                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)

                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)

                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])

                additional_special_token_id = 0xE006
                additional_special_token = chr(additional_special_token_id)
                setattr(tokenizer, "additional_special_tokens_ids", [additional_special_token_id])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [additional_special_token])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [additional_special_token_id])
def lowerCAmelCase__ ( self: List[str] ) -> List[str]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: Dict ) -> List[str]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: Union[str, Any] ) -> Dict:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: Any ) -> List[Any]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: List[Any] ) -> List[str]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: Tuple ) -> Union[str, Any]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: str ) -> str:
'''simple docstring'''
pass
| 54 | 0 |
'''simple docstring'''
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        hidden_dropout=0.0,
        attention_dropout=0.1,
        weight_tying=True,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_multiple_size=self.intermediate_multiple_size,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            weight_tying=self.weight_tying,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels
    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXJapaneseModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXJapaneseModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXJapaneseModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXJapaneseModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXJapaneseConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)
    @slow
    def test_generation(self):
        model_id = "abeja/gpt-neox-japanese-2.7b"
        prompts = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]
        EXPECTED_OUTPUTS = [
            "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
            "100年後に必要とされる会社は、「人」が中心の会社です。",
            "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
            "国境の長いトンネルを抜けると、そこは雪国だった。",
            "美味しい日本食といえば、やっぱりお寿司ですよね。",
        ]

        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id)
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id)

        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt, return_tensors="pt").input_ids
            generated_ids = model.generate(input_ids, max_length=50)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
| 708 |
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
"good first issue",
"good second issue",
"good difficult issue",
"enhancement",
"new pipeline/model",
"new scheduler",
"wip",
]
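# Note (added): running this script requires a GITHUB_TOKEN environment variable
# with permission to read and edit issues on the target repository.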
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="closed" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="open" )
issue.remove_from_labels("stale" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
issue.add_to_labels("stale" )
if __name__ == "__main__":
main()
| 609 | 0 |
"""simple docstring"""
def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    """Create all n-grams (length-`ngram_size` substrings) of `sentence`."""
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 232 |
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])

        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )

        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 232 | 1 |
'''simple docstring'''
def solution(n: int = 1000) -> int:
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))


if __name__ == "__main__":
    print(solution())
| 719 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
__lowerCamelCase = logging.get_logger(__name__)
class BeitFeatureExtractor(BeitImageProcessor):
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> None:
'''simple docstring'''
warnings.warn(
"""The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use BeitImageProcessor instead.""" , UpperCamelCase__ , )
super().__init__(*UpperCamelCase__ , **UpperCamelCase__ )
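        # Note (added): behaviour is identical to BeitImageProcessor; instantiating
        # this class only emits the FutureWarning above.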
| 667 | 0 |
"""simple docstring"""
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False

    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
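    # Illustrative run (added): 0 = open cell, 1 = wall; prints the visited path grid.
    solve_maze([[0, 1], [0, 0]])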
| 88 |
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
UpperCAmelCase_ : Dict = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )

    def __post_init__(self):
        self.task_name = self.task_name.lower()


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"
class GlueDataset(Dataset):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]

    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",
            FutureWarning,
        )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}",
        )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
            else:
                logger.info(f"Creating features from dataset file at {args.data_dir}")

                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples,
                    tokenizer,
                    max_length=args.max_seq_length,
                    label_list=label_list,
                    output_mode=self.output_mode,
                )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
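# Illustrative usage (added; the data path and tokenizer are hypothetical):
# args = GlueDataTrainingArguments(task_name="mrpc", data_dir="./glue_data/MRPC")
# train_dataset = GlueDataset(args, tokenizer=tokenizer, mode=Split.train)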
| 491 | 0 |
'''simple docstring'''
import numpy
# List of input, output pairs
train_data = (
((5, 2, 3), 1_5),
((6, 5, 9), 2_5),
((1_1, 1_2, 1_3), 4_1),
((1, 1, 1), 8),
((1_1, 1_2, 1_3), 4_1),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
def _error(example_no, data_set="train"):
    """Error (hypothesis value - actual value) for a given example."""
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)


def _hypothesis_value(data_input_tuple):
    """Hypothesis (predicted) value for a given input tuple."""
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    """Actual output value for the given example."""
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    """Hypothesis value for the given example from the chosen data set."""
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    """Sum the error terms (times the feature at `index` when index >= 0)."""
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    """Derivative of the cost function w.r.t. the parameter at `index` (-1 = bias)."""
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000_002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))
if __name__ == "__main__":
run_gradient_descent()
print('\nTesting gradient descent for a linear hypothesis function.\n')
test_gradient_descent()
| 47 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase : List[str] = logging.get_logger(__name__)
UpperCAmelCase : Dict = {
'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class SEWDConfig(PretrainedConfig):
    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect."
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 47 | 1 |
from __future__ import annotations
def comp_and_swap(array: list[int], index_1: int, index_2: int, direction: int) -> None:
    """Swap array[index_1] and array[index_2] if they are out of order for `direction`."""
    if (direction == 1 and array[index_1] > array[index_2]) or (
        direction == 0 and array[index_1] < array[index_2]
    ):
        array[index_1], array[index_2] = array[index_2], array[index_1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    """Recursively merge a bitonic sequence into the given direction."""
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    """Sort array[low:low + length] ascending (direction=1) or descending (direction=0)."""
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)
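# Note (added): classic bitonic sort assumes the input length is a power of two;
# other lengths are not handled by this implementation.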
if __name__ == "__main__":
_lowerCamelCase : List[Any] = input("""Enter numbers separated by a comma:\n""").strip()
_lowerCamelCase : List[str] = [int(item.strip()) for item in user_input.split(""",""")]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print("""\nSorted array in ascending order is: """, end="""""")
print(*unsorted, sep=""", """)
bitonic_merge(unsorted, 0, len(unsorted), 0)
print("""Sorted array in descending order is: """, end="""""")
print(*unsorted, sep=""", """) | 352 |
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content
if __name__ == "__main__":
    url = input("""Enter Video/IGTV url: """).strip()
    file_name = f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"""
with open(file_name, """wb""") as fp:
fp.write(download_video(url))
print(f"""Done. Video saved to disk as {file_name}.""") | 352 | 1 |
from __future__ import annotations
def make_matrix(row_size: int = 4) -> list[list[int]]:
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    matrix = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [x[::-1] for x in matrix]
    return matrix


def print_matrix(matrix: list[list[int]]) -> None:
    for i in matrix:
        print(*i)
if __name__ == "__main__":
    matrix = make_matrix()
    print("""\norigin:\n""")
    print_matrix(matrix)
    print("""\nrotate 90 counterclockwise:\n""")
    print_matrix(rotate_90(matrix))
    matrix = make_matrix()
    print("""\norigin:\n""")
    print_matrix(matrix)
    print("""\nrotate 180:\n""")
    print_matrix(rotate_180(matrix))
    matrix = make_matrix()
    print("""\norigin:\n""")
    print_matrix(matrix)
    print("""\nrotate 270 counterclockwise:\n""")
    print_matrix(rotate_270(matrix))
| 709 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    destination_vertex: int
    weight: int
class AdjacencyList:
    """Graph adjacency list supporting 0-1 BFS shortest path."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")

        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")

        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")

        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
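    # Illustrative usage (added): a 0-weight edge then a 1-weight edge -> distance 1.
    g = AdjacencyList(3)
    g.add_edge(0, 1, 0)
    g.add_edge(1, 2, 1)
    print(g.get_shortest_path(0, 2))  # prints 1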
| 291 | 0 |
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("""git merge-base main HEAD""".split()).decode("""utf-8""")
modified_files = (
    subprocess.check_output(f'''git diff --diff-filter=d --name-only {fork_point_sha}'''.split()).decode("""utf-8""").split()
)
joined_dirs = """|""".join(sys.argv[1:])
regex = re.compile(rf'''^({joined_dirs}).*?\.py$''')
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(""" """.join(relevant_modified_files), end="""""")
| 142 |
"""simple docstring"""
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    """Solve the ideal gas law PV = nRT for pressure."""
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    """Solve the ideal gas law PV = nRT for volume."""
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
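    # Illustrative check (added): 1 mol at 300 K in 1 m^3 gives ~2494.34 Pa.
    print(pressure_of_gas_system(1, 300, 1))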
    testmod()
| 142 | 1 |
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
'''169M''': 12,
'''430M''': 24,
'''1B5''': 24,
'''3B''': 32,
'''7B''': 32,
'''14B''': 40,
}
HIDEN_SIZE_MAPPING = {
'''169M''': 7_68,
'''430M''': 10_24,
'''1B5''': 20_48,
'''3B''': 25_60,
'''7B''': 40_96,
'''14B''': 51_20,
}
def convert_state_dict(state_dict):
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")

        if name != "head.weight":
            name = "rwkv." + name

        state_dict[name] = weight
    return state_dict
def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")

    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        "Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model."
    )
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--repo_id''', default=None, type=str, required=True, help='''Repo ID from which to pull the checkpoint.'''
)
parser.add_argument(
'''--checkpoint_file''', default=None, type=str, required=True, help='''Name of the checkpoint file in the repo.'''
)
parser.add_argument(
'''--output_dir''', default=None, type=str, required=True, help='''Where to save the converted model.'''
)
parser.add_argument(
'''--tokenizer_file''',
default=None,
type=str,
help='''Path to the tokenizer file to use (if not provided, only the model is converted).''',
)
parser.add_argument(
'''--size''',
default=None,
type=str,
help='''Size of the model. Will be inferred from the `checkpoint_file` if not passed.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Push to the Hub the converted model.''',
)
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help='''Name of the pushed model on the Hub, including the username / organization.''',
)
    args = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 719 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 685 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    """Output class for text-to-video pipelines: the denoised video frames."""

    frames: Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 452 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class SimpleImageProcessor(BaseImageProcessor):
    r"""
    A resize / center-crop / rescale / normalize image processor producing `pixel_values`.
    (The class name and argument names are reasonable reconstructions; the original
    identifiers in this snippet were not recoverable.)
    """

    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Optional[Dict[str, int]] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        # Resize the shorter edge to `size["shortest_edge"]`, preserving the aspect ratio.
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        # Crop the central `size["height"] x size["width"]` region.
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        # Scale pixel values, typically from [0, 255] down to [0, 1].
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        # Standardize each channel with the given per-channel mean and std.
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None, resample: Optional[PILImageResampling] = None, do_center_crop: Optional[bool] = None, crop_size: Optional[Dict[str, int]] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
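# Usage sketch (illustrative, not part of the original module). Assumes the
# reconstructed class name above and a local "cat.png"; any RGB image works.
# Demonstrates the full resize -> crop -> rescale -> normalize path.
if __name__ == "__main__":
    from PIL import Image

    processor = SimpleImageProcessor()
    demo_image = Image.open("cat.png")  # hypothetical local asset
    batch = processor.preprocess(demo_image, return_tensors="np")
    print(batch["pixel_values"].shape)  # -> (1, 3, 224, 224)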
| 672 | 0 |
import random
def random_graph(nodes_number: int, probability: float, directed: bool = False) -> dict:
    """Generate a random graph with `nodes_number` nodes, adding each possible
    edge independently with the given probability (Erdos-Renyi model)."""
    graph = {i: [] for i in range(nodes_number)}
    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(nodes_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph
    # for each couple of nodes, add an edge from u to v
    # if the number randomly generated is lower than probability
    for i in range(nodes_number):
        for j in range(i + 1, nodes_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add an edge from j to i as well
                    graph[j].append(i)
    return graph


def complete_graph(vertices_number: int) -> dict:
    """Generate a complete graph with `vertices_number` nodes."""
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }
if __name__ == "__main__":
import doctest
doctest.testmod()
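# Usage sketch (illustrative): seed the module-level RNG for a reproducible draw.
if __name__ == "__main__":
    random.seed(1)
    print(random_graph(4, 0.5))  # edges depend on the seeded RNG stream
    print(complete_graph(3))     # {0: [1, 2], 1: [0, 2], 2: [0, 1]}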
| 116 |
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings("""ignore""", category=UserWarning, module="""torch.optim.lr_scheduler""")
class AcceleratedScheduler:
    """Wraps a learning-rate scheduler so that it only advances when the wrapped
    optimizer(s) actually stepped, e.g. not during gradient accumulation or after
    a step skipped by the mixed-precision grad scaler."""

    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return
        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return
        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    # Passthroughs to the wrapped scheduler
    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
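# Usage sketch (illustrative): an AcceleratedScheduler is normally created for you
# by `Accelerator.prepare`, which wires up the GradientState used above.
#
#     from accelerate import Accelerator
#
#     accelerator = Accelerator(gradient_accumulation_steps=2)
#     model, optimizer, dataloader, scheduler = accelerator.prepare(
#         model, optimizer, dataloader, scheduler
#     )
#     # `scheduler` is now an AcceleratedScheduler; scheduler.step() only advances
#     # the wrapped LR schedule on iterations where the optimizer really stepped.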
| 116 | 1 |
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None

log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING

_tqdm_active = True


def _get_default_logging_level() -> int:
    """If the TRANSFORMERS_VERBOSITY env var is set to a valid choice, use it;
    otherwise fall back to `_default_log_level`."""
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
                f"has to be one of: {', '.join(log_levels.keys())}"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    global _default_handler
    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush
        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False


def _reset_library_root_logger() -> None:
    global _default_handler
    with _lock:
        if not _default_handler:
            return
        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None


def get_log_levels_dict():
    return log_levels


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the specified name, configuring the library root
    logger on first use."""
    if name is None:
        name = _get_library_name()
    _configure_library_root_logger()
    return logging.getLogger(name)


def get_verbosity() -> int:
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)


def disable_default_handler() -> None:
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)


def enable_default_handler() -> None:
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)


def add_handler(handler: logging.Handler) -> None:
    _configure_library_root_logger()
    assert handler is not None
    _get_library_root_logger().addHandler(handler)


def remove_handler(handler: logging.Handler) -> None:
    _configure_library_root_logger()
    assert handler is not None and handler in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)


def disable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True


def enable_explicit_format() -> None:
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
        handler.setFormatter(formatter)


def reset_format() -> None:
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        handler.setFormatter(None)


def warning_advice(self, *args, **kwargs):
    """Like `logger.warning()`, but a no-op if TRANSFORMERS_NO_ADVISORY_WARNINGS is set."""
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)


logging.Logger.warning_advice = warning_advice


@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    """Like `logger.warning()`, but each unique warning is only emitted once."""
    self.warning(*args, **kwargs)


logging.Logger.warning_once = warning_once


class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


class _tqdm_cls:
    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bars() -> None:
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()


def disable_progress_bars() -> None:
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
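# Usage sketch (illustrative): this module is imported by library code.
#
#     from transformers.utils import logging
#
#     logging.set_verbosity_info()
#     logger = logging.get_logger(__name__)
#     logger.info("visible at INFO verbosity")
#     logger.warning_once("emitted only the first time this exact call runs")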
| 318 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)


if __name__ == "__main__":
    main()
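# Usage sketch (illustrative): this module backs the `accelerate test` CLI entry point.
#
#     accelerate test
#     accelerate test --config_file path/to/default_config.yaml
#
# Either form launches test_script.py through `accelerate-launch` and prints a
# success message when the subprocess exits cleanly.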
| 318 | 1 |
'''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
A_ : List[str] = "\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n"
A_ : int = "\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.\n"
A_ : Optional[Any] = R"\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting \"1/2\" to \"\\frac{1}{2}\")\n\nExamples:\n >>> metric = datasets.load_metric(\"competition_math\")\n >>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])\n >>> print(results)\n {'accuracy': 1.0}\n"
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __snake_case ( datasets.Metric ):
'''simple docstring'''
def __UpperCamelCase ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" ),
"""references""": datasets.Value("""string""" ),
} ) , homepage="""https://github.com/hendrycks/math""" , codebase_urls=["""https://github.com/hendrycks/math"""] , )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Optional[int] = 0.0
for i, j in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
n_correct += 1.0 if math_equivalence.is_equiv(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else 0.0
snake_case__ : str = n_correct / len(__SCREAMING_SNAKE_CASE )
return {
"accuracy": accuracy,
}
| 704 |
'''simple docstring'''
class Node:
    """A binary search tree node for tree sort."""

    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
            # values equal to an existing node are ignored, so duplicates are dropped
        else:
            self.val = val


def inorder(root, res):
    """Recurse through the tree, appending values to `res` in sorted (in-order) order."""
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    """Sort a sequence by inserting it into a BST and reading it back in order."""
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
| 419 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    def __init__(self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[8, 16, 32, 64], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None, out_features=["stage2", "stage3", "stage4"], out_indices=[2, 3, 4], num_groups=1):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, out_features=self.out_features, out_indices=self.out_indices, num_groups=self.num_groups, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])
        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])
        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)
def snake_case ( self : Optional[int] ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case ( self : Optional[Any] ):
"""simple docstring"""
return
@unittest.skip(reason='Bit does not output attentions' )
def snake_case ( self : List[str] ):
"""simple docstring"""
pass
@unittest.skip(reason='Bit does not use inputs_embeds' )
def snake_case ( self : Tuple ):
"""simple docstring"""
pass
@unittest.skip(reason='Bit does not support input and output embeddings' )
def snake_case ( self : Any ):
"""simple docstring"""
pass
def snake_case ( self : Optional[Any] ):
"""simple docstring"""
__lowercase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase =model_class(__UpperCamelCase )
__lowercase =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase =[*signature.parameters.keys()]
__lowercase =["pixel_values"]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def snake_case ( self : List[Any] ):
"""simple docstring"""
__lowercase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def snake_case ( self : int ):
"""simple docstring"""
__lowercase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__UpperCamelCase )
def snake_case ( self : Union[str, Any] ):
"""simple docstring"""
__lowercase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase =model_class(config=__UpperCamelCase )
for name, module in model.named_modules():
if isinstance(__UpperCamelCase , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
def snake_case ( self : List[Any] ):
"""simple docstring"""
def check_hidden_states_output(__lowercase : Optional[int] , __lowercase : Union[str, Any] , __lowercase : List[str] ):
__lowercase =model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
with torch.no_grad():
__lowercase =model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
__lowercase =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__lowercase =self.model_tester.num_stages
self.assertEqual(len(__UpperCamelCase ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__lowercase =self.model_tester.prepare_config_and_inputs_for_common()
__lowercase =["preactivation", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
__lowercase =layer_type
__lowercase =True
check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase =True
check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
@unittest.skip(reason='Bit does not use feedforward chunking' )
def snake_case ( self : Union[str, Any] ):
"""simple docstring"""
pass
def snake_case ( self : Optional[Any] ):
"""simple docstring"""
__lowercase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase )
@slow
def snake_case ( self : str ):
"""simple docstring"""
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase =BitModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class lowerCAmelCase ( unittest.TestCase ):
@cached_property
def snake_case ( self : Any ):
"""simple docstring"""
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def snake_case ( self : Optional[int] ):
"""simple docstring"""
__lowercase =BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(__UpperCamelCase )
__lowercase =self.default_image_processor
__lowercase =prepare_img()
__lowercase =image_processor(images=__UpperCamelCase , return_tensors='pt' ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
__lowercase =model(**__UpperCamelCase )
# verify the logits
__lowercase =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
__lowercase =torch.tensor([[-0.6_5_2_6, -0.5_2_6_3, -1.4_3_9_8]] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1E-4 ) )
@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig
    has_attentions = False
    def setUp(self):
        self.model_tester = BitModelTester(self)
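# Usage sketch (illustrative): the suites above are collected with pytest, e.g.
#
#     RUN_SLOW=1 python -m pytest tests/models/bit/test_modeling_bit.py
#
# (the file path and the RUN_SLOW gate for @slow tests follow the usual
# transformers conventions and are assumptions here).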
| 119 |
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
    "t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
    "t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
    "t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
    "t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}
class T5Config(PretrainedConfig):
    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(self, vocab_size=32128, d_model=512, d_kv=64, d_ff=2048, num_layers=6, num_decoder_layers=None, num_heads=8, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1e-6, initializer_factor=1.0, feed_forward_proj="relu", is_encoder_decoder=True, use_cache=True, pad_token_id=0, eos_token_id=1, **kwargs):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **kwargs
        )
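# Usage sketch (illustrative): constructing and round-tripping a config.
#
#     from transformers import T5Config
#
#     config = T5Config.from_pretrained("t5-small")
#     config.save_pretrained("./t5-config")            # writes config.json
#     config = T5Config.from_pretrained("./t5-config")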
class T5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
| 436 | 0 |
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node | None:
    # Builds:      1
    #             / \
    #            2   3
    #           / \
    #          4   5
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree


def preorder(root: Node | None) -> list[int]:
    """Root -> left subtree -> right subtree."""
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    """Left subtree -> right subtree -> root."""
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    """Left subtree -> root -> right subtree."""
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    """Number of nodes on the longest root-to-leaf path."""
    return (max(height(root.left), height(root.right)) + 1) if root else 0


def level_order(root: Node | None) -> Sequence[Node | None]:
    """Breadth-first traversal using a queue."""
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    """Collect the values of a single level, left to right."""
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    """Collect the values of a single level, right to left."""
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    """Zigzag (spiral) traversal: alternate direction on every level."""
    if root is None:
        return []
    output: list[Any] = []
    flag = 0
    height_tree = height(root)
    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output


def main() -> None:  # Main function for testing.
    root = make_tree()
    print(f"In-order Traversal: {inorder(root)}")
    print(f"Pre-order Traversal: {preorder(root)}")
    print(f"Post-order Traversal: {postorder(root)}", "\n")
    print(f"Height of Tree: {height(root)}", "\n")
    print("Complete Level Order Traversal: ")
    print(level_order(root), "\n")
    print("Level-wise order Traversal: ")
    for level in range(1, height(root) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level))
    print("\nZigZag order Traversal: ")
    print(zigzag(root))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
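# For the fixed tree built by make_tree(), the traversals are deterministic:
#   inorder(root)     -> [4, 2, 5, 1, 3]
#   preorder(root)    -> [1, 2, 4, 5, 3]
#   postorder(root)   -> [4, 5, 2, 3, 1]
#   height(root)      -> 3
#   level_order(root) -> [1, 2, 3, 4, 5]
#   zigzag(root)      -> [[1], [3, 2], [4, 5]]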
| 586 |
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
def snake_case__ ( self : Any )-> Any:
'''simple docstring'''
A__ = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
A__ = None
if self.use_input_mask:
A__ = random_attention_mask([self.batch_size, self.seq_length] )
A__ = None
if self.use_token_type_ids:
A__ = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
A__ = None
A__ = None
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size],self.type_sequence_label_size )
A__ = ids_tensor([self.batch_size, self.seq_length],self.num_labels )
A__ = ids_tensor([self.batch_size],self.num_choices )
A__ = RoFormerConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,initializer_range=self.initializer_range,return_dict=lowercase_,)
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__ ( self : Union[str, Any],lowercase_ : int,lowercase_ : Dict,lowercase_ : int,lowercase_ : Any,lowercase_ : List[str],lowercase_ : Optional[Any],lowercase_ : Optional[int] )-> Any:
'''simple docstring'''
A__ = TFRoFormerModel(config=lowercase_ )
A__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
A__ = [input_ids, input_mask]
A__ = model(lowercase_ )
A__ = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self : Tuple,lowercase_ : Optional[Any],lowercase_ : int,lowercase_ : Tuple,lowercase_ : Optional[int],lowercase_ : List[Any],lowercase_ : Union[str, Any],lowercase_ : str )-> Tuple:
'''simple docstring'''
A__ = True
A__ = TFRoFormerForCausalLM(config=lowercase_ )
A__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
A__ = model(lowercase_ )['logits']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ),[self.batch_size, self.seq_length, self.vocab_size] )
def snake_case__ ( self : Tuple,lowercase_ : Any,lowercase_ : Optional[Any],lowercase_ : Dict,lowercase_ : Optional[Any],lowercase_ : Optional[Any],lowercase_ : Any,lowercase_ : int )-> Any:
'''simple docstring'''
A__ = TFRoFormerForMaskedLM(config=lowercase_ )
A__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
A__ = model(lowercase_ )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__ ( self : Optional[Any],lowercase_ : Union[str, Any],lowercase_ : str,lowercase_ : Optional[Any],lowercase_ : Union[str, Any],lowercase_ : Tuple,lowercase_ : List[str],lowercase_ : Union[str, Any] )-> List[str]:
'''simple docstring'''
A__ = self.num_labels
A__ = TFRoFormerForSequenceClassification(config=lowercase_ )
A__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
A__ = model(lowercase_ )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_labels) )
def snake_case__ ( self : List[Any],lowercase_ : List[str],lowercase_ : Tuple,lowercase_ : Tuple,lowercase_ : Tuple,lowercase_ : List[str],lowercase_ : Optional[int],lowercase_ : Dict )-> List[str]:
'''simple docstring'''
A__ = self.num_choices
A__ = TFRoFormerForMultipleChoice(config=lowercase_ )
A__ = tf.tile(tf.expand_dims(lowercase_,1 ),(1, self.num_choices, 1) )
A__ = tf.tile(tf.expand_dims(lowercase_,1 ),(1, self.num_choices, 1) )
A__ = tf.tile(tf.expand_dims(lowercase_,1 ),(1, self.num_choices, 1) )
A__ = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
A__ = model(lowercase_ )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_choices) )
def snake_case__ ( self : Tuple,lowercase_ : Dict,lowercase_ : Tuple,lowercase_ : Tuple,lowercase_ : Union[str, Any],lowercase_ : Dict,lowercase_ : List[Any],lowercase_ : List[str] )-> Optional[Any]:
'''simple docstring'''
A__ = self.num_labels
A__ = TFRoFormerForTokenClassification(config=lowercase_ )
A__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
A__ = model(lowercase_ )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.num_labels) )
def snake_case__ ( self : Optional[Any],lowercase_ : Optional[Any],lowercase_ : Any,lowercase_ : Tuple,lowercase_ : List[str],lowercase_ : List[Any],lowercase_ : Tuple,lowercase_ : Tuple )-> Optional[Any]:
'''simple docstring'''
A__ = TFRoFormerForQuestionAnswering(config=lowercase_ )
A__ = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
A__ = model(lowercase_ )
self.parent.assertEqual(result.start_logits.shape,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape,(self.batch_size, self.seq_length) )
def snake_case__ ( self : Union[str, Any] )-> Tuple:
'''simple docstring'''
A__ = self.prepare_config_and_inputs()
(
(
A__
) , (
A__
) , (
A__
) , (
A__
) , (
A__
) , (
A__
) , (
A__
) ,
) = config_and_inputs
A__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class TFRoFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFRoFormerModel,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFRoFormerModel,
            "fill-mask": TFRoFormerForMaskedLM,
            "question-answering": TFRoFormerForQuestionAnswering,
            "text-classification": TFRoFormerForSequenceClassification,
            "text-generation": TFRoFormerForCausalLM,
            "token-classification": TFRoFormerForTokenClassification,
            "zero-shot": TFRoFormerForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
def snake_case__ ( self : Dict,lowercase_ : str,lowercase_ : Union[str, Any],lowercase_ : int,lowercase_ : Tuple,lowercase_ : Any )-> List[str]:
'''simple docstring'''
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
    def setUp(self):
        self.model_tester = TFRoFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37)
def snake_case__ ( self : str )-> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case__ ( self : str )-> str:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def snake_case__ ( self : List[str] )-> Tuple:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowercase_ )
def snake_case__ ( self : Optional[int] )-> str:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*lowercase_ )
def snake_case__ ( self : Optional[int] )-> List[str]:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowercase_ )
def snake_case__ ( self : List[Any] )-> int:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowercase_ )
def snake_case__ ( self : Optional[Any] )-> Tuple:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowercase_ )
def snake_case__ ( self : Tuple )-> Any:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase_ )
@slow
def snake_case__ ( self : List[str] )-> Tuple:
'''simple docstring'''
A__ = TFRoFormerModel.from_pretrained('junnyu/roformer_chinese_base' )
self.assertIsNotNone(lowercase_ )
@require_tf
class TFRoFormerModelIntegrationTest(unittest.TestCase):
@slow
def snake_case__ ( self : List[Any] )-> Optional[int]:
'''simple docstring'''
A__ = TFRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' )
A__ = tf.constant([[0, 1, 2, 3, 4, 5]] )
A__ = model(lowercase_ )[0]
# TODO Replace vocab size
A__ = 5_0_0_0_0
A__ = [1, 6, vocab_size]
self.assertEqual(output.shape,lowercase_ )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
A__ = tf.constant(
[
[
[-0.12_053_341, -1.0_264_901, 0.29_221_946],
[-1.5_133_783, 0.197_433, 0.15_190_607],
[-5.0_135_403, -3.900_256, -0.84_038_764],
]
] )
tf.debugging.assert_near(output[:, :3, :3],lowercase_,atol=1E-4 )
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4
def snake_case__ ( self : Union[str, Any] )-> Optional[Any]:
'''simple docstring'''
A__ = tf.constant([[4, 1_0]] )
A__ = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6,embedding_dim=6 )
A__ = emba(input_ids.shape )
A__ = tf.constant(
[[0.0_000, 0.0_000, 0.0_000, 1.0_000, 1.0_000, 1.0_000], [0.8_415, 0.0_464, 0.0_022, 0.5_403, 0.9_989, 1.0_000]] )
tf.debugging.assert_near(lowercase_,lowercase_,atol=self.tolerance )
def snake_case__ ( self : List[Any] )-> List[str]:
'''simple docstring'''
A__ = tf.constant(
[
[0.0_000, 0.0_000, 0.0_000, 0.0_000, 0.0_000],
[0.8_415, 0.8_219, 0.8_020, 0.7_819, 0.7_617],
[0.9_093, 0.9_364, 0.9_581, 0.9_749, 0.9_870],
] )
A__ = TFRoFormerSinusoidalPositionalEmbedding(num_positions=5_1_2,embedding_dim=5_1_2 )
emba([2, 1_6, 5_1_2] )
A__ = emba.weight[:3, :5]
tf.debugging.assert_near(lowercase_,lowercase_,atol=self.tolerance )
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4
def snake_case__ ( self : Union[str, Any] )-> str:
'''simple docstring'''
A__ = tf.reshape(tf.range(2 * 1_2 * 1_6 * 6_4,dtype=tf.floataa ),shape=(2, 1_2, 1_6, 6_4) ) / 1_0_0
A__ = -tf.reshape(tf.range(2 * 1_2 * 1_6 * 6_4,dtype=tf.floataa ),shape=(2, 1_2, 1_6, 6_4) ) / 1_0_0
A__ = TFRoFormerSinusoidalPositionalEmbedding(num_positions=3_2,embedding_dim=6_4 )
A__ = embed_positions([2, 1_6, 7_6_8] )[None, None, :, :]
A__ , A__ = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
lowercase_,lowercase_,lowercase_ )
A__ = tf.constant(
[
[0.0_000, 0.0_100, 0.0_200, 0.0_300, 0.0_400, 0.0_500, 0.0_600, 0.0_700],
[-0.2_012, 0.8_897, 0.0_263, 0.9_401, 0.2_074, 0.9_463, 0.3_481, 0.9_343],
[-1.7_057, 0.6_271, -1.2_145, 1.3_897, -0.6_303, 1.7_647, -0.1_173, 1.8_985],
[-2.1_731, -1.6_397, -2.7_358, 0.2_854, -2.1_840, 1.7_183, -1.3_018, 2.4_871],
[0.2_717, -3.6_173, -2.9_206, -2.1_988, -3.6_638, 0.3_858, -2.9_155, 2.2_980],
[3.9_859, -2.1_580, -0.7_984, -4.4_904, -4.1_181, -2.0_252, -4.4_782, 1.1_253],
] )
A__ = tf.constant(
[
[0.0_000, -0.0_100, -0.0_200, -0.0_300, -0.0_400, -0.0_500, -0.0_600, -0.0_700],
[0.2_012, -0.8_897, -0.0_263, -0.9_401, -0.2_074, -0.9_463, -0.3_481, -0.9_343],
[1.7_057, -0.6_271, 1.2_145, -1.3_897, 0.6_303, -1.7_647, 0.1_173, -1.8_985],
[2.1_731, 1.6_397, 2.7_358, -0.2_854, 2.1_840, -1.7_183, 1.3_018, -2.4_871],
[-0.2_717, 3.6_173, 2.9_206, 2.1_988, 3.6_638, -0.3_858, 2.9_155, -2.2_980],
[-3.9_859, 2.1_580, 0.7_984, 4.4_904, 4.1_181, 2.0_252, 4.4_782, -1.1_253],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8],lowercase_,atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8],lowercase_,atol=self.tolerance )
| 586 | 1 |
'''simple docstring'''
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
__a = imread(R"digital_image_processing/image_data/lena_small.jpg")
__a = cvtColor(img, COLOR_BGR2GRAY)
def __snake_case( ) -> List[Any]:
snake_case__ : Optional[Any] = cn.convert_to_negative(_lowerCAmelCase )
# assert negative_img array for at least one True
assert negative_img.any()
def __snake_case( ) -> Dict:
with Image.open("""digital_image_processing/image_data/lena_small.jpg""" ) as img:
# Work around assertion for response
assert str(cc.change_contrast(_lowerCAmelCase , 110 ) ).startswith(
"""<PIL.Image.Image image mode=RGB size=100x100 at""" )
def __snake_case( ) -> int:
snake_case__ : Optional[Any] = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def __snake_case( ) -> List[Any]:
snake_case__ : Any = imread("""digital_image_processing/image_data/lena_small.jpg""" , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
snake_case__ : List[str] = canny.canny(_lowerCAmelCase )
# assert canny array for at least one True
assert canny_array.any()
def __snake_case( ) -> List[str]:
assert gg.gaussian_filter(_lowerCAmelCase , 5 , sigma=0.9 ).all()
def __snake_case( ) -> Tuple:
# laplace diagonals
snake_case__ : Optional[Any] = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
snake_case__ : Optional[int] = conv.img_convolve(_lowerCAmelCase , _lowerCAmelCase ).astype(_lowerCAmelCase )
assert res.any()
def __snake_case( ) -> List[str]:
assert med.median_filter(_lowerCAmelCase , 3 ).any()
def __snake_case( ) -> Optional[int]:
snake_case__ , snake_case__ : str = sob.sobel_filter(_lowerCAmelCase )
assert grad.any() and theta.any()
def __snake_case( ) -> Tuple:
snake_case__ : Tuple = sp.make_sepia(_lowerCAmelCase , 20 )
assert sepia.all()
def __snake_case( _lowerCAmelCase = "digital_image_processing/image_data/lena_small.jpg" ) -> Union[str, Any]:
snake_case__ : Any = bs.Burkes(imread(_lowerCAmelCase , 1 ) , 120 )
burkes.process()
assert burkes.output_img.any()
def __snake_case( _lowerCAmelCase = "digital_image_processing/image_data/lena_small.jpg" , ) -> int:
snake_case__ : str = rs.NearestNeighbour(imread(_lowerCAmelCase , 1 ) , 400 , 200 )
nn.process()
assert nn.output.any()
def __snake_case( ) -> Union[str, Any]:
snake_case__ : str = """digital_image_processing/image_data/lena.jpg"""
# Reading the image and converting it to grayscale.
snake_case__ : List[Any] = imread(_lowerCAmelCase , 0 )
# Test for get_neighbors_pixel function() return not None
snake_case__ : Optional[int] = 0
snake_case__ : Union[str, Any] = 0
snake_case__ : Union[str, Any] = image[x_coordinate][y_coordinate]
snake_case__ : Dict = lbp.get_neighbors_pixel(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
snake_case__ : Tuple = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
snake_case__ : Dict = lbp.local_binary_value(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
assert lbp_image.any()
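# Usage sketch (illustrative): run the suite with pytest from the repository root,
#
#     python -m pytest digital_image_processing/test_digital_image_processing.py
#
# (the test module path is an assumption; the image fixtures above are resolved
# relative to the working directory).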
| 374 |
'''simple docstring'''
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class ActivationsTests(unittest.TestCase):
    def test_swish(self):
        act = get_activation("swish")
        self.assertIsInstance(act, nn.SiLU)
        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self):
        act = get_activation("silu")
        self.assertIsInstance(act, nn.SiLU)
        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self):
        act = get_activation("mish")
        self.assertIsInstance(act, nn.Mish)
        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self):
        act = get_activation("gelu")
        self.assertIsInstance(act, nn.GELU)
        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
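# Quick numeric illustration (not part of the test suite) of why the boundary
# checks above hold: swish(x) = x * sigmoid(x), so sigmoid(-100) underflows to 0
# in float32 while sigmoid(20) is numerically 1.
if __name__ == "__main__":
    act = get_activation("swish")
    print(act(torch.tensor([-100.0, -1.0, 0.0, 20.0])))  # ~ tensor([-0.0000, -0.2689, 0.0000, 20.0000])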
| 374 | 1 |
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionInpaintPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # Placeholder: fast tests for the ONNX inpaint pipeline are not implemented here.
    pass
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    # Class and test-method names below are reasonable reconstructions, not verified originals.
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inpainting_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting", revision="onnx", safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np", )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inpainting_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-inpainting", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting", revision="onnx", scheduler=lms_scheduler, safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, guidance_scale=7.5, num_inference_steps=20, generator=generator, output_type="np", )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
| 341 |
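The `gpu_provider` and `gpu_options` properties above feed straight into onnxruntime. A sketch of the underlying onnxruntime API, with `model.onnx` as a hypothetical path used only for illustration:

import onnxruntime as ort

options = ort.SessionOptions()
options.enable_mem_pattern = False  # trade memory-pattern optimisation for lower peak usage

providers = [
    # A (name, options) tuple configures one execution provider.
    ("CUDAExecutionProvider", {"gpu_mem_limit": "15000000000", "arena_extend_strategy": "kSameAsRequested"}),
    "CPUExecutionProvider",  # a plain string selects a provider with default options
]
session = ort.InferenceSession("model.onnx", sess_options=options, providers=providers)

Providers are tried in list order, so the CPU entry acts as a fallback when CUDA is unavailable.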
import pytest

from datasets import inspect_metric, list_metrics, load_metric


@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())


# Used by list_metrics
@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())


@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))]
)
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
| 341 | 1 |
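The `mock_hfh` fixture above relies on pytest's `monkeypatch` to swap a module attribute for the duration of one test, so no call ever reaches the Hugging Face Hub. A minimal, self-contained sketch of the same pattern (the `FakeHub` name and assertion are illustrative):

def test_monkeypatch_pattern(monkeypatch):
    import datasets.inspect

    class FakeHub:
        def list_metrics(self):
            return []

    # Replace the module-level reference; pytest restores it after the test.
    monkeypatch.setattr("datasets.inspect.huggingface_hub", FakeHub())
    assert datasets.inspect.huggingface_hub.list_metrics() == []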