from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_longformer''': [
'''LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''LongformerConfig''',
'''LongformerOnnxConfig''',
],
'''tokenization_longformer''': ['''LongformerTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_longformer_fast"] = ["LongformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longformer"] = [
'''LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongformerForMaskedLM''',
'''LongformerForMultipleChoice''',
'''LongformerForQuestionAnswering''',
'''LongformerForSequenceClassification''',
'''LongformerForTokenClassification''',
'''LongformerModel''',
'''LongformerPreTrainedModel''',
'''LongformerSelfAttention''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_longformer"] = [
'''TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLongformerForMaskedLM''',
'''TFLongformerForMultipleChoice''',
'''TFLongformerForQuestionAnswering''',
'''TFLongformerForSequenceClassification''',
'''TFLongformerForTokenClassification''',
'''TFLongformerModel''',
'''TFLongformerPreTrainedModel''',
'''TFLongformerSelfAttention''',
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
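# Usage sketch (hypothetical caller code, not part of this module): the `_LazyModule`
# registered above defers the heavy framework imports until an attribute is actually
# accessed, so e.g.
#
#   from transformers import LongformerConfig    # cheap, config-only import
#   from transformers import TFLongformerModel   # first access triggers the TF import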
"""simple docstring"""
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
lowerCamelCase = logging.get_logger(__name__)
logging.set_verbosity_info()
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
if "xprophetnet" in prophetnet_checkpoint_path:
UpperCAmelCase_ = XLMProphetNetForConditionalGenerationOld.from_pretrained(lowerCAmelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = XLMProphetNetForConditionalGeneration.from_pretrained(
lowerCAmelCase__ , output_loading_info=lowerCAmelCase__ )
else:
UpperCAmelCase_ = ProphetNetForConditionalGenerationOld.from_pretrained(lowerCAmelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = ProphetNetForConditionalGeneration.from_pretrained(
lowerCAmelCase__ , output_loading_info=lowerCAmelCase__ )
UpperCAmelCase_ = ["key_proj", "value_proj", "query_proj"]
UpperCAmelCase_ = {
"self_attn": "ngram_self_attn",
"cross_attn": "encoder_attn",
"cross_attn_layer_norm": "encoder_attn_layer_norm",
"feed_forward_layer_norm": "final_layer_norm",
"feed_forward": "",
"intermediate": "fc1",
"output": "fc2",
"key_proj": "k_proj",
"query_proj": "q_proj",
"value_proj": "v_proj",
"word_embeddings": "embed_tokens",
"embeddings_layer_norm": "emb_layer_norm",
"relative_pos_embeddings": "relative_linear",
"ngram_embeddings": "ngram_input_embed",
"position_embeddings": "embed_positions",
}
for key in loading_info["missing_keys"]:
UpperCAmelCase_ = key.split("." )
if attributes[0] == "lm_head":
UpperCAmelCase_ = prophet
UpperCAmelCase_ = prophet_old
else:
UpperCAmelCase_ = prophet.prophetnet
UpperCAmelCase_ = prophet_old.model
UpperCAmelCase_ = False
for attribute in attributes:
if attribute in mapping:
UpperCAmelCase_ = mapping[attribute]
if not hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) and len(lowerCAmelCase__ ) > 0:
UpperCAmelCase_ = attribute
elif hasattr(lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
UpperCAmelCase_ = old_model.weight
logger.info(f"""{attribute} is initialized.""" )
UpperCAmelCase_ = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
UpperCAmelCase_ = old_model.bias
logger.info(f"""{attribute} is initialized""" )
UpperCAmelCase_ = True
break
elif attribute in special_keys and hasattr(lowerCAmelCase__ , "in_proj_weight" ):
UpperCAmelCase_ = old_model.in_proj_weight.shape[0] // 3
UpperCAmelCase_ = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
UpperCAmelCase_ = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
UpperCAmelCase_ = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
UpperCAmelCase_ = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
UpperCAmelCase_ = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
UpperCAmelCase_ = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
UpperCAmelCase_ = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
UpperCAmelCase_ = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
UpperCAmelCase_ = nn.Parameter(old_model.embed_positions.weight[:512, :] )
UpperCAmelCase_ = True
break
if attribute.isdigit():
UpperCAmelCase_ = model[int(lowerCAmelCase__ )]
UpperCAmelCase_ = old_model[int(lowerCAmelCase__ )]
else:
UpperCAmelCase_ = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
if old_attribute == "":
UpperCAmelCase_ = old_model
else:
if not hasattr(lowerCAmelCase__ , lowerCAmelCase__ ):
raise ValueError(f"""{old_model} does not have {old_attribute}""" )
UpperCAmelCase_ = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
if not is_key_init:
raise ValueError(f"""{key} was not correctly initialized!""" )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
prophet.save_pretrained(lowerCAmelCase__ )
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
lowerCamelCase = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
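# Example invocation (script filename and checkpoint name are illustrative; any of the
# `..._old` checkpoints mentioned in the header comment should work the same way):
#
#   python convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py \
#       --prophetnet_checkpoint_path patrickvonplaten/prophetnet-large-uncased_old \
#       --pytorch_dump_folder_path ./prophetnet-large-uncased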
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results


def is_cuda_and_apex_available():
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


class ExamplesTestsNoTrainer(TestCasePlus):
    @classmethod
    def setUpClass(cls):
        # Write an accelerate config file that every test below launches with.
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdir)

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_glue_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n            {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n            --model_name_or_path distilbert-base-uncased\n            --output_dir {tmp_dir}\n            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n            --per_device_train_batch_size=2\n            --per_device_eval_batch_size=1\n            --learning_rate=1e-4\n            --seed=42\n            --checkpointing_steps epoch\n            --with_tracking\n        '.split()

        if is_cuda_and_apex_available():
            testargs.append("--fp16")

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "glue_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_clm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n            {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n            --model_name_or_path distilgpt2\n            --train_file ./tests/fixtures/sample_text.txt\n            --validation_file ./tests/fixtures/sample_text.txt\n            --block_size 128\n            --per_device_train_batch_size 5\n            --per_device_eval_batch_size 5\n            --num_train_epochs 2\n            --output_dir {tmp_dir}\n            --checkpointing_steps epoch\n            --with_tracking\n        '.split()

        if torch.cuda.device_count() > 1:
            # Skipping because there are not enough batches to train the model + would need a drop_last to work.
            return

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 100)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "clm_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_mlm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n            {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n            --model_name_or_path distilroberta-base\n            --train_file ./tests/fixtures/sample_text.txt\n            --validation_file ./tests/fixtures/sample_text.txt\n            --output_dir {tmp_dir}\n            --num_train_epochs=1\n            --checkpointing_steps epoch\n            --with_tracking\n        '.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 42)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "mlm_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_ner_no_trainer(self):
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n            {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n            --model_name_or_path bert-base-uncased\n            --train_file tests/fixtures/tests_samples/conll/sample.json\n            --validation_file tests/fixtures/tests_samples/conll/sample.json\n            --output_dir {tmp_dir}\n            --learning_rate=2e-4\n            --per_device_train_batch_size=2\n            --per_device_eval_batch_size=2\n            --num_train_epochs={epochs}\n            --seed 7\n            --checkpointing_steps epoch\n            --with_tracking\n        '.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertLess(result["train_loss"], 0.5)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "ner_no_trainer")))

    @unittest.skip(reason="Fix me @muellerzr")
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_squad_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n            {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n            --model_name_or_path bert-base-uncased\n            --version_2_with_negative\n            --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n            --output_dir {tmp_dir}\n            --seed=42\n            --max_train_steps=10\n            --num_warmup_steps=2\n            --learning_rate=2e-4\n            --per_device_train_batch_size=2\n            --per_device_eval_batch_size=1\n            --checkpointing_steps epoch\n            --with_tracking\n        '.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result["eval_f1"], 28)
        self.assertGreaterEqual(result["eval_exact"], 28)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "qa_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_swag_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n            {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n            --model_name_or_path bert-base-uncased\n            --train_file tests/fixtures/tests_samples/swag/sample.json\n            --validation_file tests/fixtures/tests_samples/swag/sample.json\n            --output_dir {tmp_dir}\n            --max_train_steps=20\n            --num_warmup_steps=2\n            --learning_rate=2e-4\n            --per_device_train_batch_size=2\n            --per_device_eval_batch_size=1\n            --with_tracking\n        '.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.8)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "swag_no_trainer")))

    @slow
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_summarization_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n            {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n            --model_name_or_path t5-small\n            --train_file tests/fixtures/tests_samples/xsum/sample.json\n            --validation_file tests/fixtures/tests_samples/xsum/sample.json\n            --output_dir {tmp_dir}\n            --max_train_steps=50\n            --num_warmup_steps=8\n            --learning_rate=2e-4\n            --per_device_train_batch_size=2\n            --per_device_eval_batch_size=1\n            --checkpointing_steps epoch\n            --with_tracking\n        '.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_rouge1"], 10)
        self.assertGreaterEqual(result["eval_rouge2"], 2)
        self.assertGreaterEqual(result["eval_rougeL"], 7)
        self.assertGreaterEqual(result["eval_rougeLsum"], 7)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "summarization_no_trainer")))

    @slow
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_translation_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n            {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n            --model_name_or_path sshleifer/student_marian_en_ro_6_1\n            --source_lang en\n            --target_lang ro\n            --train_file tests/fixtures/tests_samples/wmt16/sample.json\n            --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n            --output_dir {tmp_dir}\n            --max_train_steps=50\n            --num_warmup_steps=8\n            --num_beams=6\n            --learning_rate=3e-3\n            --per_device_train_batch_size=2\n            --per_device_eval_batch_size=1\n            --source_lang en_XX\n            --target_lang ro_RO\n            --checkpointing_steps epoch\n            --with_tracking\n        '.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_bleu"], 30)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "translation_no_trainer")))

    @slow
    def test_run_semantic_segmentation_no_trainer(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n            {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n            --dataset_name huggingface/semantic-segmentation-test-sample\n            --output_dir {tmp_dir}\n            --max_train_steps=10\n            --num_warmup_steps=2\n            --learning_rate=2e-4\n            --per_device_train_batch_size=2\n            --per_device_eval_batch_size=1\n            --checkpointing_steps epoch\n        '.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_overall_accuracy"], 0.10)

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_image_classification_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n            {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n            --model_name_or_path google/vit-base-patch16-224-in21k\n            --dataset_name hf-internal-testing/cats_vs_dogs_sample\n            --learning_rate 1e-4\n            --per_device_train_batch_size 2\n            --per_device_eval_batch_size 1\n            --max_train_steps 2\n            --train_val_split 0.1\n            --seed 42\n            --output_dir {tmp_dir}\n            --with_tracking\n            --checkpointing_steps 1\n        '.split()

        if is_cuda_and_apex_available():
            testargs.append("--fp16")

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # The base model scores a 25%
        self.assertGreaterEqual(result["eval_accuracy"], 0.6)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "step_1")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "image_classification_no_trainer")))
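# These tests shell out to `accelerate launch`, so a single one can be run with pytest
# from the repository root (the file path is an assumption about the repo layout):
#
#   python -m pytest -sv examples/pytorch/test_accelerate_examples.py -k test_run_glue_no_trainer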
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_mae"] = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
logger = logging.get_logger(__name__)

LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}


class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"

    def __init__(
        self,
        vocab_size=50_265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_2d_position_embeddings=1_024,
        coordinate_size=128,
        shape_size=128,
        has_relative_attention_bias=True,
        rel_pos_bins=32,
        max_rel_pos=128,
        rel_2d_pos_bins=64,
        max_rel_2d_pos=256,
        has_spatial_attention_bias=True,
        text_embed=True,
        visual_embed=True,
        input_size=224,
        num_channels=3,
        patch_size=16,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout


class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # The input order differs between the question-answering/sequence-classification
        # heads and the other tasks.
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        # A dummy image is used, so OCR should not be applied.
        setattr(processor.image_processor, "apply_ocr", False)

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size

        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(
            processor(
                dummy_image,
                text=dummy_text,
                boxes=dummy_bboxes,
                return_tensors=framework,
            )
        )

        return inputs
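# Minimal usage sketch (the model class name is assumed from the standard
# transformers naming convention, not defined in this file):
#
#   from transformers import LayoutLMv3Config, LayoutLMv3Model
#
#   config = LayoutLMv3Config(visual_embed=True)  # defaults match microsoft/layoutlmv3-base
#   model = LayoutLMv3Model(config)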
import argparse
import collections

import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints

from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()


def t5x_relpos_bias_lookup(params, i, prefix):
    """Returns the Relative Position Bias parameters of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]


def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v


def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]

    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo


def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]


def convert_t5x_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool, scalable_attention: bool = False):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T

        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                old, i, "encoder"
            ).T

    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "decoder"
        ).T

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                    old, i, "decoder"
                ).T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new


def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict


def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)


def convert_t5x_checkpoint_to_pytorch(
    t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False, scalable_attention: bool = False
):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = MT5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
    )
    parser.add_argument(
        "--scalable_attention",
        action="store_true",
        help="Whether the model uses scaled attention (umt5 model)",
        default=False,
    )
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
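# Example invocation (script filename and paths are placeholders for a local T5X
# checkpoint and its matching config):
#
#   python convert_umt5_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path ./umt5-converted \
#       --scalable_attention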
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_boto3_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_boto3_available():
    import boto3  # noqa: F401
def _create_iam_role_for_sagemaker(role_name):
    iam_client = boto3.client("iam")

    sagemaker_trust_policy = {
        "Version": "2012-10-17",
        "Statement": [
            {"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"}
        ],
    }
    try:
        # create the role, associated with the chosen trust policy
        iam_client.create_role(
            RoleName=role_name, AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy, indent=2)
        )
        policy_document = {
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Effect": "Allow",
                    "Action": [
                        "sagemaker:*",
                        "ecr:GetDownloadUrlForLayer",
                        "ecr:BatchGetImage",
                        "ecr:BatchCheckLayerAvailability",
                        "ecr:GetAuthorizationToken",
                        "cloudwatch:PutMetricData",
                        "cloudwatch:GetMetricData",
                        "cloudwatch:GetMetricStatistics",
                        "cloudwatch:ListMetrics",
                        "logs:CreateLogGroup",
                        "logs:CreateLogStream",
                        "logs:DescribeLogStreams",
                        "logs:PutLogEvents",
                        "logs:GetLogEvents",
                        "s3:CreateBucket",
                        "s3:ListBucket",
                        "s3:GetBucketLocation",
                        "s3:GetObject",
                        "s3:PutObject",
                    ],
                    "Resource": "*",
                }
            ],
        }
        # attach policy to role
        iam_client.put_role_policy(
            RoleName=role_name,
            PolicyName=f"{role_name}_policy_permission",
            PolicyDocument=json.dumps(policy_document, indent=2),
        )
    except iam_client.exceptions.EntityAlreadyExistsException:
        print(f"role {role_name} already exists. Using existing one")


def _get_iam_role_arn(role_name):
    iam_client = boto3.client("iam")
    return iam_client.get_role(RoleName=role_name)["Role"]["Arn"]


def get_sagemaker_input():
    credentials_configuration = _ask_options(
        "How do you want to authorize?",
        ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "],
        int,
    )
    aws_profile = None
    if credentials_configuration == 0:
        aws_profile = _ask_field("Enter your AWS Profile name: [default] ", default="default")
        os.environ["AWS_PROFILE"] = aws_profile
    else:
        print(
            "Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"
            "`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`"
        )
        aws_access_key_id = _ask_field("AWS Access Key ID: ")
        os.environ["AWS_ACCESS_KEY_ID"] = aws_access_key_id

        aws_secret_access_key = _ask_field("AWS Secret Access Key: ")
        os.environ["AWS_SECRET_ACCESS_KEY"] = aws_secret_access_key

    aws_region = _ask_field("Enter your AWS Region: [us-east-1]", default="us-east-1")
    os.environ["AWS_DEFAULT_REGION"] = aws_region

    role_management = _ask_options(
        "Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?",
        ["Provide IAM Role name", "Create new IAM role using credentials"],
        int,
    )
    if role_management == 0:
        iam_role_name = _ask_field("Enter your IAM role name: ")
    else:
        iam_role_name = "accelerate_sagemaker_execution_role"
        print(f'Accelerate will create an iam role "{iam_role_name}" using the provided credentials')
        _create_iam_role_for_sagemaker(iam_role_name)

    is_custom_docker_image = _ask_field(
        "Do you want to use custom Docker image? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    docker_image = None
    if is_custom_docker_image:
        docker_image = _ask_field("Enter your Docker image: ", lambda x: str(x).lower())

    is_sagemaker_inputs_enabled = _ask_field(
        "Do you want to provide SageMaker input channels with data locations? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_inputs_file = None
    if is_sagemaker_inputs_enabled:
        sagemaker_inputs_file = _ask_field(
            "Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ",
            lambda x: str(x).lower(),
        )

    is_sagemaker_metrics_enabled = _ask_field(
        "Do you want to enable SageMaker metrics? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_metrics_file = None
    if is_sagemaker_metrics_enabled:
        sagemaker_metrics_file = _ask_field(
            "Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ",
            lambda x: str(x).lower(),
        )

    distributed_type = _ask_options(
        "What is the distributed mode?",
        ["No distributed training", "Data parallelism"],
        _convert_sagemaker_distributed_mode,
    )
    dynamo_config = {}
    use_dynamo = _ask_field(
        "Do you wish to optimize your script with torch dynamo?[yes/NO]:",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    if use_dynamo:
        prefix = "dynamo_"
        dynamo_config[prefix + "backend"] = _ask_options(
            "Which dynamo backend would you like to use?",
            [x.lower() for x in DYNAMO_BACKENDS],
            _convert_dynamo_backend,
            default=2,
        )
        use_custom_options = _ask_field(
            "Do you want to customize the defaults sent to torch.compile? [yes/NO]: ",
            _convert_yes_no_to_bool,
            default=False,
            error_message="Please enter yes or no.",
        )

        if use_custom_options:
            dynamo_config[prefix + "mode"] = _ask_options(
                "Which mode do you want to use?",
                TORCH_DYNAMO_MODES,
                lambda x: TORCH_DYNAMO_MODES[int(x)],
                default="default",
            )
            dynamo_config[prefix + "use_fullgraph"] = _ask_field(
                "Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )
            dynamo_config[prefix + "use_dynamic"] = _ask_field(
                "Do you want to enable dynamic shape tracing? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )

    ec2_instance_query = "Which EC2 instance type you want to use for your training?"
    if distributed_type != SageMakerDistributedType.NO:
        ec2_instance_type = _ask_options(
            ec2_instance_query, SAGEMAKER_PARALLEL_EC2_INSTANCES, lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x)]
        )
    else:
        ec2_instance_query += "? [ml.p3.2xlarge]:"
        ec2_instance_type = _ask_field(ec2_instance_query, lambda x: str(x).lower(), default="ml.p3.2xlarge")

    num_machines = 1
    if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
        num_machines = _ask_field(
            "How many machines do you want use? [1]: ",
            int,
            default=1,
        )

    mixed_precision = _ask_options(
        "Do you wish to use FP16 or BF16 (mixed precision)?",
        ["no", "fp16", "bf16", "fp8"],
        _convert_mixed_precision,
    )

    if use_dynamo and mixed_precision == "no":
        print(
            "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts."
        )

    return SageMakerConfig(
        image_uri=docker_image,
        compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,
        distributed_type=distributed_type,
        use_cpu=False,
        dynamo_config=dynamo_config,
        ec2_instance_type=ec2_instance_type,
        profile=aws_profile,
        region=aws_region,
        iam_role_name=iam_role_name,
        mixed_precision=mixed_precision,
        num_machines=num_machines,
        sagemaker_inputs_file=sagemaker_inputs_file,
        sagemaker_metrics_file=sagemaker_metrics_file,
    )
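# Rough call flow, for orientation (an assumption about the caller, not defined here):
# the `accelerate config` command invokes get_sagemaker_input() when the user picks
# Amazon SageMaker as the compute environment, then serializes the returned
# SageMakerConfig into the default config file that `accelerate launch` later reads.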
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re

from ..utils import cached_file


# docstyle-ignore
CHAT_MESSAGE_PROMPT = """
Human: <<task>>

Assistant: """


DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}


def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    """Downloads and caches the prompt from a repo and returns its contents (if necessary)."""
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
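# Usage sketch (the agent name and task text are illustrative; passing None falls back
# to the default prompts repo defined above):
#
#   template = download_prompt(None, agent_name="my-agent", mode="chat")
#   prompt = template.replace("<<task>>", "Caption the image.")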
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    """Improved Pseudo numerical methods for diffusion models (iPNDM)."""

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2

        self.alphas = (1.0 - self.betas**2) ** 0.5

        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)

        self.ets = []

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        # Linear multistep coefficients: fall back to lower orders until enough
        # previous evaluations have accumulated.
        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma

        return prev_sample

    def __len__(self):
        return self.config.num_train_timesteps
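# Denoising-loop sketch; `unet` and `sample` stand in for a real diffusion model and
# its noisy latents (only the scheduler calls below are defined in this file):
#
#   scheduler = IPNDMScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(50)
#   for t in scheduler.timesteps:
#       noise_pred = unet(sample, t).sample
#       sample = scheduler.step(noise_pred, t, sample).prev_sample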
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__lowercase : List[str] = logging.get_logger(__name__)
__lowercase : Tuple = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
__lowercase : int = {
"""vocab_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"""},
"""merges_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"""},
"""tokenizer_config_file""": {
"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"""
},
}
__lowercase : Dict = {"""facebook/blenderbot-3B""": 1_2_8}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def lowerCamelCase_ ( ):
lowerCamelCase_ = (
list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
)
lowerCamelCase_ = bs[:]
lowerCamelCase_ = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_lowerCamelCase )
cs.append(2**8 + n )
n += 1
lowerCamelCase_ = [chr(_lowerCamelCase ) for n in cs]
return dict(zip(_lowerCamelCase , _lowerCamelCase ) )
def lowerCamelCase_ ( _lowerCamelCase : Any ):
lowerCamelCase_ = set()
lowerCamelCase_ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowerCamelCase_ = char
return pairs
class lowerCAmelCase ( a ):
"""simple docstring"""
__lowercase :Any = VOCAB_FILES_NAMES
__lowercase :List[Any] = PRETRAINED_VOCAB_FILES_MAP
__lowercase :Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase :Dict = ["input_ids", "attention_mask"]
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__="replace" , UpperCamelCase__="<s>" , UpperCamelCase__="</s>" , UpperCamelCase__="</s>" , UpperCamelCase__="<s>" , UpperCamelCase__="<unk>" , UpperCamelCase__="<pad>" , UpperCamelCase__="<mask>" , UpperCamelCase__=False , **UpperCamelCase__ , ) -> Any:
'''simple docstring'''
lowerCamelCase_ = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else bos_token
lowerCamelCase_ = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else eos_token
lowerCamelCase_ = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else sep_token
lowerCamelCase_ = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else cls_token
lowerCamelCase_ = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else unk_token
lowerCamelCase_ = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase_ = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token
super().__init__(
errors=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , **UpperCamelCase__ , )
with open(UpperCamelCase__ , encoding='''utf-8''' ) as vocab_handle:
lowerCamelCase_ = json.load(UpperCamelCase__ )
lowerCamelCase_ = {v: k for k, v in self.encoder.items()}
lowerCamelCase_ = errors # how to handle errors in decoding
lowerCamelCase_ = bytes_to_unicode()
lowerCamelCase_ = {v: k for k, v in self.byte_encoder.items()}
with open(UpperCamelCase__ , encoding='''utf-8''' ) as merges_handle:
lowerCamelCase_ = merges_handle.read().split('''\n''' )[1:-1]
lowerCamelCase_ = [tuple(merge.split() ) for merge in bpe_merges]
lowerCamelCase_ = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
lowerCamelCase_ = {}
lowerCamelCase_ = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowerCamelCase_ = re.compile(r'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def _lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
return len(self.encoder )
def _lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def _lowerCAmelCase ( self , UpperCamelCase__ ) -> str:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
lowerCamelCase_ = tuple(UpperCamelCase__ )
lowerCamelCase_ = get_pairs(UpperCamelCase__ )
if not pairs:
return token
while True:
lowerCamelCase_ = min(UpperCamelCase__ , key=lambda UpperCamelCase__ : self.bpe_ranks.get(UpperCamelCase__ , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
lowerCamelCase_ , lowerCamelCase_ = bigram
lowerCamelCase_ = []
lowerCamelCase_ = 0
while i < len(UpperCamelCase__ ):
try:
lowerCamelCase_ = word.index(UpperCamelCase__ , UpperCamelCase__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCamelCase_ = j
if word[i] == first and i < len(UpperCamelCase__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCamelCase_ = tuple(UpperCamelCase__ )
lowerCamelCase_ = new_word
if len(UpperCamelCase__ ) == 1:
break
else:
lowerCamelCase_ = get_pairs(UpperCamelCase__ )
lowerCamelCase_ = ''' '''.join(UpperCamelCase__ )
lowerCamelCase_ = word
return word
def _lowerCAmelCase ( self , UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = []
for token in re.findall(self.pat , UpperCamelCase__ ):
lowerCamelCase_ = ''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCamelCase__ ).split(''' ''' ) )
return bpe_tokens
def _lowerCAmelCase ( self , UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
return self.encoder.get(UpperCamelCase__ , self.encoder.get(self.unk_token ) )
def _lowerCAmelCase ( self , UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
return self.decoder.get(UpperCamelCase__ )
def _lowerCAmelCase ( self , UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = ''''''.join(UpperCamelCase__ )
lowerCamelCase_ = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(UpperCamelCase__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCamelCase_ = os.path.join(
UpperCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase_ = os.path.join(
UpperCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCamelCase__ , ensure_ascii=UpperCamelCase__ ) + '''\n''' )
lowerCamelCase_ = 0
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCamelCase__ : kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
''' Please check that the tokenizer is not corrupted!''' )
lowerCamelCase_ = token_index
writer.write(''' '''.join(UpperCamelCase__ ) + '''\n''' )
index += 1
return vocab_file, merge_file
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        return token_ids_0 + [self.eos_token_id]
    def _build_conversation_input_ids(self, conversation):
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)
        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
| 142 | 0 |
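# --- Added example ---------------------------------------------------------
# A minimal, self-contained sketch of the BPE merge loop above on a toy
# merge-rank table. `toy_ranks` and `bpe_merge` are illustrative names only,
# and this simplified scan replaces the index-based inner loop with a direct
# pairwise walk; the merging behaviour is the same.
def get_pairs(word):
    return {(word[i], word[i + 1]) for i in range(len(word) - 1)}

def bpe_merge(token, ranks):
    word = tuple(token)
    while len(word) > 1:
        bigram = min(get_pairs(word), key=lambda pair: ranks.get(pair, float("inf")))
        if bigram not in ranks:
            break
        first, second = bigram
        new_word, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == (first, second):
                new_word.append(first + second)
                i += 2
            else:
                new_word.append(word[i])
                i += 1
        word = tuple(new_word)
    return " ".join(word)

toy_ranks = {("l", "o"): 0, ("lo", "w"): 1}
assert bpe_merge("low", toy_ranks) == "low"  # "l o w" -> "lo w" -> "low"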
'''simple docstring'''
from scipy.stats import pearsonr
import datasets
_DESCRIPTION = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`list` of `int`): Predicted class labels, as returned by a model.\n    references (`list` of `int`): Ground truth labels.\n    return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n    pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n    Example 1-A simple example using only predictions and references.\n        >>> pearsonr_metric = datasets.load_metric("pearsonr")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n        >>> print(round(results[\'pearsonr\'], 2))\n        -0.74\n\n    Example 2-The same as Example 1, but that also returns the `p-value`.\n        >>> pearsonr_metric = datasets.load_metric("pearsonr")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n        >>> print(sorted(list(results.keys())))\n        [\'p-value\', \'pearsonr\']\n        >>> print(round(results[\'pearsonr\'], 2))\n        -0.74\n        >>> print(round(results[\'p-value\'], 2))\n        0.15\n'
_CITATION = '\n@article{2020SciPy-NMeth,\nauthor  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n            Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n            Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n            Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n            Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n            Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n            Kern, Robert and Larson, Eric and Carey, C J and\n            Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n            {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n            Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n            Harris, Charles R. and Archibald, Anne M. and\n            Ribeiro, Antonio H. and Pedregosa, Fabian and\n            {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n            Computing in Python}},\njournal = {Nature Methods},\nyear    = {2020},\nvolume  = {17},\npages   = {261--272},\nadsurl  = {https://rdcu.be/b08Wh},\ndoi     = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Pearsonr(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
} ) ,reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"] ,)
    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
| 719 |
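# --- Added example ---------------------------------------------------------
# A quick standalone cross-check of the metric above (requires numpy and
# scipy): Pearson r computed via np.corrcoef matches scipy.stats.pearsonr
# on the data from the docstring example.
import numpy as np
from scipy.stats import pearsonr

preds = np.array([10, 9, 2.5, 6, 4])
refs = np.array([1, 2, 3, 4, 5])

manual_r = np.corrcoef(preds, refs)[0, 1]
scipy_r, p_value = pearsonr(preds, refs)
assert abs(manual_r - scipy_r) < 1e-12
print(round(scipy_r, 2))  # -0.74, matching the docstring example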
'''simple docstring'''
def is_pentagonal(n):
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit=5000):
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1
if __name__ == "__main__":
print(f'''{solution() = }''')
| 465 | 0 |
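# --- Added example ---------------------------------------------------------
# Standalone sanity check for the pentagonal-number predicate used above:
# every generated pentagonal number must satisfy is_pentagonal, and the
# numbers in between must not.
def is_pentagonal(n):
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0

pentagonals = {(i * (3 * i - 1)) // 2 for i in range(1, 100)}
for n in range(1, max(pentagonals) + 1):
    assert is_pentagonal(n) == (n in pentagonals)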
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0
class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
# If no defaults are changed, `to_kwargs` returns an empty dict.
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'''a''': 2} )
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'''a''': 2, '''c''': 2.25} )
    @require_cuda
    def test_grad_scaler_kwargs(self):
        # Pass a customized GradScalerKwargs handler and check it reaches the scaler.
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 1024.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2_0_0_0 )
        self.assertEqual(scaler._enabled, True)
    @require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 15:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 550 |
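# --- Added example ---------------------------------------------------------
# A minimal sketch of how a KwargsHandler-style `to_kwargs` can be built on
# plain dataclasses: only fields that differ from their defaults are emitted.
# This mirrors the behaviour asserted in the test above; `ToyHandler` is an
# illustrative stand-in, not accelerate's real implementation.
from dataclasses import dataclass, fields

@dataclass
class ToyHandler:
    a: int = 0
    b: bool = False
    c: float = 3.0

    def to_kwargs(self):
        return {
            f.name: getattr(self, f.name)
            for f in fields(self)
            if getattr(self, f.name) != f.default
        }

assert ToyHandler().to_kwargs() == {}
assert ToyHandler(a=2, b=True).to_kwargs() == {"a": 2, "b": True}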
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
class DeeBertTests(TestCasePlus):
    def setUp(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

    def run_and_check(self, args):
        n_gpu = get_gpu_count()
        if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)
@slow
@require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        testargs = '''
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
'''.split()
        self.run_and_check(testargs)

        testargs = '''
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
'''.split()
        self.run_and_check(testargs)

        testargs = '''
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
'''.split()
        self.run_and_check(testargs)
| 550 | 1 |
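# --- Added example ---------------------------------------------------------
# The test above drives an example script by patching sys.argv; here is the
# same pattern in isolation, with a stand-in main() instead of the real
# run_glue_deebert module.
import sys
from unittest.mock import patch

def main():
    return sys.argv[1:]

testargs = ["prog.py", "--model_type", "roberta"]
with patch.object(sys, "argv", testargs):
    assert main() == ["--model_type", "roberta"]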
from string import ascii_uppercase
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}


def decimal_to_any(num, base):
    if isinstance(num, float):
        raise TypeError("int() can't convert non-string with explicit base")
    if num < 0:
        raise ValueError("parameter must be positive int")
    if isinstance(base, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if isinstance(base, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if base in (0, 1):
        raise ValueError("base must be >= 2")
    if base > 36:
        raise ValueError("base must be <= 36")
    new_value = ""
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(num)
            return str(new_value[::-1])
    return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
| 706 |
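# --- Added example ---------------------------------------------------------
# Standalone cross-check of the base-conversion idea above: Python's int()
# can parse the produced string back, which is exactly what the assert loop
# at the end of the snippet relies on. `to_base` is an illustrative
# iterative-divmod version of the same algorithm.
from string import ascii_uppercase

ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}

def to_base(num, base):
    if num == 0:
        return "0"
    digits = []
    while num:
        num, mod = divmod(num, base)
        digits.append(ALPHABET_VALUES.get(str(mod), str(mod)))
    return "".join(reversed(digits))

assert to_base(255, 16) == "FF" and int("FF", 16) == 255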
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'''configuration_vit''': ['''VIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTConfig''', '''ViTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit"] = [
'''VIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTForImageClassification''',
'''ViTForMaskedImageModeling''',
'''ViTModel''',
'''ViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit"] = [
'''TFViTForImageClassification''',
'''TFViTModel''',
'''TFViTPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vit"] = [
'''FlaxViTForImageClassification''',
'''FlaxViTModel''',
'''FlaxViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 478 | 0 |
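# --- Added example ---------------------------------------------------------
# A minimal sketch of the lazy-import pattern used above: module attributes
# are resolved on first access instead of at import time. `LazyModule` here
# is a toy stand-in for transformers' _LazyModule, not its real
# implementation.
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute back to the module that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        module = importlib.import_module(self._attr_to_module[attr])
        return getattr(module, attr)

# e.g. LazyModule("m", {"json": ["dumps", "loads"]}).dumps({"a": 1})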
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'''configuration_unispeech''': ['''UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''UniSpeechConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_unispeech"] = [
'''UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''UniSpeechForCTC''',
'''UniSpeechForPreTraining''',
'''UniSpeechForSequenceClassification''',
'''UniSpeechModel''',
'''UniSpeechPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 422 |
'''simple docstring'''
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.14.0''', '''To fix: pip install -r examples/pytorch/audio-classification/requirements.txt''')
def random_subsample(wav, max_length, sample_rate=16_000):
    """Randomly sample chunks of `max_length` seconds from the input audio."""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(default=None, metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the training audio paths and labels."}
    )
    eval_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the validation audio paths and labels."}
    )
    train_split_name: str = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    eval_split_name: str = field(
        default="validation",
        metadata={
            "help": (
                "The name of the training data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    audio_column_name: str = field(
        default="audio",
        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
    )
    label_column_name: str = field(
        default="label", metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_length_seconds: float = field(
        default=20,
        metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."},
    )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default="facebook/wav2vec2-base",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    feature_extractor_name: Optional[str] = field(
        default=None, metadata={"help": "Name or path of preprocessor config."}
    )
    freeze_feature_encoder: bool = field(
        default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
    )
    attention_mask: bool = field(
        default=True, metadata={"help": "Whether to generate an attention mask in the feature extractor."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=None, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
    def __post_init__(self):
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder`"
                "instead. Setting `freeze_feature_encoder==True`.",
                FutureWarning,
            )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
'''The argument `--freeze_feature_extractor` is deprecated and '''
'''should not be used in combination with `--freeze_feature_encoder`.'''
'''Only make use of `--freeze_feature_encoder`.''' )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_audio_classification", model_args, data_args)
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} """
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to train from scratch.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.train_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.eval_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"""--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. """
'''Make sure to set `--audio_column_name` to the correct audio column - one of '''
F"""{", ".join(raw_datasets["train"].column_names )}.""" )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"""--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. """
'''Make sure to set `--label_column_name` to the correct text column - one of '''
F"""{", ".join(raw_datasets["train"].column_names )}.""" )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path,
        return_attention_mask=model_args.attention_mask,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
    )

    model_input_name = feature_extractor.model_input_names[0]
    def train_transforms(batch):
        """Apply train_transforms (random cropping + feature extraction) across a batch."""
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate
            )
            subsampled_wavs.append(wav)
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    def val_transforms(batch):
        """Apply val_transforms (feature extraction only) across a batch."""
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["train"].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred):
        predictions = np.argmax(eval_pred.predictions, axis=1)
        return metric.compute(predictions=predictions, references=eval_pred.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="audio-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms, output_all_columns=False)

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=raw_datasets["train"] if training_args.do_train else None,
        eval_dataset=raw_datasets["eval"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=feature_extractor,
    )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model()
trainer.log_metrics('''train''' , train_result.metrics )
trainer.save_metrics('''train''' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
# Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "audio-classification",
        "dataset": data_args.dataset_name,
        "tags": ["audio-classification"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
| 422 | 1 |
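# --- Added example ---------------------------------------------------------
# Standalone check of the random cropping helper above: a clip longer than
# max_length seconds comes back exactly sample_rate * max_length samples
# long, a shorter clip is returned untouched.
import numpy as np
from random import randint

def random_subsample(wav, max_length, sample_rate=16_000):
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]

long_clip = np.zeros(16_000 * 30)
assert len(random_subsample(long_clip, max_length=20)) == 16_000 * 20
short_clip = np.zeros(16_000 * 5)
assert random_subsample(short_clip, max_length=20) is short_clip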
def matching_min_vertex_cover(graph):
    chosen_vertices = set()
    # edges = list of graph's edges
    edges = get_edges(graph)

    # While there are still elements in edges list, take an arbitrary edge
    # (from_node, to_node) and add his extremity to chosen_vertices and then
    # remove all arcs adjacent to the from_node and to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph):
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 704 |
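# --- Added example ---------------------------------------------------------
# Worked example for the matching-based 2-approximation above, assuming
# matching_min_vertex_cover from the snippet is in scope; it uses the graph
# from the trailing comment and checks that the chosen set touches every
# edge.
graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
cover = matching_min_vertex_cover(graph)
for u, vs in graph.items():
    for v in vs:
        assert u in cover or v in cover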
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}
class MgpstrTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        return (vocab_file,)
| 230 | 0 |
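# --- Added example ---------------------------------------------------------
# A minimal sketch of the character-level tokenization used above, with a
# toy vocabulary; real MGP-STR checkpoints ship their own vocab.json, and
# the names here (toy_vocab, encode) are illustrative only.
toy_vocab = {"[GO]": 0, "[s]": 1, "a": 2, "b": 3, "c": 4}
toy_decoder = {v: k for k, v in toy_vocab.items()}

def encode(text):
    # character-level: every character is a token; unknowns map to "[GO]"
    return [toy_vocab.get(ch, toy_vocab["[GO]"]) for ch in text]

assert encode("abc") == [2, 3, 4]
assert [toy_decoder[i] for i in encode("abz")] == ["a", "b", "[GO]"]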
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPSegProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, visual_prompt=None, return_tensors=None, **kwargs):
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")
        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 406 |
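# --- Added example ---------------------------------------------------------
# The processor above routes to different output shapes depending on which
# of text / visual_prompt / images are given; a standalone sketch of that
# dispatch, independent of the real tokenizer and image processor (the
# "input_ids"/"attention_mask" keys are assumed tokenizer outputs).
def expected_keys(text, visual_prompt, images):
    if text is None and visual_prompt is None and images is None:
        raise ValueError("You have to specify either text, visual prompt or images.")
    if text is not None and visual_prompt is not None:
        raise ValueError("You have to specify exactly one type of prompt.")
    if visual_prompt is not None and images is not None:
        return {"pixel_values", "conditional_pixel_values"}
    if text is not None and images is not None:
        return {"input_ids", "attention_mask", "pixel_values"}
    if text is not None:
        return {"input_ids", "attention_mask"}
    return {"conditional_pixel_values"} if visual_prompt is not None else {"pixel_values"}

assert expected_keys("a cat", None, "img") == {"input_ids", "attention_mask", "pixel_values"}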
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
    class CursorInfo(ctypes.Structure):
        # _fields_ is the ctypes layout of the Windows CONSOLE_CURSOR_INFO struct
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]
def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
elif os.name == "posix":
sys.stdout.write('\033[?25l' )
sys.stdout.flush()
def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
elif os.name == "posix":
sys.stdout.write('\033[?25h' )
sys.stdout.flush()
@contextmanager
def hidden_cursor():  # name inferred; the original name is not recoverable from this snippet
'''simple docstring'''
try:
hide_cursor()
yield
finally:
show_cursor()
| 406 | 1 |
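# --- Added example ---------------------------------------------------------
# The POSIX branch above is just two ANSI escape sequences; a tiny
# self-contained demo that hides the terminal cursor while "working".
import sys
import time
from contextlib import contextmanager

@contextmanager
def cursor_hidden(stream=sys.stdout):
    stream.write("\033[?25l")  # hide cursor
    stream.flush()
    try:
        yield
    finally:
        stream.write("\033[?25h")  # show cursor again
        stream.flush()

if __name__ == "__main__":
    with cursor_hidden():
        time.sleep(1)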
"""simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained Reformer model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 509 |
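# --- Added example ---------------------------------------------------------
# The conversion above boils down to shape-checked parameter copies; a
# standalone sketch of that core step on a toy linear layer (requires
# numpy and torch).
import numpy as np
import torch
from torch import nn

def set_param(torch_layer, weight, bias=None):
    assert torch_layer.weight.shape == weight.shape, "layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, "layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)

layer = nn.Linear(4, 2)
np_weight = np.ones((2, 4), dtype=np.float32)
set_param(layer, torch.tensor(np_weight), torch.zeros(2))
assert torch.equal(layer.weight, torch.ones(2, 4))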
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
    StableDiffusionLDM3DPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class StableDiffusionLDM3DPipelineFastTests(unittest.TestCase):
    pipeline_class = StableDiffusionLDM3DPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear",
            clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=6, out_channels=6,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        image_slice_rgb = rgb[0, -3:, -3:, -1]
        image_slice_depth = depth[0, -3:, -1]

        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)

        expected_slice_rgb = np.array(
            [0.37338176, 0.70247, 0.74203193, 0.51643604, 0.58256793, 0.60932136, 0.4181095, 0.48355877, 0.46535262]
        )
        expected_slice_depth = np.array([103.46727, 85.812004, 87.849236])

        assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(image_slice_depth.flatten() - expected_slice_depth).max() < 1e-2
    def test_stable_diffusion_prompt_embeds(self):
        components = self.get_dummy_components()
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = ldm3d_pipe(**inputs)
        rgb_slice_1, depth_slice_1 = output.rgb, output.depth
        rgb_slice_1 = rgb_slice_1[0, -3:, -3:, -1]
        depth_slice_1 = depth_slice_1[0, -3:, -1]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = ldm3d_pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=ldm3d_pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = ldm3d_pipe.text_encoder(text_inputs)[0]
        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = ldm3d_pipe(**inputs)
        rgb_slice_2, depth_slice_2 = output.rgb, output.depth
        rgb_slice_2 = rgb_slice_2[0, -3:, -3:, -1]
        depth_slice_2 = depth_slice_2[0, -3:, -1]

        assert np.abs(rgb_slice_1.flatten() - rgb_slice_2.flatten()).max() < 1e-4
        assert np.abs(depth_slice_1.flatten() - depth_slice_2.flatten()).max() < 1e-4
    def test_stable_diffusion_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = ldm3d_pipe(**inputs, negative_prompt=negative_prompt)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1]
        depth_slice = depth[0, -3:, -1]

        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)

        expected_slice_rgb = np.array(
            [0.37044, 0.71811503, 0.7223251, 0.48603675, 0.5638391, 0.6364948, 0.42833704, 0.4901315, 0.47926217]
        )
        expected_slice_depth = np.array([107.84738, 84.62802, 89.962135])
        assert np.abs(rgb_slice.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(depth_slice.flatten() - expected_slice_depth).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionLDM3DPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self , __UpperCAmelCase , __UpperCAmelCase="cpu" , __UpperCAmelCase=torch.floataa , __UpperCAmelCase=0 ) -> Union[str, Any]:
a : Any = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
a : str = np.random.RandomState(__UpperCAmelCase ).standard_normal((1, 4, 64, 64) )
a : Any = torch.from_numpy(__UpperCAmelCase ).to(device=__UpperCAmelCase , dtype=__UpperCAmelCase )
a : int = {
'prompt': 'a photograph of an astronaut riding a horse',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
    def test_ldm3d_stable_diffusion(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d")
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1].flatten()
        depth_slice = rgb[0, -3:, -1].flatten()

        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512)

        expected_slice_rgb = np.array(
            [0.53805465, 0.56707305, 0.5486515, 0.57012236, 0.5814511, 0.56253487, 0.54843014, 0.55092263, 0.6459706]
        )
        expected_slice_depth = np.array(
            [0.9263781, 0.6678672, 0.5486515, 0.92202145, 0.67831135, 0.56253487, 0.9241694, 0.7551478, 0.6459706]
        )
        assert np.abs(rgb_slice - expected_slice_rgb).max() < 3e-3
        assert np.abs(depth_slice - expected_slice_depth).max() < 3e-3
@nightly
@require_torch_gpu
class StableDiffusionLDM3DPipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self , __UpperCAmelCase , __UpperCAmelCase="cpu" , __UpperCAmelCase=torch.floataa , __UpperCAmelCase=0 ) -> int:
a : Tuple = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
a : Dict = np.random.RandomState(__UpperCAmelCase ).standard_normal((1, 4, 64, 64) )
a : Union[str, Any] = torch.from_numpy(__UpperCAmelCase ).to(device=__UpperCAmelCase , dtype=__UpperCAmelCase )
a : str = {
'prompt': 'a photograph of an astronaut riding a horse',
'latents': latents,
'generator': generator,
'num_inference_steps': 50,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
    def test_ldm3d(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d").to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        expected_rgb_mean = 0.495586
        expected_rgb_std = 0.33795515
        expected_depth_mean = 112.48518
        expected_depth_std = 98.489746

        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3
    def test_ldm3d_v2(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d-4c").to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        expected_rgb_mean = 0.4194127
        expected_rgb_std = 0.35375586
        expected_depth_mean = 0.5638502
        expected_depth_std = 0.34686103

        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512, 1)
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3
| 509 | 1 |
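# --- Added example ---------------------------------------------------------
# The assertions above all follow one pattern: compare a small corner slice
# of the output against hard-coded expected values within a tolerance. A
# standalone sketch of that helper (names are illustrative):
import numpy as np

def assert_close(actual_slice, expected_slice, atol=1e-2):
    actual = np.asarray(actual_slice).flatten()
    expected = np.asarray(expected_slice).flatten()
    assert np.abs(actual - expected).max() < atol

rgb = np.full((1, 64, 64, 3), 0.5)
assert_close(rgb[0, -3:, -3:, -1], np.full(9, 0.5))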
from math import factorial
def binomial_distribution(successes, trials, prob):
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print('''Probability of 2 successes out of 4 trails''')
print('''with probability of 0.75 is:''', end=''' ''')
print(binomial_distribution(2, 4, 0.75)) | 397 |
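# Counts the bits needed to represent a non-negative integer by shifting right until
# the value reaches zero; equivalent to int.bit_length() for inputs >= 0.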
def get_highest_set_bit_position(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position


if __name__ == "__main__":
    import doctest

    doctest.testmod() | 397 | 1 |
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
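# Maps a SimMIM pre-training checkpoint (timm-style key names, fused qkv projections)
# onto the Hugging Face SwinForMaskedImageModeling layout, then smoke-tests the result
# on a sample COCO image before saving or pushing to the Hub.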
def get_swin_config(model_name):
    config = SwinConfig(image_size=192)

    if "base" in model_name:
        window_size = 6
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError("Model not supported, only supports base and large variants")

    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads

    return config
def rename_key(name):
    if "encoder.mask_token" in name:
        name = name.replace("encoder.mask_token", "embeddings.mask_token")
    if "encoder.patch_embed.proj" in name:
        name = name.replace("encoder.patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "encoder.patch_embed.norm" in name:
        name = name.replace("encoder.patch_embed.norm", "embeddings.norm")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "encoder.norm.weight":
        name = "layernorm.weight"
    if name == "encoder.norm.bias":
        name = "layernorm.bias"

    if "decoder" in name:
        pass
    else:
        name = "swin." + name

    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            # Split the fused qkv projection into separate query / key / value tensors.
            key_split = key.split(".")
            layer_num = int(key_split[2])
            block_num = int(key_split[4])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_swin_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    config = get_swin_config(model_name)
    model = SwinForMaskedImageModeling(config)
    model.eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = ViTImageProcessor(size={"height": 192, "width": 192})
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs).logits

    print(outputs.keys())
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and image processor for {model_name} to hub")
        model.push_to_hub(f"microsoft/{model_name}")
        image_processor.push_to_hub(f"microsoft/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="swin-base-simmim-window6-192",
type=str,
choices=["swin-base-simmim-window6-192", "swin-large-simmim-window12-192"],
help="Name of the Swin SimMIM model you'd like to convert.",
)
parser.add_argument(
"--checkpoint_path",
default="/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth",
type=str,
help="Path to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 92 |
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
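# Process-global switch: when `backend_name` is set (via the `parallel_backend` context
# manager below), map-style work is routed through joblib instead of multiprocessing.Pool.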
logger = logging.get_logger(__name__)


class ParallelBackendConfig:
    backend_name = None
@experimental
def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Apply a function to iterable elements in parallel, using either multiprocessing.Pool or joblib."""
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func
        )

    return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)
def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
    split_kwds = []  # We organize the splits ourselves (contiguous splits)
    for index in range(num_proc):
        div = len(iterable) // num_proc
        mod = len(iterable) % num_proc
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc))

    if len(iterable) != sum(len(i[1]) for i in split_kwds):
        raise ValueError(
            f"Error dividing inputs iterable among processes. "
            f"Total number of objects {len(iterable)}, "
            f"length: {sum(len(i[1]) for i in split_kwds)}"
        )

    logger.info(
        f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}"
    )
    initargs, initializer = None, None
    if not disable_tqdm:
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
        mapped = pool.map(single_map_nested_func, split_kwds)
    logger.info(f"Finished {num_proc} processes")
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(f"Unpacked {len(mapped)} objects")

    return mapped
def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    # progress bar is not yet supported here, since tqdm cannot cleanly be applied to joblib workers
    import joblib

    with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable
        )
@experimental
@contextlib.contextmanager
def parallel_backend(backend_name):
    """Configure the parallel backend used by `datasets` for parallelized dataset processing."""
    ParallelBackendConfig.backend_name = backend_name

    if backend_name == "spark":
        from joblibspark import register_spark

        register_spark()

        # TODO: call create_cache_and_write_probe if "download" in steps
        # TODO: raise NotImplementedError when Dataset.map etc is called

    try:
        yield
    finally:
        ParallelBackendConfig.backend_name = None
| 92 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swinv2"] = [
"SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Swinv2ForImageClassification",
"Swinv2ForMaskedImageModeling",
"Swinv2Model",
"Swinv2PreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    from .modeling_swinv2 import (
        SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
        Swinv2ForImageClassification,
        Swinv2ForMaskedImageModeling,
        Swinv2Model,
        Swinv2PreTrainedModel,
    )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 470 | import warnings
from .generation import TFGenerationMixin
class TFGenerationMixin(TFGenerationMixin):
    # warning at import time
    warnings.warn(
        "Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will "
        "be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.",
        FutureWarning,
    )
| 321 | 0 |
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
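# The pipeline below keeps dialogue state in `Conversation` objects: past user inputs,
# generated responses, and the latest, not-yet-processed user input.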
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


class Conversation:
    """Utility class containing a conversation and its history."""

    def __init__(self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid: uuid.UUID = conversation_id
        self.past_user_inputs: List[str] = past_user_inputs
        self.generated_responses: List[str] = generated_responses
        self.new_user_input: Optional[str] = text

    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text: str, overwrite: bool = False):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".'
                )
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input'
                )
        else:
            self.new_user_input = text

    def mark_processed(self):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        self.generated_responses.append(response)

    def iter_texts(self):
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"{name} >> {text} \n"
        return output
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    """,
)
class ConversationalPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token
    def _sanitize_parameters(self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}

        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens

        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params
    def __call__(self, conversations: Union[Conversation, List[Conversation]], num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs
    def preprocess(self, conversation: Conversation, min_length_for_response=32) -> Dict[str, Any]:
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs")
        if conversation.new_user_input is None:
            raise ValueError(
                f"Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method"
            )
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)

        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}
    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)

        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})")
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0],
            skip_special_tokens=True,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
        )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation
    def _legacy_parse_and_tokenize(self, conversation: Conversation) -> List[int]:
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))

        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids | 175 |
from typing import Union
import fire
import torch
from tqdm import tqdm
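# Halves every tensor in a saved state dict, shrinking a checkpoint to fp16 on disk.
# The input file is overwritten unless a separate save path is given.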
def _SCREAMING_SNAKE_CASE ( snake_case , snake_case = "cpu" , snake_case = None ) -> None:
_UpperCAmelCase = torch.load(snake_case , map_location=snake_case )
for k, v in tqdm(state_dict.items() ):
if not isinstance(snake_case , torch.Tensor ):
raise TypeError("""FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin""" )
_UpperCAmelCase = v.half()
if save_path is None: # overwrite src_path
_UpperCAmelCase = src_path
torch.save(snake_case , snake_case )
if __name__ == "__main__":
fire.Fire(convert) | 175 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
logger = logging.get_logger(__name__)
LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/layoutlmv3-base''': '''https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json''',
}
class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"

    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-5, pad_token_id=1, bos_token_id=0, eos_token_id=2, max_2d_position_embeddings=1024, coordinate_size=128, shape_size=128, has_relative_attention_bias=True, rel_pos_bins=32, max_rel_pos=128, rel_2d_pos_bins=64, max_rel_2d_pos=256, has_spatial_attention_bias=True, text_embed=True, visual_embed=True, input_size=224, num_channels=3, patch_size=16, classifier_dropout=None, **kwargs):
        super().__init__(vocab_size=vocab_size, hidden_size=hidden_size, num_hidden_layers=num_hidden_layers, num_attention_heads=num_attention_heads, intermediate_size=intermediate_size, hidden_act=hidden_act, hidden_dropout_prob=hidden_dropout_prob, attention_probs_dropout_prob=attention_probs_dropout_prob, max_position_embeddings=max_position_embeddings, type_vocab_size=type_vocab_size, initializer_range=initializer_range, layer_norm_eps=layer_norm_eps, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # The order of inputs is different for question answering and sequence classification
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
def A ( self : Dict , a_ : "ProcessorMixin" , a_ : int = -1 , a_ : int = -1 , a_ : bool = False , a_ : Optional["TensorType"] = None , a_ : int = 3 , a_ : int = 40 , a_ : int = 40 , ):
"""simple docstring"""
setattr(processor.image_processor , "apply_ocr" , a_ )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__snake_case = compute_effective_axis_dimension(
a_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__snake_case = processor.tokenizer.num_special_tokens_to_add(a_ )
__snake_case = compute_effective_axis_dimension(
a_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=a_ )
# Generate dummy inputs according to compute batch and sequence
__snake_case = [[" ".join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
__snake_case = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
__snake_case = self._generate_dummy_images(a_ , a_ , a_ , a_ )
__snake_case = dict(
processor(
a_ , text=a_ , boxes=a_ , return_tensors=a_ , ) )
return inputs
| 69 |
from PIL import Image
def mean_threshold(image: Image) -> Image:
    """
    image: is a grayscale PIL image object
    """
    height, width = image.size
    mean = 0
    pixels = image.load()
    for i in range(width):
        for j in range(height):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height

    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image


if __name__ == "__main__":
    image = mean_threshold(Image.open("path_to_image").convert("L"))
    image.save("output_image_path")
| 331 | 0 |
import torch
def main():
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"Successfully ran on {num_gpus} GPUs")


if __name__ == "__main__":
    main()
| 720 |
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
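# Legacy PyTorch dataset for SQuAD: features are converted once and cached on disk,
# keyed by split, tokenizer class, max sequence length and SQuAD version.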
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """
    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
"help": (
"The maximum number of tokens for the question. Questions longer than this will "
"be truncated to this length."
)
} , )
    max_answer_length: int = field(
        default=30,
        metadata={
"help": (
"The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another."
)
} , )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20,
        metadata={"help": "The total number of n-best predictions to generate in the nbest_predictions.json output file."},
    )
    lang_id: int = field(
        default=0,
        metadata={
"help": (
"language id of input for language-specific xlm models (see"
" tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
)
} , )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
class Split(Enum):
    train = "train"
    dev = "dev"
class SquadDataset(Dataset):
    """
    This will be superseded by a framework-agnostic approach soon.
    """

    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__(self, args: SquadDataTrainingArguments, tokenizer: PreTrainedTokenizer, limit_length: Optional[int] = None, mode: Union[str, Split] = Split.train, is_language_sensitive: Optional[bool] = False, cache_dir: Optional[str] = None, dataset_format: Optional[str] = "pt"):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)

                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )
    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
| 5 | 0 |
import re
from filelock import FileLock
try:
import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock(".lock") as lock:
nltk.download("punkt", quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    """This was added to get rougeLsum scores matching published rougeL scores for BART and PEGASUS."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x)) | 63 | from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
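# The tester builds a tiny random Pegasus config and checks that decoding with cached
# past_key_values matches a full forward pass over the same extended sequence.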
@require_tf
class TFPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=40, eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFPegasusModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFPegasusForConditionalGeneration,
            "feature-extraction": TFPegasusModel,
            "summarization": TFPegasusForConditionalGeneration,
            "text2text-generation": TFPegasusForConditionalGeneration,
            "translation": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase):
    src_text = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
    expected_text = [
"California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to"
" reduce the risk of wildfires.",
"N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.",
] # differs slightly from pytorch, likely due to numerical differences in linear layers
_A = "google/pegasus-xsum"
    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        assert self.expected_text == generated_words

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=True
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation(self):
        self._assert_generated_batch_equal_expected()
| 423 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
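# The maps below list the per-checkpoint vocab/tokenizer files, model max lengths and
# init defaults; `batch_encode_candidates` further down pads every candidate to
# max_length so candidates can be stacked into a single batch.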
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt'
),
'google/realm-cc-news-pretrained-openqa': (
            'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt',
},
'tokenizer_file': {
'google/realm-cc-news-pretrained-embedder': (
            'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json'
),
'google/realm-cc-news-pretrained-openqa': (
            'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json'
),
'google/realm-orqa-nq-openqa': (
'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json'
),
'google/realm-orqa-nq-reader': (
'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json'
),
'google/realm-orqa-wq-openqa': (
'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json'
),
'google/realm-orqa-wq-reader': (
'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/realm-cc-news-pretrained-embedder': 5_1_2,
'google/realm-cc-news-pretrained-encoder': 5_1_2,
'google/realm-cc-news-pretrained-scorer': 5_1_2,
'google/realm-cc-news-pretrained-openqa': 5_1_2,
'google/realm-orqa-nq-openqa': 5_1_2,
'google/realm-orqa-nq-reader': 5_1_2,
'google/realm-orqa-wq-openqa': 5_1_2,
'google/realm-orqa-wq-reader': 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
'google/realm-cc-news-pretrained-embedder': {'do_lower_case': True},
'google/realm-cc-news-pretrained-encoder': {'do_lower_case': True},
'google/realm-cc-news-pretrained-scorer': {'do_lower_case': True},
'google/realm-cc-news-pretrained-openqa': {'do_lower_case': True},
'google/realm-orqa-nq-openqa': {'do_lower_case': True},
'google/realm-orqa-nq-reader': {'do_lower_case': True},
'google/realm-orqa-wq-openqa': {'do_lower_case': True},
'google/realm-orqa-wq-reader': {'do_lower_case': True},
}
class RealmTokenizerFast(PreTrainedTokenizerFast):
    r"""Construct a "fast" REALM tokenizer (backed by HuggingFace's *tokenizers* library), based on WordPiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def batch_encode_candidates(self, text, **kwargs):
        # Always use a fixed sequence length so every candidate can be stacked into a batch.
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH

        batch_text = text
        batch_text_pair = kwargs.pop("text_pair", None)
        return_tensors = kwargs.pop("return_tensors", None)

        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }

        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None

            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)

            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")

            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)

        output_data = {key: item for key, item in output_data.items() if len(item) != 0}

        return BatchEncoding(output_data, tensor_type=return_tensors)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 438 | '''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=32, projection_dim=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, bos_token_id=0, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()

        return config, input_ids, tf.convert_to_tensor(input_mask)
    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, bos_token_id=self.bos_token_id,
        )
    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class BlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    fx_compatible = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
| 438 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
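# AlignProcessor pairs a BERT tokenizer with an EfficientNet image processor; the tests
# below cover save/load round-trips and parity with the two underlying components.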
@require_vision
class AlignProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs(self):
        """This function prepares a list of random PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = AlignProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = AlignProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = AlignProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, EfficientNetImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, EfficientNetImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = AlignProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, EfficientNetImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, padding="max_length", max_length=64)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 304 |
# Lint as: python3
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")
_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")
_split_re = r"^\w+(\.\w+)*$"
INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"
def camelcase_to_snakecase(name):
    """Convert camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    """Convert snake-case string to camel-case string."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")
def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}'' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"
def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)
    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
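# Illustrative sanity check of the naming helpers above (values chosen for the example):
#   camelcase_to_snakecase("SquadV2")               # -> "squad_v2"
#   snakecase_to_camelcase("squad_v2")              # -> "SquadV2"
#   filename_prefix_for_split("SquadV2", "train")   # -> "squad_v2-train"
#   filenames_for_dataset_split("/data", "SquadV2", "train", "arrow", shard_lengths=[100, 100])
#   # -> ["/data/squad_v2-train-00000-of-00002.arrow", "/data/squad_v2-train-00001-of-00002.arrow"]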
| 304 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import GPT2Tokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
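# For example, with pad_token_id=1, input_ids [[5, 6, 1]] yields attention_mask [[1, 1, 0]].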
@require_tf
class TFOPTModelTester:
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, embed_dim=16, word_embed_proj_dim=16):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        config = self.config_cls(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, embed_dim=self.embed_dim, word_embed_proj_dim=self.word_embed_proj_dim, is_encoder_decoder=False, **self.config_updates)
        inputs_dict = prepare_opt_inputs_dict(config, input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFOPTModel(config=config)
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
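    # The method above is the standard KV-cache equivalence check: one forward pass over
    # the extended sequence must match a cached pass that only feeds the new tokens.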
@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10
    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_resize_token_embeddings(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, "weight"):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer, "weight"):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0], assert_size)

                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)

                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)

                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
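# e.g. _long_tensor([[0, 31414, 2]]) -> a (1, 3) int32 tf.Tensor ready for the model.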
@require_tf
class TFOPTHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2
        input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1)
        batch_size = input_ids.shape[0]
        config = OPTConfig(vocab_size=self.vocab_size, hidden_size=24, num_hidden_layers=2, num_attention_heads=2, ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0)
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class TFOPTModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFOPTModel.from_pretrained("facebook/opt-350m")
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.not_equal(input_ids, model.config.pad_token_id)
        with tf.GradientTape():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]]
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3))

        xla_generate = tf.function(model, jit_compile=True)
        output = xla_generate(input_ids, attention_mask)[0]
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2))
@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.path_model = "facebook/opt-350m"

    def test_logits(self):
        model = TFOPTForCausalLM.from_pretrained(self.path_model)
        tokenizer = GPT2Tokenizer.from_pretrained(self.path_model)

        prompts = [
            "Today is a beautiful day and I want to",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts, return_tensors="tf", padding=True, add_special_tokens=False)
        logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ]
        )
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))

        xla_generate = tf.function(model, jit_compile=True)
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))
@require_tf
@slow
class TFOPTGenerationTest(unittest.TestCase):
    @property
    def prompts(self):
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]

    def test_generation_pre_attn_layer_norm(self):
        model_id = "facebook/opt-125m"
        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of New York, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)

    def test_batch_generation(self):
        model_id = "facebook/opt-350m"
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]
        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]
        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"])

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["attention_mask"][-1], tf.int64)
        )
        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit of a dork.\nI'm a little bit",
            "Today, I was in the middle of a conversation with a friend about the",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])

    def test_generation_post_attn_layer_norm(self):
        model_id = "facebook/opt-350m"
        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of San Francisco, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS) | 257 | '''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class AltDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32)
        scheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4)
        # TODO: address the non-deterministic text encoder (fails for save-load tests)
        # torch.manual_seed(0)
        # text_encoder_config = RobertaSeriesConfig(
        #     hidden_size=32,
        #     project_dim=32,
        #     intermediate_size=37,
        #     layer_norm_eps=1e-05,
        #     num_attention_heads=4,
        #     num_hidden_layers=5,
        #     vocab_size=5002,
        # )
        # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(bos_token_id=0, eos_token_id=2, hidden_size=32, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=5002)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
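    # Two inference steps keep the fast tests cheap while still exercising the full
    # scheduler/denoising loop end to end.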
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
    def test_alt_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, vocab_size=5002)
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder

        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = "A photo of an astronaut"
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_alt_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, vocab_size=5002)
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder

        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_alt_diffusion(self):
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=20, output_type="np")

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_fast_ddim(self):
        scheduler = DDIMScheduler.from_pretrained("BAAI/AltDiffusion", subfolder="scheduler")
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", scheduler=scheduler, safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, num_inference_steps=2, output_type="numpy")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 | 257 | 1 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/vocab.txt''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/vocab.txt''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'''
),
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'''
),
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt''',
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'''
),
'''bert-base-multilingual-cased''': (
'''https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-cased''': (
'''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''bert-base-uncased''': 5_12,
'''bert-large-uncased''': 5_12,
'''bert-base-cased''': 5_12,
'''bert-large-cased''': 5_12,
'''bert-base-multilingual-uncased''': 5_12,
'''bert-base-multilingual-cased''': 5_12,
'''bert-base-chinese''': 5_12,
'''bert-base-german-cased''': 5_12,
'''bert-large-uncased-whole-word-masking''': 5_12,
'''bert-large-cased-whole-word-masking''': 5_12,
'''bert-large-uncased-whole-word-masking-finetuned-squad''': 5_12,
'''bert-large-cased-whole-word-masking-finetuned-squad''': 5_12,
'''bert-base-cased-finetuned-mrpc''': 5_12,
'''bert-base-german-dbmdz-cased''': 5_12,
'''bert-base-german-dbmdz-uncased''': 5_12,
'''TurkuNLP/bert-base-finnish-cased-v1''': 5_12,
'''TurkuNLP/bert-base-finnish-uncased-v1''': 5_12,
'''wietsedv/bert-base-dutch-cased''': 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
'''bert-base-uncased''': {'''do_lower_case''': True},
'''bert-large-uncased''': {'''do_lower_case''': True},
'''bert-base-cased''': {'''do_lower_case''': False},
'''bert-large-cased''': {'''do_lower_case''': False},
'''bert-base-multilingual-uncased''': {'''do_lower_case''': True},
'''bert-base-multilingual-cased''': {'''do_lower_case''': False},
'''bert-base-chinese''': {'''do_lower_case''': False},
'''bert-base-german-cased''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': False},
'''bert-base-cased-finetuned-mrpc''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-cased''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-uncased''': {'''do_lower_case''': True},
'''TurkuNLP/bert-base-finnish-cased-v1''': {'''do_lower_case''': False},
'''TurkuNLP/bert-base-finnish-uncased-v1''': {'''do_lower_case''': True},
'''wietsedv/bert-base-dutch-cased''': {'''do_lower_case''': False},
}
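# The three maps above are keyed by checkpoint name: file URLs, maximum input sizes,
# and per-checkpoint init kwargs (chiefly do_lower_case) for each published BERT model.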
class BertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
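    # Illustrative use (checkpoint names come from the maps above):
    #   tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
    #   enc = tokenizer("lower newer")
    #   enc.input_ids  # begins with the [CLS] id and ends with the [SEP] id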
| 572 |
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        qar_model = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        sas_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        sas_model.load_state_dict(save_dict["model"])
        sas_model = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_s2s_model(model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0")
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap("wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat", dtype="float32", mode="r", shape=(wiki40b_passages.num_rows, 128))
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap("eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128))
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)
wiki40b_passages, wiki40b_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples
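# Dense nearest-neighbour lookup: embed the question once, then run a max-inner-product
# search over the precomputed ELI5 training-question index.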
def make_support(question, source="wiki40b", method="dense", n_results=10):
    if source == "none":
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(question, qar_model, qar_tokenizer, wiki40b_passages, wiki40b_gpu_index_flat, n_results)
        else:
            support_doc, hit_lst = query_es_index(question, es_client, index_name="english_wiki40b_snippets_100w", n_results=n_results)
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8):
    with torch.no_grad():
        answer = qa_s2s_generate(question_doc, sas_model, sas_tokenizer, num_answers=1, num_beams=n_beams, min_len=min_len, max_len=max_len, do_sample=sampling, temp=temp, top_p=top_p, top_k=None, max_input_length=1024, device="cuda:0")[0]
    return (answer, support_list)
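# The custom hash_funcs above make st.cache skip hashing the model and tokenizer objects,
# so Streamlit reruns reuse the loaded weights instead of recomputing them per widget change.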
st.title('''Long Form Question Answering with ELI5''')
# Start sidebar
header_html = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
header_full = '''
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class="img-container"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
''' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
description = '''
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
'''
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
'''Answer the question''',
'''View the retrieved document only''',
'''View the most similar ELI5 question and answer''',
'''Show me everything, please!''',
]
demo_options = st.sidebar.checkbox("Demo options")
if demo_options:
    action_st = st.sidebar.selectbox(
        "",
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        "",
        ["Show full text of passages", "Show passage section titles"],
        index=0,
    )
    show_passages = show_type == "Show full text of passages"
else:
    action = 3
    show_passages = True
retrieval_options = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
    retriever_info = '''
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
'''
st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
    index_type = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
    wiki_source = "wiki40b"
    index_type = "dense"
sampled = "beam"
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox("Generation options")
if generate_options:
    generate_info = '''
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder\'s output probabilities.
'''
st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
    min_len = st.sidebar.slider(
        "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None
# start main text
questions_list = [
'''<MY QUESTION>''',
'''How do people make chocolate?''',
'''Why do we get a fever when we are sick?''',
'''How can different animals perceive different colors?''',
'''What is natural language processing?''',
'''What\'s the best way to treat a sunburn?''',
'''What exactly are vitamins ?''',
'''How does nuclear energy provide electricity?''',
'''What\'s the difference between viruses and bacteria?''',
'''Why are flutes classified as woodwinds when most of them are made out of metal ?''',
'''Why do people like drinking coffee even though it tastes so bad?''',
'''What happens when wine ages? How does it make the wine taste better?''',
'''If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?''',
'''How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?''',
'''How does New Zealand have so many large bird predators?''',
]
question_s = st.selectbox(
'''What would you like to ask? ---- select <MY QUESTION> to enter a new query''',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s
if st.button("Show me!"):
    if action in [0, 1, 3]:
        if index_type == "mixed":
            _, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
        answer, support_list = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == '''sampled'''),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('''### The model generated answer is:''')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('''--- \n ### The model is drawing information from the following Wikipedia passages:''')
for i, res in enumerate(support_list):
            wiki_url = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = "[{}]({})".format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(" & ")
                sections = " & ".join(
                    ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
                )
st.markdown(
'''{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'''.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'''> <span style="font-family:arial; font-size:10pt;">''' + res[-1] + '''</span>''', unsafe_allow_html=True
)
if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
st.markdown(
'''--- \n ### The most similar question in the ELI5 training set was: \n\n {}'''.format(train_exple['''title'''])
)
        answers_st = [
'''{}. {}'''.format(i + 1, ''' \n'''.join([line.strip() for line in ans.split('''\n''') if line.strip() != '''''']))
for i, (ans, sc) in enumerate(zip(train_exple['''answers''']['''text'''], train_exple['''answers''']['''score''']))
if i == 0 or sc > 2
]
st.markdown('''##### Its answers were: \n\n {}'''.format('''\n'''.join(answers_st)))
disclaimer = '''
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
'''
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 572 | 1 |
'''simple docstring'''
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
logger = logging.getLogger(__name__)
class NER(TokenClassificationTask):
    def __init__(self, label_idx=-1):
        # in NER datasets, the last column is usually reserved for NER label
        self.label_idx = label_idx
    def read_examples_from_file(self, data_dir, mode):
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(" ")
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
            if words:
                examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
        return examples
    def write_predictions_to_file(self, writer, test_input_reader, preds_list):
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])
def lowerCamelCase_ ( self: Dict , UpperCamelCase__: str ):
if path:
with open(UpperCamelCase__ , """r""" ) as f:
lowerCamelCase__ : Any = f.read().splitlines()
if "O" not in labels:
lowerCamelCase__ : Any = ["""O"""] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class Chunk(NER):
    def __init__(self):
        # in CONLL2003 dataset chunk column is second-to-last
        super().__init__(label_idx=-2)

    def get_labels(self, path: str):
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]
class POS(TokenClassificationTask):
    def read_examples_from_file(self, data_dir, mode: Union[Split, str]):
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                    guid_index += 1
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f"{token['form']} ({token['upos']}|{s_p.pop(0)}) "
            out += "\n"
            writer.write(out)
            example_id += 1

    def get_labels(self, path: str):
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
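
# Hedged usage sketch (paths are placeholders, not from the original file):
#   task = NER()
#   examples = task.read_examples_from_file("/path/to/conll2003", Split.train)
#   labels = task.get_labels(None)  # falls back to the default CoNLL-2003 label set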
| 631 |
'''simple docstring'''
import sys
import turtle
def get_mid(pa: tuple[float, float], pb: tuple[float, float]) -> tuple[float, float]:
    # midpoint of two vertices
    return (pa[0] + pb[0]) / 2, (pa[1] + pb[1]) / 2


def triangle(
    vertexa: tuple[float, float],
    vertexa_2: tuple[float, float],
    vertexa_3: tuple[float, float],
    depth: int,
) -> None:
    # draw the current triangle, then recurse into three half-scale copies
    my_pen.up()
    my_pen.goto(vertexa[0], vertexa[1])
    my_pen.down()
    my_pen.goto(vertexa_2[0], vertexa_2[1])
    my_pen.goto(vertexa_3[0], vertexa_3[1])
    my_pen.goto(vertexa[0], vertexa[1])
    if depth == 0:
        return
    triangle(vertexa, get_mid(vertexa, vertexa_2), get_mid(vertexa, vertexa_3), depth - 1)
    triangle(vertexa_2, get_mid(vertexa, vertexa_2), get_mid(vertexa_2, vertexa_3), depth - 1)
    triangle(vertexa_3, get_mid(vertexa_3, vertexa_2), get_mid(vertexa, vertexa_3), depth - 1)
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'''Correct format for using this script: '''
'''python fractals.py <int:depth_for_fractal>'''
)
    my_pen = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor("red")
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
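    # Example invocation: `python fractals.py 4`. Each recursion level replaces a
    # triangle with three half-scale copies, so depth d draws 3**d smallest triangles.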
| 631 | 1 |
from ....utils import logging
__magic_name__ = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE:
    """Thin config wrapper: copies a base config's attributes and adds multimodal fields."""

    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        # inherit every attribute of the wrapped config, then add the multimodal extras
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
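
# Minimal usage sketch (BaseConfig is a hypothetical stand-in for a real model config):
#   base = BaseConfig()                               # e.g. has hidden_size = 768
#   cfg = __SCREAMING_SNAKE_CASE(base, num_labels=2)  # copies hidden_size, adds extras
#   cfg.modal_hidden_size                             # -> 2048 (the default above)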
| 576 |
"""simple docstring"""
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(
    arrival_time: list[int], burst_time: list[int], no_of_processes: int
) -> list[int]:
    """Calculate the waiting time of each process under non-preemptive SJF."""
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time from burst_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []
    completed = 0
    total_time = 0
    # While processes remain, every process whose arrival time has passed
    # and which has remaining execution time is put into ready_process.
    # The shortest process in ready_process, target_process, is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1
        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)
        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time


def calculate_turnaroundtime(
    burst_time: list[int], no_of_processes: int, waiting_time: list[int]
) -> list[int]:
    """Turnaround time is burst time plus waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
if __name__ == "__main__":
print('''[TEST CASE 01]''')
    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )
# Printing the Result
print('''PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time''')
for i, process_id in enumerate(list(range(1, 5))):
print(
F"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"
F"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"
)
print(F"\nAverage waiting time = {mean(waiting_time):.5f}")
print(F"Average turnaround time = {mean(turn_around_time):.5f}")
| 465 | 0 |
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
UpperCamelCase = {
"vocab_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
},
"merges_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
},
}
UpperCamelCase = {"allegro/herbert-base-cased": 514}
UpperCamelCase = {}
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
_snake_case : Union[str, Any] = VOCAB_FILES_NAMES
_snake_case : str = PRETRAINED_VOCAB_FILES_MAP
_snake_case : Union[str, Any] = PRETRAINED_INIT_CONFIGURATION
_snake_case : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case : List[str] = HerbertTokenizer
def __init__( self :Any , lowerCamelCase__ :Optional[Any]=None , lowerCamelCase__ :List[str]=None , lowerCamelCase__ :Dict=None , lowerCamelCase__ :List[Any]="<s>" , lowerCamelCase__ :Optional[Any]="<unk>" , lowerCamelCase__ :Dict="<pad>" , lowerCamelCase__ :Any="<mask>" , lowerCamelCase__ :Dict="</s>" , **lowerCamelCase__ :Optional[int] , ):
super().__init__(
lowerCamelCase__ , lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , cls_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , **lowerCamelCase__ , )
def __a ( self :int , lowerCamelCase__ :List[int] , lowerCamelCase__ :Optional[List[int]] = None ):
UpperCamelCase__ :str = [self.cls_token_id]
UpperCamelCase__ :int = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __a ( self :List[str] , lowerCamelCase__ :List[int] , lowerCamelCase__ :Optional[List[int]] = None , lowerCamelCase__ :bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase__ , token_ids_a=lowerCamelCase__ , already_has_special_tokens=lowerCamelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase__ )) + [1]
return [1] + ([0] * len(lowerCamelCase__ )) + [1] + ([0] * len(lowerCamelCase__ )) + [1]
def __a ( self :Tuple , lowerCamelCase__ :List[int] , lowerCamelCase__ :Optional[List[int]] = None ):
UpperCamelCase__ :Dict = [self.sep_token_id]
UpperCamelCase__ :Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __a ( self :List[str] , lowerCamelCase__ :str , lowerCamelCase__ :Optional[str] = None ):
UpperCamelCase__ :Optional[int] = self._tokenizer.model.save(lowerCamelCase__ , name=lowerCamelCase__ )
        return tuple(lowerCamelCase__ )
| 383 |
def is_contains_unique_chars(input_str: str) -> bool:
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
return True
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 383 | 1 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class A_ ( __lowerCamelCase ):
'''simple docstring'''
_UpperCamelCase : jnp.ndarray
_UpperCamelCase : jnp.ndarray
class A_ ( nn.Module ):
'''simple docstring'''
_UpperCamelCase : int
_UpperCamelCase : Tuple[int] = (16, 32, 96, 256)
_UpperCamelCase : jnp.dtype = jnp.floataa
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
lowercase = []
for i in range(len(self.block_out_channels ) - 1 ):
lowercase = self.block_out_channels[i]
lowercase = self.block_out_channels[i + 1]
lowercase = nn.Conv(
snake_case , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(snake_case )
lowercase = nn.Conv(
snake_case , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(snake_case )
lowercase = blocks
lowercase = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , snake_case ):
lowercase = self.conv_in(snake_case )
lowercase = nn.silu(snake_case )
for block in self.blocks:
lowercase = block(snake_case )
lowercase = nn.silu(snake_case )
lowercase = self.conv_out(snake_case )
return embedding
@flax_register_to_config
class A_ ( nn.Module , __lowerCamelCase , __lowerCamelCase ):
'''simple docstring'''
_UpperCamelCase : int = 32
_UpperCamelCase : int = 4
_UpperCamelCase : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
_UpperCamelCase : Union[bool, Tuple[bool]] = False
_UpperCamelCase : Tuple[int] = (320, 640, 1280, 1280)
_UpperCamelCase : int = 2
_UpperCamelCase : Union[int, Tuple[int]] = 8
_UpperCamelCase : Optional[Union[int, Tuple[int]]] = None
_UpperCamelCase : int = 1280
_UpperCamelCase : float = 0.0
_UpperCamelCase : bool = False
_UpperCamelCase : jnp.dtype = jnp.floataa
_UpperCamelCase : bool = True
_UpperCamelCase : int = 0
_UpperCamelCase : str = "rgb"
_UpperCamelCase : Tuple[int] = (16, 32, 96, 256)
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
# init input tensors
lowercase = (1, self.in_channels, self.sample_size, self.sample_size)
lowercase = jnp.zeros(snake_case , dtype=jnp.floataa )
lowercase = jnp.ones((1,) , dtype=jnp.intaa )
lowercase = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
lowercase = (1, 3, self.sample_size * 8, self.sample_size * 8)
lowercase = jnp.zeros(snake_case , dtype=jnp.floataa )
lowercase , lowercase = jax.random.split(snake_case )
lowercase = {'params': params_rng, 'dropout': dropout_rng}
return self.init(snake_case , snake_case , snake_case , snake_case , snake_case )["params"]
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.block_out_channels
lowercase = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
lowercase = self.num_attention_heads or self.attention_head_dim
# input
lowercase = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
lowercase = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
lowercase = FlaxTimestepEmbedding(snake_case , dtype=self.dtype )
lowercase = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
lowercase = self.only_cross_attention
if isinstance(snake_case , snake_case ):
lowercase = (only_cross_attention,) * len(self.down_block_types )
if isinstance(snake_case , snake_case ):
lowercase = (num_attention_heads,) * len(self.down_block_types )
# down
lowercase = []
lowercase = []
lowercase = block_out_channels[0]
lowercase = nn.Conv(
snake_case , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(snake_case )
for i, down_block_type in enumerate(self.down_block_types ):
lowercase = output_channel
lowercase = block_out_channels[i]
lowercase = i == len(snake_case ) - 1
if down_block_type == "CrossAttnDownBlock2D":
lowercase = FlaxCrossAttnDownBlockaD(
in_channels=snake_case , out_channels=snake_case , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
lowercase = FlaxDownBlockaD(
in_channels=snake_case , out_channels=snake_case , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(snake_case )
for _ in range(self.layers_per_block ):
lowercase = nn.Conv(
snake_case , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(snake_case )
if not is_final_block:
lowercase = nn.Conv(
snake_case , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(snake_case )
lowercase = down_blocks
lowercase = controlnet_down_blocks
# mid
lowercase = block_out_channels[-1]
lowercase = FlaxUNetMidBlockaDCrossAttn(
in_channels=snake_case , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
lowercase = nn.Conv(
snake_case , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , snake_case , snake_case , snake_case , snake_case , snake_case = 1.0 , snake_case = True , snake_case = False , ):
lowercase = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
lowercase = jnp.flip(snake_case , axis=1 )
# 1. time
if not isinstance(snake_case , jnp.ndarray ):
lowercase = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(snake_case , jnp.ndarray ) and len(timesteps.shape ) == 0:
lowercase = timesteps.astype(dtype=jnp.floataa )
lowercase = jnp.expand_dims(snake_case , 0 )
lowercase = self.time_proj(snake_case )
lowercase = self.time_embedding(snake_case )
# 2. pre-process
lowercase = jnp.transpose(snake_case , (0, 2, 3, 1) )
lowercase = self.conv_in(snake_case )
lowercase = jnp.transpose(snake_case , (0, 2, 3, 1) )
lowercase = self.controlnet_cond_embedding(snake_case )
sample += controlnet_cond
# 3. down
lowercase = (sample,)
for down_block in self.down_blocks:
if isinstance(snake_case , snake_case ):
lowercase , lowercase = down_block(snake_case , snake_case , snake_case , deterministic=not train )
else:
lowercase , lowercase = down_block(snake_case , snake_case , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
lowercase = self.mid_block(snake_case , snake_case , snake_case , deterministic=not train )
        # 5. controlnet blocks
lowercase = ()
for down_block_res_sample, controlnet_block in zip(snake_case , self.controlnet_down_blocks ):
lowercase = controlnet_block(snake_case )
controlnet_down_block_res_samples += (down_block_res_sample,)
lowercase = controlnet_down_block_res_samples
lowercase = self.controlnet_mid_block(snake_case )
# 6. scaling
lowercase = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=snake_case , mid_block_res_sample=snake_case )
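
# Hedged usage sketch: in upstream diffusers this module is FlaxControlNetModel and the
# first method above is init_weights(rng); with those (de-obfuscated) names the flow is:
#   import jax
#   model = FlaxControlNetModel(sample_size=32)  # other hyperparameters keep their defaults
#   params = model.init_weights(jax.random.PRNGKey(0))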
| 84 |
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
a_ :List[Any] = "<<<<<<< This should probably be modified because it mentions: "
a_ :str = "=======\n>>>>>>>\n"
a_ :Union[str, Any] = [
"TextEncoderConfig",
"ByteTextEncoder",
"SubwordTextEncoder",
"encoder_config",
"maybe_build_from_corpus",
"manual_dir",
]
a_ :Tuple = [
# (pattern, replacement)
# Order is important here for some replacements
(R"tfds\.core", R"datasets"),
(R"tf\.io\.gfile\.GFile", R"open"),
(R"tf\.([\w\d]+)", R"datasets.Value('\1')"),
(R"tfds\.features\.Text\(\)", R"datasets.Value('string')"),
(R"tfds\.features\.Text\(", R"datasets.Value('string'),"),
(R"features\s*=\s*tfds.features.FeaturesDict\(", R"features=datasets.Features("),
(R"tfds\.features\.FeaturesDict\(", R"dict("),
(R"The TensorFlow Datasets Authors", R"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
(R"tfds\.", R"datasets."),
(R"dl_manager\.manual_dir", R"self.config.data_dir"),
(R"self\.builder_config", R"self.config"),
]
def lowercase_ (A : Namespace ):
return ConvertCommand(args.tfds_path , args.datasets_directory )
class snake_case__ ( lowerCAmelCase_ ):
"""simple docstring"""
@staticmethod
def lowercase_ ( _snake_case : ArgumentParser ) ->Optional[int]:
snake_case__ : List[str] = parser.add_parser(
'convert', help='Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.', )
train_parser.add_argument(
'--tfds_path', type=_snake_case, required=_snake_case, help='Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.', )
train_parser.add_argument(
'--datasets_directory', type=_snake_case, required=_snake_case, help='Path to the HuggingFace Datasets folder.' )
train_parser.set_defaults(func=_snake_case )
def __init__( self : List[Any], _snake_case : str, _snake_case : str, *_snake_case : int ) ->Union[str, Any]:
snake_case__ : str = get_logger('datasets-cli/converting' )
snake_case__ : Optional[int] = tfds_path
snake_case__ : List[str] = datasets_directory
def lowercase_ ( self : List[Any] ) ->List[Any]:
if os.path.isdir(self._tfds_path ):
snake_case__ : Dict = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
snake_case__ : Any = os.path.dirname(self._tfds_path )
else:
raise ValueError('--tfds_path is neither a directory nor a file. Please check path.' )
snake_case__ : List[Any] = os.path.abspath(self._datasets_directory )
self._logger.info(F'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''' )
snake_case__ : Optional[int] = []
snake_case__ : Optional[int] = []
snake_case__ : Union[str, Any] = {}
if os.path.isdir(self._tfds_path ):
snake_case__ : Dict = os.listdir(_snake_case )
else:
snake_case__ : Optional[int] = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(F'''Looking at file {f_name}''' )
snake_case__ : Dict = os.path.join(_snake_case, _snake_case )
snake_case__ : Dict = os.path.join(_snake_case, _snake_case )
if not os.path.isfile(_snake_case ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info('Skipping file' )
continue
with open(_snake_case, encoding='utf-8' ) as f:
snake_case__ : List[str] = f.readlines()
snake_case__ : Tuple = []
snake_case__ : Tuple = False
snake_case__ : Dict = False
snake_case__ : Optional[Any] = []
for line in lines:
snake_case__ : List[Any] = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
snake_case__ : List[str] = 'import datasets\n'
elif "import tensorflow" in out_line:
# order is important here
snake_case__ : Any = ''
continue
elif "from absl import logging" in out_line:
snake_case__ : Optional[Any] = 'from datasets import logging\n'
elif "getLogger" in out_line:
snake_case__ : List[str] = out_line.replace('getLogger', 'get_logger' )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
snake_case__ : Dict = True
snake_case__ : List[Any] = list(filter(lambda _snake_case : e in out_line, _snake_case ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(_snake_case ) + '\n' )
out_lines.append(_snake_case )
out_lines.append(_snake_case )
continue
else:
for pattern, replacement in TO_CONVERT:
snake_case__ : List[str] = re.sub(_snake_case, _snake_case, _snake_case )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
snake_case__ : Optional[Any] = re.match(R'from\stensorflow_datasets.*import\s([^\.\r\n]+)', _snake_case )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(',' ) )
snake_case__ : Optional[int] = 'from . import ' + match.group(1 )
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(F'''Error converting {out_line.strip()}''' )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
snake_case__ : str = True
out_lines.append(_snake_case )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
snake_case__ : Optional[int] = f_name.replace('.py', '' )
snake_case__ : Optional[Any] = os.path.join(_snake_case, _snake_case )
snake_case__ : List[str] = os.path.join(_snake_case, _snake_case )
os.makedirs(_snake_case, exist_ok=_snake_case )
self._logger.info(F'''Adding directory {output_dir}''' )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(_snake_case )
if needs_manual_update:
with_manual_update.append(_snake_case )
with open(_snake_case, 'w', encoding='utf-8' ) as f:
f.writelines(_snake_case )
self._logger.info(F'''Converted in {output_file}''' )
for utils_file in utils_files:
try:
snake_case__ : Tuple = os.path.basename(_snake_case )
snake_case__ : Union[str, Any] = imports_to_builder_map[f_name.replace('.py', '' )]
self._logger.info(F'''Moving {dest_folder} to {utils_file}''' )
shutil.copy(_snake_case, _snake_case )
except KeyError:
self._logger.error(F'''Cannot find destination folder for {utils_file}. Please copy manually.''' )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
F'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''' )
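
# Hedged CLI sketch (paths are placeholders):
#   datasets-cli convert --tfds_path ~/tensorflow_datasets/my_dataset.py --datasets_directory ~/datasets/
# Each builder file is rewritten with the regex table above into its own dataset folder,
# and shared utility modules are copied next to the builders that import them.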
| 478 | 0 |
def optimal_merge_pattern(files: list) -> float:
    """Greedily merge the two smallest files first; returns the total merge cost."""
    optimal_merge_cost = 0
    while len(files) > 1:
        temp = 0
        # Consider two files with minimum cost to be merged
        for _ in range(2):
            min_index = files.index(min(files))
            temp += files[min_index]
            files.pop(min_index)
        files.append(temp)
        optimal_merge_cost += temp
    return optimal_merge_cost
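
# Worked example: files = [2, 3, 4]
#   merge 2 + 3 -> 5 (cost 5), files becomes [4, 5]
#   merge 4 + 5 -> 9 (cost 9), files becomes [9]
#   total optimal_merge_cost = 5 + 9 = 14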
if __name__ == "__main__":
import doctest
doctest.testmod()
| 519 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'''salesforce/blip2-opt-2.7b''': '''https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json''',
}
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 'blip_2_vision_model'
def __init__( self , SCREAMING_SNAKE_CASE__=14_08 , SCREAMING_SNAKE_CASE__=61_44 , SCREAMING_SNAKE_CASE__=39 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=2_24 , SCREAMING_SNAKE_CASE__=14 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.0_0001 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=1e-10 , SCREAMING_SNAKE_CASE__=True , **SCREAMING_SNAKE_CASE__ , ):
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE__ )
_snake_case : Dict = hidden_size
_snake_case : int = intermediate_size
_snake_case : int = num_hidden_layers
_snake_case : Dict = num_attention_heads
_snake_case : Tuple = patch_size
_snake_case : Optional[int] = image_size
_snake_case : Tuple = initializer_range
_snake_case : List[str] = attention_dropout
_snake_case : Any = layer_norm_eps
_snake_case : int = hidden_act
_snake_case : List[Any] = qkv_bias
@classmethod
def __lowerCamelCase( cls , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE__ )
_snake_case , _snake_case : Any = cls.get_config_dict(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get("""model_type""" ) == "blip-2":
_snake_case : List[Any] = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 'blip_2_qformer'
def __init__( self , SCREAMING_SNAKE_CASE__=3_05_22 , SCREAMING_SNAKE_CASE__=7_68 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=30_72 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=5_12 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1e-12 , SCREAMING_SNAKE_CASE__=0 , SCREAMING_SNAKE_CASE__="absolute" , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=14_08 , **SCREAMING_SNAKE_CASE__ , ):
"""simple docstring"""
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
_snake_case : int = vocab_size
_snake_case : Optional[Any] = hidden_size
_snake_case : Dict = num_hidden_layers
_snake_case : Union[str, Any] = num_attention_heads
_snake_case : str = hidden_act
_snake_case : Dict = intermediate_size
_snake_case : int = hidden_dropout_prob
_snake_case : Optional[int] = attention_probs_dropout_prob
_snake_case : str = max_position_embeddings
_snake_case : Tuple = initializer_range
_snake_case : str = layer_norm_eps
_snake_case : Optional[int] = position_embedding_type
_snake_case : Any = cross_attention_frequency
_snake_case : int = encoder_hidden_size
@classmethod
def __lowerCamelCase( cls , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE__ )
_snake_case , _snake_case : Union[str, Any] = cls.get_config_dict(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get("""model_type""" ) == "blip-2":
_snake_case : Optional[Any] = config_dict["""qformer_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 'blip-2'
SCREAMING_SNAKE_CASE_ = True
def __init__( self , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=32 , **SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE__ )
if vision_config is None:
_snake_case : Any = {}
logger.info("""vision_config is None. initializing the Blip2VisionConfig with default values.""" )
if qformer_config is None:
_snake_case : Union[str, Any] = {}
logger.info("""qformer_config is None. Initializing the Blip2QFormerConfig with default values.""" )
if text_config is None:
_snake_case : str = {}
logger.info("""text_config is None. Initializing the text config with default values (`OPTConfig`).""" )
_snake_case : Union[str, Any] = BlipaVisionConfig(**SCREAMING_SNAKE_CASE__ )
_snake_case : Tuple = BlipaQFormerConfig(**SCREAMING_SNAKE_CASE__ )
_snake_case : Union[str, Any] = text_config["""model_type"""] if """model_type""" in text_config else """opt"""
_snake_case : Union[str, Any] = CONFIG_MAPPING[text_model_type](**SCREAMING_SNAKE_CASE__ )
_snake_case : Tuple = self.text_config.tie_word_embeddings
_snake_case : Optional[int] = self.text_config.is_encoder_decoder
_snake_case : Tuple = num_query_tokens
_snake_case : Tuple = self.vision_config.hidden_size
_snake_case : int = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
_snake_case : List[str] = 1.0
_snake_case : int = 0.02
@classmethod
def __lowerCamelCase( cls , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ):
"""simple docstring"""
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **SCREAMING_SNAKE_CASE__ , )
def __lowerCamelCase( self ):
"""simple docstring"""
_snake_case : Any = copy.deepcopy(self.__dict__ )
_snake_case : Union[str, Any] = self.vision_config.to_dict()
_snake_case : Optional[int] = self.qformer_config.to_dict()
_snake_case : str = self.text_config.to_dict()
_snake_case : Optional[Any] = self.__class__.model_type
return output
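
# Hedged composition sketch (mirrors the classmethod above, which in upstream
# transformers is named from_vision_qformer_text_configs; defaults keep this offline):
#   vision = BlipaVisionConfig()
#   qformer = BlipaQFormerConfig()
#   text = CONFIG_MAPPING["opt"]()
#   blip2 = Blip2Config.from_vision_qformer_text_configs(vision, qformer, text)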
| 519 | 1 |
'''simple docstring'''
class SubArray:
    def __init__(self, arr):
        # we need a list not a string, so do something to change the type
        self.array = arr.split(",")

    def solve_sub_array(self):
        rear = [int(self.array[0])] * len(self.array)
        sum_value = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(int(self.array[i]) + sum_value[i - 1], int(self.array[i]))
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]


if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(("the results is:", re))
| 267 |
'''simple docstring'''
def is_arithmetic_series(series: list) -> bool:
    """Return True if the input series is an arithmetic progression."""
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series: list) -> float:
    """Return the arithmetic mean of the series."""
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)
if __name__ == "__main__":
import doctest
doctest.testmod()
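    # Worked examples:
    #   is_arithmetic_series([2, 4, 6]) -> True   (common difference 2)
    #   is_arithmetic_series([2, 4, 7]) -> False  (4 - 2 != 7 - 4)
    #   arithmetic_mean([2, 4, 6])      -> 4.0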
| 267 | 1 |
from math import isqrt
def is_prime(number: int) -> bool:
    """Trial-division primality test."""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """Count primes below max_prime that are differences of consecutive cubes."""
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count
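
# The candidates 7, 19, 37, 61, ... are differences of consecutive cubes:
#   (k + 1)**3 - k**3 = 3*k**2 + 3*k + 1,
# and successive candidates differ by 6*(k + 1), which is exactly the
# `prime_candidate += 6 * cube_index` update after cube_index is incremented.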
if __name__ == "__main__":
print(f"""{solution() = }""")
| 702 |
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = (EulerDiscreteScheduler,)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 10
def __magic_name__( self :Dict , **lowerCAmelCase__ :Any ) -> int:
__SCREAMING_SNAKE_CASE : List[str] = {
'''num_train_timesteps''': 1_100,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
}
config.update(**lowerCAmelCase__ )
return config
def __magic_name__( self :str ) -> Optional[Any]:
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase__ )
def __magic_name__( self :str ) -> List[str]:
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=lowerCAmelCase__ , beta_end=lowerCAmelCase__ )
def __magic_name__( self :Dict ) -> Any:
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=lowerCAmelCase__ )
def __magic_name__( self :List[Any] ) -> List[str]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCAmelCase__ )
def __magic_name__( self :Dict ) -> int:
__SCREAMING_SNAKE_CASE : Dict = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE : List[str] = self.get_scheduler_config()
__SCREAMING_SNAKE_CASE : Dict = scheduler_class(**lowerCAmelCase__ )
scheduler.set_timesteps(self.num_inference_steps )
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : List[Any] = self.dummy_model()
__SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
__SCREAMING_SNAKE_CASE : Any = sample.to(lowerCAmelCase__ )
for i, t in enumerate(scheduler.timesteps ):
__SCREAMING_SNAKE_CASE : List[Any] = scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = model(lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , generator=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = output.prev_sample
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.sum(torch.abs(lowerCAmelCase__ ) )
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.mean(torch.abs(lowerCAmelCase__ ) )
assert abs(result_sum.item() - 10.0807 ) < 1E-2
assert abs(result_mean.item() - 0.0131 ) < 1E-3
def __magic_name__( self :Union[str, Any] ) -> int:
__SCREAMING_SNAKE_CASE : Tuple = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE : int = self.get_scheduler_config(prediction_type='''v_prediction''' )
__SCREAMING_SNAKE_CASE : Optional[int] = scheduler_class(**lowerCAmelCase__ )
scheduler.set_timesteps(self.num_inference_steps )
__SCREAMING_SNAKE_CASE : Dict = torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_model()
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
__SCREAMING_SNAKE_CASE : Dict = sample.to(lowerCAmelCase__ )
for i, t in enumerate(scheduler.timesteps ):
__SCREAMING_SNAKE_CASE : str = scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = model(lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , generator=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = output.prev_sample
__SCREAMING_SNAKE_CASE : Tuple = torch.sum(torch.abs(lowerCAmelCase__ ) )
__SCREAMING_SNAKE_CASE : Tuple = torch.mean(torch.abs(lowerCAmelCase__ ) )
assert abs(result_sum.item() - 0.0002 ) < 1E-2
        assert abs(result_mean.item() - 2.2676e-06 ) < 1E-3
def __magic_name__( self :Optional[int] ) -> List[str]:
__SCREAMING_SNAKE_CASE : Any = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE : Optional[int] = self.get_scheduler_config()
__SCREAMING_SNAKE_CASE : List[str] = scheduler_class(**lowerCAmelCase__ )
scheduler.set_timesteps(self.num_inference_steps , device=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : int = self.dummy_model()
__SCREAMING_SNAKE_CASE : int = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
__SCREAMING_SNAKE_CASE : Optional[Any] = sample.to(lowerCAmelCase__ )
for t in scheduler.timesteps:
__SCREAMING_SNAKE_CASE : Optional[Any] = scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = model(lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , generator=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = output.prev_sample
__SCREAMING_SNAKE_CASE : Dict = torch.sum(torch.abs(lowerCAmelCase__ ) )
__SCREAMING_SNAKE_CASE : int = torch.mean(torch.abs(lowerCAmelCase__ ) )
assert abs(result_sum.item() - 10.0807 ) < 1E-2
assert abs(result_mean.item() - 0.0131 ) < 1E-3
def __magic_name__( self :List[Any] ) -> int:
__SCREAMING_SNAKE_CASE : str = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE : Tuple = self.get_scheduler_config()
__SCREAMING_SNAKE_CASE : Any = scheduler_class(**lowerCAmelCase__ , use_karras_sigmas=lowerCAmelCase__ )
scheduler.set_timesteps(self.num_inference_steps , device=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Optional[int] = self.dummy_model()
__SCREAMING_SNAKE_CASE : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
__SCREAMING_SNAKE_CASE : List[str] = sample.to(lowerCAmelCase__ )
for t in scheduler.timesteps:
__SCREAMING_SNAKE_CASE : Any = scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = model(lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , generator=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = output.prev_sample
__SCREAMING_SNAKE_CASE : Any = torch.sum(torch.abs(lowerCAmelCase__ ) )
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.mean(torch.abs(lowerCAmelCase__ ) )
        assert abs(result_sum.item() - 124.52299499511719 ) < 1E-2
        assert abs(result_mean.item() - 0.16213932633399963 ) < 1E-3
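
# Sketch of the sampling loop these tests exercise (same public API as the calls above;
# `model` and `initial_noise` are placeholders):
#   scheduler = EulerDiscreteScheduler(num_train_timesteps=1_100)
#   scheduler.set_timesteps(10)
#   sample = initial_noise * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       noise_pred = model(model_input, t)
#       sample = scheduler.step(noise_pred, t, sample).prev_sample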
| 260 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A_ = {
"configuration_time_series_transformer": [
"TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TimeSeriesTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimeSeriesTransformerForPrediction",
"TimeSeriesTransformerModel",
"TimeSeriesTransformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
A_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 143 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__A = {'''configuration_yolos''': ['''YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''YolosConfig''', '''YolosOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ['''YolosFeatureExtractor''']
__A = ['''YolosImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'''YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''YolosForObjectDetection''',
'''YolosModel''',
'''YolosPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
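
# Both import-structure modules above rely on the same _LazyModule trick: the heavy
# torch/vision imports only run when an attribute is first accessed, e.g. (module path
# illustrative):
#   from transformers.models.yolos import YolosConfig  # resolved lazily via _LazyModule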
| 593 | 0 |
"""simple docstring"""
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def A( snake_case_ ):
"""simple docstring"""
random.seed(snake_case_ )
np.random.seed(snake_case_ )
torch.manual_seed(snake_case_ )
torch.cuda.manual_seed_all(snake_case_ )
# ^^ safe to call this function even if cuda is not available
class _a :
'''simple docstring'''
def __init__( self , UpperCAmelCase_ , UpperCAmelCase_ = 0.99_99 , UpperCAmelCase_ = 0.0 , UpperCAmelCase_ = 0 , UpperCAmelCase_ = False , UpperCAmelCase_ = 1.0 , UpperCAmelCase_ = 2 / 3 , UpperCAmelCase_ = None , UpperCAmelCase_ = None , **UpperCAmelCase_ , ) -> Union[str, Any]:
'''simple docstring'''
if isinstance(UpperCAmelCase_ , torch.nn.Module):
lowercase__: Tuple = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage`" , "1.0.0" , UpperCAmelCase_ , standard_warn=UpperCAmelCase_ , )
lowercase__: List[str] = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
lowercase__: List[str] = True
if kwargs.get("max_value" , UpperCAmelCase_) is not None:
lowercase__: Dict = "The `max_value` argument is deprecated. Please use `decay` instead."
deprecate("max_value" , "1.0.0" , UpperCAmelCase_ , standard_warn=UpperCAmelCase_)
lowercase__: List[str] = kwargs["max_value"]
if kwargs.get("min_value" , UpperCAmelCase_) is not None:
lowercase__: int = "The `min_value` argument is deprecated. Please use `min_decay` instead."
deprecate("min_value" , "1.0.0" , UpperCAmelCase_ , standard_warn=UpperCAmelCase_)
lowercase__: int = kwargs["min_value"]
lowercase__: Any = list(UpperCAmelCase_)
lowercase__: str = [p.clone().detach() for p in parameters]
if kwargs.get("device" , UpperCAmelCase_) is not None:
lowercase__: List[Any] = "The `device` argument is deprecated. Please use `to` instead."
deprecate("device" , "1.0.0" , UpperCAmelCase_ , standard_warn=UpperCAmelCase_)
self.to(device=kwargs["device"])
lowercase__: int = None
lowercase__: str = decay
lowercase__: List[str] = min_decay
lowercase__: Optional[Any] = update_after_step
lowercase__: Dict = use_ema_warmup
lowercase__: str = inv_gamma
lowercase__: List[Any] = power
lowercase__: Any = 0
lowercase__: Dict = None # set in `step()`
lowercase__: str = model_cls
lowercase__: int = model_config
@classmethod
def __lowercase ( cls , UpperCAmelCase_ , UpperCAmelCase_) -> "EMAModel":
'''simple docstring'''
lowercase__ , lowercase__: List[Any] = model_cls.load_config(UpperCAmelCase_ , return_unused_kwargs=UpperCAmelCase_)
lowercase__: int = model_cls.from_pretrained(UpperCAmelCase_)
lowercase__: Dict = cls(model.parameters() , model_cls=UpperCAmelCase_ , model_config=model.config)
ema_model.load_state_dict(UpperCAmelCase_)
return ema_model
def __lowercase ( self , UpperCAmelCase_) -> str:
'''simple docstring'''
if self.model_cls is None:
raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__.")
if self.model_config is None:
raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__.")
lowercase__: Optional[Any] = self.model_cls.from_config(self.model_config)
lowercase__: int = self.state_dict()
state_dict.pop("shadow_params" , UpperCAmelCase_)
model.register_to_config(**UpperCAmelCase_)
self.copy_to(model.parameters())
model.save_pretrained(UpperCAmelCase_)
def __lowercase ( self , UpperCAmelCase_) -> float:
'''simple docstring'''
lowercase__: Dict = max(0 , optimization_step - self.update_after_step - 1)
if step <= 0:
return 0.0
if self.use_ema_warmup:
lowercase__: int = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
lowercase__: Any = (1 + step) / (10 + step)
lowercase__: int = min(UpperCAmelCase_ , self.decay)
# make sure decay is not smaller than min_decay
lowercase__: Dict = max(UpperCAmelCase_ , self.min_decay)
return cur_decay_value
@torch.no_grad()
def __lowercase ( self , UpperCAmelCase_) -> Optional[Any]:
'''simple docstring'''
if isinstance(UpperCAmelCase_ , torch.nn.Module):
lowercase__: Union[str, Any] = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage.step`" , "1.0.0" , UpperCAmelCase_ , standard_warn=UpperCAmelCase_ , )
lowercase__: Any = parameters.parameters()
lowercase__: Dict = list(UpperCAmelCase_)
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
lowercase__: Any = self.get_decay(self.optimization_step)
lowercase__: Optional[Any] = decay
lowercase__: Tuple = 1 - decay
lowercase__: Dict = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , UpperCAmelCase_):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
lowercase__: Optional[int] = deepspeed.zero.GatheredParameters(UpperCAmelCase_ , modifier_rank=UpperCAmelCase_)
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param))
else:
s_param.copy_(UpperCAmelCase_)
def __lowercase ( self , UpperCAmelCase_) -> None:
'''simple docstring'''
lowercase__: Tuple = list(UpperCAmelCase_)
for s_param, param in zip(self.shadow_params , UpperCAmelCase_):
param.data.copy_(s_param.to(param.device).data)
def __lowercase ( self , UpperCAmelCase_=None , UpperCAmelCase_=None) -> None:
'''simple docstring'''
lowercase__: Tuple = [
p.to(device=UpperCAmelCase_ , dtype=UpperCAmelCase_) if p.is_floating_point() else p.to(device=UpperCAmelCase_)
for p in self.shadow_params
]
def __lowercase ( self) -> dict:
'''simple docstring'''
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def __lowercase ( self , UpperCAmelCase_) -> None:
'''simple docstring'''
lowercase__: Union[str, Any] = [param.detach().cpu().clone() for param in parameters]
def __lowercase ( self , UpperCAmelCase_) -> None:
'''simple docstring'''
if self.temp_stored_params is None:
raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`")
for c_param, param in zip(self.temp_stored_params , UpperCAmelCase_):
param.data.copy_(c_param.data)
# Better memory-wise.
lowercase__: Optional[int] = None
def __lowercase ( self , UpperCAmelCase_) -> None:
'''simple docstring'''
lowercase__: Union[str, Any] = copy.deepcopy(UpperCAmelCase_)
lowercase__: Any = state_dict.get("decay" , self.decay)
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError("Decay must be between 0 and 1")
lowercase__: Optional[int] = state_dict.get("min_decay" , self.min_decay)
if not isinstance(self.min_decay , UpperCAmelCase_):
raise ValueError("Invalid min_decay")
lowercase__: List[Any] = state_dict.get("optimization_step" , self.optimization_step)
if not isinstance(self.optimization_step , UpperCAmelCase_):
raise ValueError("Invalid optimization_step")
lowercase__: Optional[Any] = state_dict.get("update_after_step" , self.update_after_step)
if not isinstance(self.update_after_step , UpperCAmelCase_):
raise ValueError("Invalid update_after_step")
lowercase__: Any = state_dict.get("use_ema_warmup" , self.use_ema_warmup)
if not isinstance(self.use_ema_warmup , UpperCAmelCase_):
raise ValueError("Invalid use_ema_warmup")
lowercase__: Union[str, Any] = state_dict.get("inv_gamma" , self.inv_gamma)
if not isinstance(self.inv_gamma , (float, int)):
raise ValueError("Invalid inv_gamma")
lowercase__: int = state_dict.get("power" , self.power)
if not isinstance(self.power , (float, int)):
raise ValueError("Invalid power")
lowercase__: Optional[Any] = state_dict.get("shadow_params" , UpperCAmelCase_)
if shadow_params is not None:
lowercase__: Optional[int] = shadow_params
if not isinstance(self.shadow_params , UpperCAmelCase_):
raise ValueError("shadow_params must be a list")
if not all(isinstance(UpperCAmelCase_ , torch.Tensor) for p in self.shadow_params):
raise ValueError("shadow_params must all be Tensors")
| 120 |
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class _a :
'''simple docstring'''
def __init__( self , UpperCAmelCase_ , UpperCAmelCase_=13 , UpperCAmelCase_=7 , UpperCAmelCase_=True , UpperCAmelCase_=True , UpperCAmelCase_=False , UpperCAmelCase_=True , UpperCAmelCase_=99 , UpperCAmelCase_=32 , UpperCAmelCase_=5 , UpperCAmelCase_=4 , UpperCAmelCase_=37 , UpperCAmelCase_="gelu" , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.1 , UpperCAmelCase_=512 , UpperCAmelCase_=16 , UpperCAmelCase_=2 , UpperCAmelCase_=0.02 , UpperCAmelCase_=3 , UpperCAmelCase_=4 , UpperCAmelCase_=None , ) -> Optional[Any]:
'''simple docstring'''
lowercase__: Any = parent
lowercase__: List[str] = batch_size
lowercase__: Dict = seq_length
lowercase__: Dict = is_training
lowercase__: List[str] = use_input_mask
lowercase__: Dict = use_token_type_ids
lowercase__: Optional[Any] = use_labels
lowercase__: str = vocab_size
lowercase__: Optional[int] = hidden_size
lowercase__: List[Any] = num_hidden_layers
lowercase__: Tuple = num_attention_heads
lowercase__: Optional[Any] = intermediate_size
lowercase__: Any = hidden_act
lowercase__: Optional[int] = hidden_dropout_prob
lowercase__: Optional[int] = attention_probs_dropout_prob
lowercase__: Dict = max_position_embeddings
lowercase__: Dict = type_vocab_size
lowercase__: Dict = type_sequence_label_size
lowercase__: List[str] = initializer_range
lowercase__: Tuple = num_labels
lowercase__: int = num_choices
lowercase__: Optional[int] = scope
def __lowercase ( self) -> List[Any]:
'''simple docstring'''
lowercase__: Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
lowercase__: Union[str, Any] = None
if self.use_input_mask:
lowercase__: Tuple = random_attention_mask([self.batch_size, self.seq_length])
lowercase__: int = None
if self.use_token_type_ids:
lowercase__: Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
lowercase__: Union[str, Any] = None
lowercase__: List[Any] = None
lowercase__: Tuple = None
if self.use_labels:
lowercase__: Any = ids_tensor([self.batch_size] , self.type_sequence_label_size)
lowercase__: Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
lowercase__: Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices)
lowercase__: int = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowercase ( self) -> str:
'''simple docstring'''
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , )
def __lowercase ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) -> Union[str, Any]:
'''simple docstring'''
lowercase__: List[str] = LlamaModel(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
lowercase__: Optional[int] = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_)
lowercase__: int = model(UpperCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels,
        token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = LlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask, )
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels,
        token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask,
    ):
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels,
        token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        # first forward pass
        outputs = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask, use_cache=True, )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and attention mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(
            next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )["hidden_states"][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values,
            output_hidden_states=True, )["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
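    # Note (added for clarity; not part of the original test): the slice comparison above is
    # the standard KV-cache consistency check -- running the full sequence in one pass and
    # running only the new tokens with `past_key_values` must produce numerically close
    # hidden states for the shared positions, otherwise the cached path has diverged.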
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    @unittest.skip("LLaMA buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass
    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
@require_torch
class LlamaIntegrationTest(unittest.TestCase):
@unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
@slow
def __lowercase ( self) -> str:
'''simple docstring'''
lowercase__: Optional[Any] = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowercase__: Optional[int] = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf" , device_map="auto")
lowercase__: str = model(torch.tensor([input_ids]))
# Expected mean on dim = -1
lowercase__: int = torch.tensor([[-6.65_50, -4.12_27, -4.98_59, -3.24_06, 0.82_62, -3.00_33, 1.29_64, -3.36_99]])
torch.testing.assert_close(out.mean(-1) , UpperCAmelCase_ , atol=1E-2 , rtol=1E-2)
# slicing logits[0, 0, 0:30]
# fmt: off
lowercase__: Dict = torch.tensor([-12.82_81, -7.44_53, -0.46_39, -8.06_25, -7.25_00, -8.00_00, -6.48_83, -7.76_95, -7.84_38, -7.03_12, -6.21_88, -7.13_28, -1.84_96, 1.99_61, -8.62_50, -6.72_27, -12.82_81, -6.94_92, -7.07_42, -7.78_52, -7.58_20, -7.90_62, -6.93_75, -7.98_05, -8.34_38, -8.15_62, -8.04_69, -7.62_50, -7.74_22, -7.33_98,])
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , UpperCAmelCase_ , atol=1E-5 , rtol=1E-5)
@unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
@slow
def __lowercase ( self) -> Any:
'''simple docstring'''
lowercase__: List[Any] = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowercase__: List[Any] = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf" , device_map="auto")
lowercase__: Optional[Any] = model(torch.tensor(UpperCAmelCase_))
# Expected mean on dim = -1
lowercase__: Union[str, Any] = torch.tensor([[-2.06_22, -1.27_94, -1.16_38, -0.97_88, -1.46_03, -1.02_38, -1.78_93, -1.44_11]])
torch.testing.assert_close(out.mean(-1) , UpperCAmelCase_ , atol=1E-2 , rtol=1E-2)
# slicing logits[0, 0, 0:30]
# fmt: off
lowercase__: Dict = torch.tensor([-8.14_06, -8.05_47, 2.74_61, -1.23_44, -0.14_48, -1.82_62, -1.00_20, -1.81_54, -1.68_95, -1.85_16, -2.35_74, -0.92_77, 3.75_98, 6.57_42, -1.29_98, -0.11_77, -8.14_06, -2.96_88, -2.91_99, -3.16_99, -3.52_54, -2.35_55, -2.79_88, -3.41_41, -2.82_62, -4.51_95, -3.33_79, -3.31_64, -2.78_32, -3.02_73])
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , UpperCAmelCase_ , atol=1E-5 , rtol=1E-5)
@unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
@slow
def __lowercase ( self) -> Tuple:
'''simple docstring'''
lowercase__: Optional[int] = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowercase__: Optional[int] = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf" , device_map="auto")
lowercase__: str = model(torch.tensor(UpperCAmelCase_))
# Expected mean on dim = -1
lowercase__: List[Any] = torch.tensor([[-0.85_62, -1.85_20, -0.75_51, -0.41_62, -1.51_61, -1.20_38, -2.48_23, -2.32_54]])
torch.testing.assert_close(out.mean(-1) , UpperCAmelCase_ , atol=1E-2 , rtol=1E-2)
# slicing logits[0, 0, 0:30]
# fmt: off
lowercase__: Dict = torch.tensor([-2.22_27, 4.88_28, 0.90_23, -0.45_78, -0.78_71, -0.10_33, -0.62_21, -0.57_86, -0.78_03, -1.06_74, -1.29_20, -0.15_70, 0.80_08, 2.07_23, -0.94_97, 0.27_71, -2.22_27, -0.76_12, -1.43_46, -1.20_61, -1.64_26, -0.30_00, -0.71_39, -1.19_34, -1.86_91, -1.69_73, -1.59_47, -1.27_05, -0.35_23, -0.55_13])
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , UpperCAmelCase_ , atol=1E-5 , rtol=1E-5)
@unittest.skip(
"Logits are not exactly the same, once we fix the instabilities somehow, will update! Also this is going to be a `too_slow` test")
@slow
def __lowercase ( self) -> Union[str, Any]:
'''simple docstring'''
lowercase__: Dict = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
lowercase__: List[str] = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf" , device_map="auto")
lowercase__: Optional[Any] = model(torch.tensor(UpperCAmelCase_))
lowercase__: Any = torch.tensor(
[[-4.23_27, -3.33_60, -4.66_65, -4.76_31, -1.81_80, -3.41_70, -1.42_11, -3.18_10]] , dtype=torch.float32)
torch.testing.assert_close(out.mean(-1) , UpperCAmelCase_ , atol=1E-2 , rtol=1E-2)
# fmt: off
lowercase__: List[str] = torch.tensor([-9.49_22, -3.95_51, 1.79_98, -5.67_58, -5.10_55, -5.89_84, -4.83_20, -6.80_86, -6.53_91, -5.61_72, -5.58_20, -5.53_52, 1.78_81, 3.62_89, -6.51_17, -3.47_85, -9.50_00, -6.03_52, -6.81_25, -6.01_95, -6.68_36, -5.47_27, -6.28_12, -6.03_91, -7.33_98, -7.42_97, -7.48_44, -6.58_20, -5.87_89, -5.53_12])
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , UpperCAmelCase_ , atol=1E-5 , rtol=1E-5)
@unittest.skip("Model is curently gated")
@slow
def __lowercase ( self) -> Tuple:
'''simple docstring'''
lowercase__: List[Any] = "Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"
lowercase__: List[str] = "Simply put, the theory of relativity states that "
lowercase__: Dict = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf")
lowercase__: Tuple = tokenizer.encode(UpperCAmelCase_ , return_tensors="pt")
lowercase__: Tuple = LlamaForCausalLM.from_pretrained(
"meta-llama/Llama-2-13b-chat-hf" , device_map="sequential" , use_safetensors=UpperCAmelCase_)
# greedy generation outputs
lowercase__: List[str] = model.generate(UpperCAmelCase_ , max_new_tokens=64 , top_p=UpperCAmelCase_ , temperature=1 , do_sample=UpperCAmelCase_)
lowercase__: Any = tokenizer.decode(generated_ids[0] , skip_special_tokens=UpperCAmelCase_)
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_)
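# --- illustrative sketch (added; not part of the original test file) ---
# The rope-scaling test above builds the same model twice and only changes
# `config.rope_scaling` in between. A minimal, hedged sketch of that configuration
# step (the tiny model sizes below are placeholders, not the tested checkpoints):
#
#     from transformers import LlamaConfig, LlamaModel
#
#     config = LlamaConfig(vocab_size=99, hidden_size=32, num_hidden_layers=2,
#                          num_attention_heads=4, max_position_embeddings=64)
#     config.rope_scaling = {"type": "dynamic", "factor": 10.0}  # or {"type": "linear", ...}
#     model = LlamaModel(config).eval()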
| 120 | 1 |
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
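# Worked illustration (added): with pad_token_id = 0 and input_ids = [[5, 6, 0], [7, 0, 0]],
# keep_column_mask is [True, True, False] -- only the last column is all padding -- so
# trim_batch returns [[5, 6], [7, 0]].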
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)
    def __getitem__(self, index) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }
    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)
def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))
def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))
def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)
def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos
def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
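# Worked illustration (added): f1_score("the cat sat", "a cat slept") normalizes both sides
# to ["cat", "sat"] and ["cat", "slept"]; one shared token gives precision = recall = 1/2,
# so the F1 is 0.5, while exact_match_score on the same pair is False.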
def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
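# --- usage sketch (added; illustrative only, the checkpoint name is a placeholder) ---
#
#     from transformers import BartTokenizer
#
#     tokenizer = BartTokenizer.from_pretrained("facebook/bart-base")
#     batch = encode_line(tokenizer, "a source line", max_length=32, padding_side="right")
#     ids = trim_batch(batch["input_ids"], tokenizer.pad_token_id)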
| 224 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # We need to pad the tensor with one more element if we are the main process
    # to ensure that we can pad
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


def main():
    state = PartialState()
    state.print(f"State: {state}")
    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)
if __name__ == "__main__":
main()
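# Worked illustration (added): with 2 processes, create_tensor gives process 0 ->
# tensor([1., 2.]) and process 1 -> tensor([3., 4.]); gather then returns
# tensor([1., 2., 3., 4.]) on every rank, reduce(..., "sum") gives tensor([4., 6.])
# and reduce(..., "mean") gives tensor([2., 3.]). A hedged launch example, assuming
# this script is saved as test_ops.py:
#
#     accelerate launch --num_processes 2 test_ops.py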
| 224 | 1 |
'''simple docstring'''
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class PreForwardHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs
class PostForwardHook(ModelHook):
    def post_forward(self, module, output):
        return output + 1
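# Note (added): in accelerate's hook API, `pre_forward(module, *args, **kwargs)` must
# return the possibly modified `(args, kwargs)` pair and `post_forward(module, output)`
# the possibly modified output; the two toy hooks above bump the first input, resp. the
# output, by one so the tests below can observe exactly where a hook fired.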
class HooksModelTest(unittest.TestCase):
def a_ ( self ) -> List[Any]:
_a = ModelForTest()
_a = ModelHook()
add_hook_to_module(__UpperCamelCase , __UpperCamelCase )
self.assertEqual(test_model._hf_hook , __UpperCamelCase )
self.assertTrue(hasattr(__UpperCamelCase , "_old_forward" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , "forward" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["x"] )
remove_hook_from_module(__UpperCamelCase )
self.assertFalse(hasattr(__UpperCamelCase , "_hf_hook" ) )
self.assertFalse(hasattr(__UpperCamelCase , "_old_forward" ) )
def a_ ( self ) -> List[Any]:
_a = ModelForTest()
_a = ModelHook()
add_hook_to_module(__UpperCamelCase , __UpperCamelCase )
add_hook_to_module(__UpperCamelCase , __UpperCamelCase , append=__UpperCamelCase )
self.assertEqual(isinstance(test_model._hf_hook , __UpperCamelCase ) , __UpperCamelCase )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(__UpperCamelCase , "_old_forward" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , "forward" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["x"] )
remove_hook_from_module(__UpperCamelCase )
self.assertFalse(hasattr(__UpperCamelCase , "_hf_hook" ) )
self.assertFalse(hasattr(__UpperCamelCase , "_old_forward" ) )
def a_ ( self ) -> Optional[int]:
_a = ModelForTest()
_a = torch.randn(2 , 3 )
_a = test_model(x + 1 )
_a = test_model(x + 2 )
_a = PreForwardHook()
add_hook_to_module(__UpperCamelCase , __UpperCamelCase )
_a = test_model(__UpperCamelCase )
self.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
_a = PreForwardHook()
add_hook_to_module(__UpperCamelCase , __UpperCamelCase )
_a = test_model(__UpperCamelCase )
self.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
_a = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(__UpperCamelCase , __UpperCamelCase )
_a = test_model(__UpperCamelCase )
assert torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-5 )
def a_ ( self ) -> Dict:
_a = ModelForTest()
_a = torch.randn(2 , 3 )
_a = test_model(__UpperCamelCase )
_a = PostForwardHook()
add_hook_to_module(__UpperCamelCase , __UpperCamelCase )
_a = test_model(__UpperCamelCase )
self.assertTrue(torch.allclose(__UpperCamelCase , output + 1 , atol=1e-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
_a = PostForwardHook()
add_hook_to_module(__UpperCamelCase , __UpperCamelCase )
_a = test_model(__UpperCamelCase )
self.assertTrue(torch.allclose(__UpperCamelCase , output + 1 , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
_a = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(__UpperCamelCase , __UpperCamelCase )
_a = test_model(__UpperCamelCase )
assert torch.allclose(__UpperCamelCase , output + 2 , atol=1e-5 )
def a_ ( self ) -> Optional[Any]:
_a = ModelForTest()
_a = torch.randn(2 , 3 )
_a = test_model(__UpperCamelCase )
_a = PostForwardHook()
add_hook_to_module(__UpperCamelCase , __UpperCamelCase )
_a = test_model(__UpperCamelCase )
self.assertTrue(torch.allclose(__UpperCamelCase , output + 1 ) )
self.assertTrue(outputa.requires_grad )
_a = True
_a = test_model(__UpperCamelCase )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def a_ ( self ) -> Tuple:
_a = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
_a = torch.randn(2 , 3 )
_a = model(__UpperCamelCase )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(__UpperCamelCase , AlignDevicesHook(io_same_device=__UpperCamelCase ) )
_a = torch.randn(2 , 3 ).to(0 )
_a = model(__UpperCamelCase )
self.assertEqual(output.device , torch.device(0 ) )
def a_ ( self ) -> Union[str, Any]:
_a = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# This will move each submodule on different devices
_a = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}
add_hook_to_module(model.lineara , AlignDevicesHook(**__UpperCamelCase ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__UpperCamelCase ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__UpperCamelCase ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
# Buffers are not included in the offload by default, so are on the execution device
_a = torch.device(hook_kwargs["execution_device"] )
self.assertEqual(model.batchnorm.running_mean.device , __UpperCamelCase )
_a = torch.randn(2 , 3 )
_a = model(__UpperCamelCase )
self.assertEqual(output.device , __UpperCamelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# Now test with buffers included in the offload
_a = {
"execution_device": 0 if torch.cuda.is_available() else "cpu",
"offload": True,
"offload_buffers": True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**__UpperCamelCase ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__UpperCamelCase ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__UpperCamelCase ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("meta" ) )
_a = torch.randn(2 , 3 )
_a = model(__UpperCamelCase )
self.assertEqual(output.device , __UpperCamelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
def a_ ( self ) -> Union[str, Any]:
_a = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# This will move each submodule on different devices
_a = 0 if torch.cuda.is_available() else "cpu"
attach_align_device_hook(__UpperCamelCase , execution_device=__UpperCamelCase , offload=__UpperCamelCase )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
# Buffers are not included in the offload by default, so are on the execution device
_a = torch.device(__UpperCamelCase )
self.assertEqual(model.batchnorm.running_mean.device , __UpperCamelCase )
_a = torch.randn(2 , 3 )
_a = model(__UpperCamelCase )
self.assertEqual(output.device , __UpperCamelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__UpperCamelCase )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# Now test with buffers included in the offload
attach_align_device_hook(__UpperCamelCase , execution_device=__UpperCamelCase , offload=__UpperCamelCase , offload_buffers=__UpperCamelCase )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("meta" ) )
_a = torch.randn(2 , 3 )
_a = model(__UpperCamelCase )
self.assertEqual(output.device , __UpperCamelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__UpperCamelCase )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
def a_ ( self ) -> List[Any]:
_a = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# This will move each submodule on different devices
_a = 0 if torch.cuda.is_available() else "cpu"
attach_align_device_hook(
__UpperCamelCase , execution_device=__UpperCamelCase , offload=__UpperCamelCase , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
# Buffers are not included in the offload by default, so are on the execution device
_a = torch.device(__UpperCamelCase )
self.assertEqual(model.batchnorm.running_mean.device , __UpperCamelCase )
_a = torch.randn(2 , 3 )
_a = model(__UpperCamelCase )
self.assertEqual(output.device , __UpperCamelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__UpperCamelCase )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# Now test with buffers included in the offload
attach_align_device_hook(
__UpperCamelCase , execution_device=__UpperCamelCase , offload=__UpperCamelCase , weights_map=model.state_dict() , offload_buffers=__UpperCamelCase , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("meta" ) )
_a = torch.randn(2 , 3 )
_a = model(__UpperCamelCase )
self.assertEqual(output.device , __UpperCamelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__UpperCamelCase )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
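# --- usage sketch (added; illustrative only, assumes a CUDA device is available) ---
#
#     import torch
#     import torch.nn as nn
#     from accelerate.hooks import attach_align_device_hook, remove_hook_from_submodules
#
#     net = nn.Sequential(nn.Linear(3, 4), nn.Linear(4, 5))
#     attach_align_device_hook(net, execution_device=0, offload=True)
#     out = net(torch.randn(2, 3))       # weights are streamed to the GPU per forward
#     remove_hook_from_submodules(net)   # loads the weights back onto CPU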
| 276 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
    "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
    "processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
        "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MCTCTForCTC",
        "MCTCTModel",
        "MCTCTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
lowercase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
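# Note (added): the _LazyModule indirection keeps importing this package cheap -- torch and
# the modeling code are only pulled in when an attribute such as MCTCTModel is first
# accessed. E.g. (module path assumed from the four-dot relative imports above):
#
#     from transformers.models.deprecated.mctct import MCTCTConfig  # no torch import yet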
| 276 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]])
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1E-4))
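# --- usage sketch (added; mirrors the integration test above, illustrative only) ---
#
#     import jax.numpy as jnp
#     from transformers import FlaxRoFormerForMaskedLM
#
#     model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
#     logits = model(jnp.array([[0, 1, 2, 3, 4, 5]]))[0]   # shape (1, 6, vocab_size)
#     predicted_ids = logits.argmax(-1)                    # greedy token choice per position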
| 447 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    task_type: Optional[str] = field(
        default="NER", metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."} )
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."} )
    labels: Optional[str] = field(
        default=None, metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."} , )
    max_seq_length: int = field(
        default=128, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
' --overwrite_output_dir to overcome.' )
    module = import_module("tasks")
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f'''Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
f'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , SCREAMING_SNAKE_CASE )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels)
    label_map: Dict[int, str] = dict(enumerate(labels))
    num_labels = len(labels)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , id2label=label_map , label2id={label: i for i, label in enumerate(labels)} , cache_dir=model_args.cache_dir , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path) , config=config , cache_dir=model_args.cache_dir , )
# Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )
    def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions, axis=2)
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]
        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        return preds_list, out_label_list
    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        return {
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }
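    # Illustration (added): with label_map = {0: "O", 1: "B-PER"} and padded positions
    # labelled with CrossEntropyLoss().ignore_index (-100), align_predictions skips the
    # padding, so seqeval sees gold and predicted tag sequences of identical length.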
# Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None
# Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , data_collator=data_collator , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

        results.update(result)
# Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task , data_dir=data_args.data_dir , tokenizer=tokenizer , labels=labels , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )

        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)

        output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")
        if trainer.is_world_process_zero():
            with open(output_test_results_file, "w") as writer:
                for key, value in metrics.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, "w") as writer:
                with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
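# --- example invocation (added; script name, paths and checkpoint are placeholders) ---
#
#     python run_ner.py \
#       --model_name_or_path bert-base-cased \
#       --data_dir ./conll2003 \
#       --labels ./conll2003/labels.txt \
#       --output_dir ./ner-output \
#       --max_seq_length 128 \
#       --do_train --do_eval --do_predict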
| 447 | 1 |
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
logger = logging.getLogger(__name__)
MODEL_BIN_FILE = "pytorch_model.bin"
@dataclasses.dataclass
class lowercase :
__lowercase : str = dataclasses.field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."} )
__lowercase : Optional[str] = dataclasses.field(
default=_SCREAMING_SNAKE_CASE , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."} , )
@dataclasses.dataclass
class lowercase :
__lowercase : str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."} )
__lowercase : str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."} )
__lowercase : Optional[str] = dataclasses.field(
default=_SCREAMING_SNAKE_CASE , metadata={"help": "A csv or a json file containing the validation data."} )
__lowercase : Optional[str] = dataclasses.field(
default=_SCREAMING_SNAKE_CASE , metadata={"help": "The name of the task to train on."} , )
__lowercase : Optional[List[str]] = dataclasses.field(
default=_SCREAMING_SNAKE_CASE , metadata={"help": "The list of labels for the task."} )
@dataclasses.dataclass
class lowercase :
__lowercase : str = dataclasses.field(
metadata={"help": "The output directory where the model predictions and checkpoints will be written."} )
__lowercase : Optional[str] = dataclasses.field(
default="accuracy" , metadata={"help": "The evaluation metric used for the task."} )
__lowercase : Optional[str] = dataclasses.field(
default="no" , metadata={
"help": "The evaluation strategy to adopt during training. Possible values are: [\"no\", \"step\", \"epoch]"
} , )
__lowercase : Optional[int] = dataclasses.field(
default=10 , metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."} , )
__lowercase : Optional[float] = dataclasses.field(
default=0.0 , metadata={
"help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
} , )
__lowercase : Optional[bool] = dataclasses.field(
default=_SCREAMING_SNAKE_CASE , metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."} , )
__lowercase : Optional[bool] = dataclasses.field(
default=_SCREAMING_SNAKE_CASE , metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."} , )
__lowercase : Optional[bool] = dataclasses.field(
default=_SCREAMING_SNAKE_CASE , metadata={"help": "Whether to fine-tune on labeled data after pseudo training."} , )
__lowercase : Optional[float] = dataclasses.field(
default=0.0 , metadata={"help": "Confidence threshold for pseudo-labeled data filtering."} , )
__lowercase : Optional[int] = dataclasses.field(
        default=100 , metadata={"help": "Maximum number of self-training iterations."} , )
__lowercase : Optional[int] = dataclasses.field(
default=_SCREAMING_SNAKE_CASE , metadata={"help": "Random seed for initialization."} , )
def A ( lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
if args.do_filter_by_confidence:
UpperCamelCase = dataset.filter(lambda lowercase : example["probability"] > args.confidence_threshold )
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
UpperCamelCase = int(eval_result * len(lowercase ) )
print(lowercase )
UpperCamelCase = dataset.sort('probability' , reverse=lowercase )
UpperCamelCase = dataset.select(range(lowercase ) )
UpperCamelCase = dataset.remove_columns(['label', 'probability'] )
UpperCamelCase = dataset.rename_column('prediction' , 'label' )
UpperCamelCase = dataset.map(lambda lowercase : {"label": idalabel[example["label"]]} )
UpperCamelCase = dataset.shuffle(seed=args.seed )
UpperCamelCase = os.path.join(lowercase , f'''train_pseudo.{args.data_file_extension}''' )
if args.data_file_extension == "csv":
dataset.to_csv(lowercase , index=lowercase )
else:
dataset.to_json(lowercase )
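# The two pseudo-label filters above, reduced to a standalone sketch over a
# plain list of rows instead of a `datasets.Dataset`; names are illustrative.
def filter_pseudo_labels_sketch(rows, confidence_threshold=None, eval_result=None):
    if confidence_threshold is not None:
        rows = [r for r in rows if r["probability"] > confidence_threshold]
    if eval_result is not None:
        # keep the top `eval_result` fraction, most confident first
        rows = sorted(rows, key=lambda r: r["probability"], reverse=True)
        rows = rows[: int(eval_result * len(rows))]
    return rows

_rows = [{"prediction": "pos", "probability": p} for p in (0.9, 0.6, 0.3)]
assert len(filter_pseudo_labels_sketch(_rows, confidence_threshold=0.5)) == 2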
def A ( lowercase , lowercase , lowercase , lowercase , **lowercase ) -> int:
'''simple docstring'''
UpperCamelCase = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
UpperCamelCase = STModelArguments(model_name_or_path=lowercase )
UpperCamelCase = STDataArguments(train_file=lowercase , infer_file=lowercase )
UpperCamelCase = STTrainingArguments(output_dir=lowercase )
UpperCamelCase = argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(lowercase ).items():
setattr(lowercase , lowercase , lowercase )
for key, value in kwargs.items():
if hasattr(lowercase , lowercase ):
setattr(lowercase , lowercase , lowercase )
# Sanity checks
UpperCamelCase = {}
UpperCamelCase = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
UpperCamelCase = args.train_file
UpperCamelCase = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
UpperCamelCase = args.eval_file
for key in data_files:
UpperCamelCase = data_files[key].split('.' )[-1]
assert extension in ["csv", "json"], f'''`{key}_file` should be a csv or a json file.'''
if args.data_file_extension is None:
UpperCamelCase = extension
else:
            assert extension == args.data_file_extension, f'''`{key}_file` should be a {args.data_file_extension} file.'''
assert (
args.eval_metric in datasets.list_metrics()
), f'''{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.'''
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info('Creating the initial data directory for self-training...' )
UpperCamelCase = f'''{args.output_dir}/self-train_iter-{{}}'''.format
UpperCamelCase = data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=lowercase )
os.makedirs(lowercase , exist_ok=lowercase )
accelerator.wait_for_everyone()
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = 0
UpperCamelCase = False
# Show the progress bar
UpperCamelCase = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
UpperCamelCase = data_dir_format(lowercase )
assert os.path.exists(lowercase )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
UpperCamelCase = os.path.join(lowercase , 'stage-1' )
UpperCamelCase = {
'accelerator': accelerator,
'model_name_or_path': args.model_name_or_path,
'cache_dir': args.cache_dir,
'do_train': True,
'train_file': data_files['train'] if iteration == 0 else data_files['train_pseudo'],
'do_eval': True if args.eval_file is not None else False,
'eval_file': data_files['eval'],
'do_predict': True,
'infer_file': data_files['infer'],
'task_name': args.task_name,
'label_list': args.label_list,
'output_dir': current_output_dir,
'eval_metric': args.eval_metric,
'evaluation_strategy': args.evaluation_strategy,
'early_stopping_patience': args.early_stopping_patience,
'early_stopping_threshold': args.early_stopping_threshold,
'seed': args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(lowercase , lowercase ):
arguments_dict.update({key: value} )
UpperCamelCase = os.path.join(lowercase , 'best-checkpoint' , lowercase )
if os.path.exists(lowercase ):
logger.info(
'Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.' , lowercase , lowercase , )
else:
logger.info('***** Running self-training: iteration: %d, stage: 1 *****' , lowercase )
finetune(**lowercase )
accelerator.wait_for_everyone()
assert os.path.exists(lowercase )
logger.info('Self-training job completed: iteration: %d, stage: 1.' , lowercase )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
UpperCamelCase = os.path.join(lowercase , 'best-checkpoint' )
UpperCamelCase = os.path.join(lowercase , 'stage-2' )
# Update arguments_dict
UpperCamelCase = model_path
UpperCamelCase = data_files['train']
UpperCamelCase = current_output_dir
UpperCamelCase = os.path.join(lowercase , 'best-checkpoint' , lowercase )
if os.path.exists(lowercase ):
logger.info(
'Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.' , lowercase , lowercase , )
else:
logger.info('***** Running self-training: iteration: %d, stage: 2 *****' , lowercase )
finetune(**lowercase )
accelerator.wait_for_everyone()
assert os.path.exists(lowercase )
logger.info('Self-training job completed: iteration: %d, stage: 2.' , lowercase )
UpperCamelCase = iteration
UpperCamelCase = data_dir_format(iteration + 1 )
UpperCamelCase = AutoConfig.from_pretrained(os.path.join(lowercase , 'best-checkpoint' ) )
UpperCamelCase = config.idalabel
UpperCamelCase = os.path.join(lowercase , 'eval_results_best-checkpoint.json' )
UpperCamelCase = os.path.join(lowercase , 'test_results_best-checkpoint.json' )
assert os.path.exists(lowercase )
with open(lowercase , 'r' ) as f:
UpperCamelCase = float(json.load(lowercase )[args.eval_metric] )
UpperCamelCase = os.path.join(lowercase , 'infer_output_best-checkpoint.csv' )
assert os.path.exists(lowercase )
# Loading the dataset from local csv or json files.
UpperCamelCase = load_dataset(args.data_file_extension , data_files={'data': data_files['infer']} )['data']
UpperCamelCase = load_dataset('csv' , data_files={'data': infer_output_file} )['data']
if accelerator.is_main_process:
os.makedirs(lowercase , exist_ok=lowercase )
shutil.copy(lowercase , os.path.join(lowercase , f'''eval_results_iter-{iteration}.json''' ) )
if os.path.exists(lowercase ):
shutil.copy(lowercase , os.path.join(lowercase , f'''test_results_iter-{iteration}.json''' ) )
create_pseudo_labeled_data(lowercase , lowercase , lowercase , lowercase , lowercase , lowercase )
accelerator.wait_for_everyone()
UpperCamelCase = os.path.join(lowercase , f'''train_pseudo.{args.data_file_extension}''' )
if args.evaluation_strategy != IntervalStrategy.NO.value:
UpperCamelCase = eval_result
if best_iteration is None:
UpperCamelCase = new_iteration
UpperCamelCase = new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
UpperCamelCase = new_iteration
UpperCamelCase = new_eval_result
UpperCamelCase = 0
else:
if new_eval_result == best_eval_result:
UpperCamelCase = new_iteration
UpperCamelCase = new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
UpperCamelCase = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info('Best iteration: %d' , lowercase )
logger.info('Best evaluation result: %s = %f' , args.eval_metric , lowercase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(lowercase , f'''eval_results_iter-{iteration}.json''' ) , os.path.join(lowercase , 'eval_results_best-iteration.json' ) , )
else:
# Assume that the last iteration is the best
logger.info('Best iteration: %d' , args.max_selftrain_iterations - 1 )
logger.info('Best evaluation result: %s = %f' , args.eval_metric , lowercase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(lowercase , f'''eval_results_iter-{args.max_selftrain_iterations - 1}.json''' ) , os.path.join(lowercase , 'eval_results_best-iteration.json' ) , )
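# The early-stopping bookkeeping in the loop above, as a standalone sketch:
# stop once the metric fails to improve by more than `threshold` for
# `patience` consecutive iterations.
def should_stop_sketch(history, patience=10, threshold=0.0):
    best, counter = None, 0
    for value in history:
        if best is None or value - best > threshold:
            best, counter = value, 0
        else:
            counter += 1
            if counter >= patience:
                return True
    return False

assert should_stop_sketch([0.5, 0.6, 0.6, 0.6], patience=2)
assert not should_stop_sketch([0.5, 0.6, 0.7], patience=2)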
| 705 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_UpperCAmelCase : Tuple = logging.get_logger(__name__)
_UpperCAmelCase : Union[str, Any] = {
"facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}
class lowercase ( _SCREAMING_SNAKE_CASE ):
__lowercase : Dict = "data2vec-text"
def __init__( self , A_=30_522 , A_=768 , A_=12 , A_=12 , A_=3_072 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=2 , A_=0.02 , A_=1e-12 , A_=1 , A_=0 , A_=2 , A_="absolute" , A_=True , A_=None , **A_ , ) -> Any:
"""simple docstring"""
super().__init__(pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , **A_ )
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = hidden_act
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = max_position_embeddings
UpperCamelCase = type_vocab_size
UpperCamelCase = initializer_range
UpperCamelCase = layer_norm_eps
UpperCamelCase = position_embedding_type
UpperCamelCase = use_cache
UpperCamelCase = classifier_dropout
class lowercase ( _SCREAMING_SNAKE_CASE ):
@property
def __UpperCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
UpperCamelCase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
UpperCamelCase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
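# How a mapping like the one above is typically consumed at export time: each
# key names a model input and the inner dict marks which tensor dimensions are
# dynamic. Self-contained sketch (no transformers required):
from collections import OrderedDict

_dynamic_axis = {0: "batch", 1: "sequence"}
_onnx_inputs = OrderedDict([("input_ids", _dynamic_axis), ("attention_mask", _dynamic_axis)])
# e.g. torch.onnx.export(..., input_names=list(_onnx_inputs), dynamic_axes=dict(_onnx_inputs))
assert list(_onnx_inputs) == ["input_ids", "attention_mask"]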
| 3 | 0 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
__a :Any = ''
__a :int = ''
__a :str = ''
__a :Optional[Any] = 1 # (0 is vertical, 1 is horizontal)
def __snake_case ( ):
"""simple docstring"""
A_ , A_ = get_dataset(__UpperCamelCase ,__UpperCamelCase )
print("Processing..." )
A_ , A_ , A_ = update_image_and_anno(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
for index, image in enumerate(__UpperCamelCase ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
A_ = random_chars(32 )
A_ = paths[index].split(os.sep )[-1].rsplit("." ,1 )[0]
A_ = f'''{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}'''
cva.imwrite(f'''/{file_root}.jpg''' ,__UpperCamelCase ,[cva.IMWRITE_JPEG_QUALITY, 85] )
print(f'''Success {index+1}/{len(__UpperCamelCase )} with {file_name}''' )
A_ = []
for anno in new_annos[index]:
A_ = f'''{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}'''
annos_list.append(__UpperCamelCase )
with open(f'''/{file_root}.txt''' ,"w" ) as outfile:
outfile.write("\n".join(line for line in annos_list ) )
def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : str ):
"""simple docstring"""
A_ = []
A_ = []
for label_file in glob.glob(os.path.join(__UpperCamelCase ,"*.txt" ) ):
A_ = label_file.split(os.sep )[-1].rsplit("." ,1 )[0]
with open(__UpperCamelCase ) as in_file:
A_ = in_file.readlines()
A_ = os.path.join(__UpperCamelCase ,f'''{label_name}.jpg''' )
A_ = []
for obj_list in obj_lists:
A_ = obj_list.rstrip("\n" ).split(" " )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(__UpperCamelCase )
labels.append(__UpperCamelCase )
return img_paths, labels
def __snake_case ( __UpperCamelCase : list ,__UpperCamelCase : list ,__UpperCamelCase : int = 1 ):
"""simple docstring"""
A_ = []
A_ = []
A_ = []
for idx in range(len(__UpperCamelCase ) ):
A_ = []
A_ = img_list[idx]
path_list.append(__UpperCamelCase )
A_ = anno_list[idx]
A_ = cva.imread(__UpperCamelCase )
if flip_type == 1:
A_ = cva.flip(__UpperCamelCase ,__UpperCamelCase )
for bbox in img_annos:
A_ = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
A_ = cva.flip(__UpperCamelCase ,__UpperCamelCase )
for bbox in img_annos:
A_ = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(__UpperCamelCase )
new_imgs_list.append(__UpperCamelCase )
return new_imgs_list, new_annos_lists, path_list
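# The flip arithmetic above in isolation: YOLO-style boxes are
# (class, x_center, y_center, width, height) with coordinates in [0, 1], so a
# horizontal flip only needs x_center -> 1 - x_center, and a vertical flip
# only needs y_center -> 1 - y_center. Illustrative helper:
def flip_bbox_sketch(bbox, flip_type=1):
    cls, x, y, w, h = bbox
    return (cls, 1 - x, y, w, h) if flip_type == 1 else (cls, x, 1 - y, w, h)

assert flip_bbox_sketch((0, 0.25, 0.5, 0.1, 0.2)) == (0, 0.75, 0.5, 0.1, 0.2)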
def __snake_case ( __UpperCamelCase : int = 32 ):
"""simple docstring"""
assert number_char > 1, "The number of character should greater than 1"
A_ = ascii_lowercase + digits
return "".join(random.choice(__UpperCamelCase ) for _ in range(__UpperCamelCase ) )
if __name__ == "__main__":
main()
print('DONE ✅') | 86 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 5 | 0 |
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def _lowerCamelCase ( snake_case ):
return np.dot(snake_case , snake_case )
class lowerCamelCase__ :
def __init__( self : Optional[Any] , *,
lowercase__ : float = np.inf , lowercase__ : str = "linear" , lowercase__ : float = 0.0 , ):
_lowerCAmelCase = regularization
_lowerCAmelCase = gamma
if kernel == "linear":
_lowerCAmelCase = self.__linear
elif kernel == "rbf":
if self.gamma == 0:
raise ValueError('rbf kernel requires gamma' )
if not isinstance(self.gamma , (float, int) ):
raise ValueError('gamma must be float or int' )
if not self.gamma > 0:
raise ValueError('gamma must be > 0' )
_lowerCAmelCase = self.__rbf
# in the future, there could be a default value like in sklearn
            # sklearn: default gamma = 1 / (n_features * X.var()) (see the sklearn docs)
            # previously it was 1 / n_features
else:
_lowerCAmelCase = f'Unknown kernel: {kernel}'
raise ValueError(lowercase__ )
def SCREAMING_SNAKE_CASE__ ( self : int , lowercase__ : ndarray , lowercase__ : ndarray ):
return np.dot(lowercase__ , lowercase__ )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , lowercase__ : ndarray , lowercase__ : ndarray ):
return np.exp(-(self.gamma * norm_squared(vectora - vectora )) )
def SCREAMING_SNAKE_CASE__ ( self : Any , lowercase__ : list[ndarray] , lowercase__ : ndarray ):
_lowerCAmelCase = observations
_lowerCAmelCase = classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
# constraint: self.C >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
((_lowerCAmelCase) , ) = np.shape(lowercase__ )
def to_minimize(lowercase__ : ndarray ) -> float:
_lowerCAmelCase = 0
((_lowerCAmelCase) , ) = np.shape(lowercase__ )
for i in range(lowercase__ ):
for j in range(lowercase__ ):
s += (
candidate[i]
* candidate[j]
* classes[i]
* classes[j]
* self.kernel(observations[i] , observations[j] )
)
return 1 / 2 * s - sum(lowercase__ )
_lowerCAmelCase = LinearConstraint(lowercase__ , 0 , 0 )
_lowerCAmelCase = Bounds(0 , self.regularization )
_lowerCAmelCase = minimize(
lowercase__ , np.ones(lowercase__ ) , bounds=lowercase__ , constraints=[ly_contraint] ).x
_lowerCAmelCase = l_star
# calculating mean offset of separation plane to points
_lowerCAmelCase = 0
for i in range(lowercase__ ):
for j in range(lowercase__ ):
s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
observations[i] , observations[j] )
_lowerCAmelCase = s / n
def SCREAMING_SNAKE_CASE__ ( self : Any , lowercase__ : ndarray ):
_lowerCAmelCase = sum(
self.optimum[n]
* self.classes[n]
* self.kernel(self.observations[n] , lowercase__ )
for n in range(len(self.classes ) ) )
return 1 if s + self.offset >= 0 else -1
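# The Wolfe dual objective minimized in the fit method above, written out
# standalone for a linear kernel (a vectorized form of the double loop in
# `to_minimize`; names are illustrative):
#   L(l) = 1/2 * sum_{n,m} l_n * l_m * y_n * y_m * <x_n, x_m> - sum_n l_n
import numpy as np

def dual_objective_sketch(l, xs, ys):
    gram = xs @ xs.T  # linear kernel matrix K[n, m] = <x_n, x_m>
    return 0.5 * (l * ys) @ gram @ (l * ys) - l.sum()

_xs = np.array([[0.0, 1.0], [2.0, 0.0]])
_ys = np.array([1.0, -1.0])
assert np.isclose(dual_objective_sketch(np.array([0.1, 0.1]), _xs, _ys), -0.175)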
if __name__ == "__main__":
import doctest
doctest.testmod()
| 225 |
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def _lowerCamelCase ( snake_case ):
_lowerCAmelCase = torch.exp(snake_case )
_lowerCAmelCase = torch.sum(snake_case , dim=1 ) # sum of exp(x_i)
_lowerCAmelCase = torch.sum(x * exp_x , dim=1 ) # sum of x_i * exp(x_i)
return torch.log(snake_case ) - B / A
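# Sanity check for the entropy helper above: for logits x it computes the
# Shannon entropy of softmax(x) per row, since
#   H = log(sum_i exp(x_i)) - sum_i(x_i * exp(x_i)) / sum_i exp(x_i)
# Quick standalone verification (illustrative names):
_x = torch.tensor([[1.0, 2.0, 3.0]])
_p = torch.softmax(_x, dim=1)
_A = torch.exp(_x).sum(dim=1)
_B = (_x * torch.exp(_x)).sum(dim=1)
assert torch.allclose(-(_p * _p.log()).sum(dim=1), torch.log(_A) - _B / _A)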
class lowerCamelCase__ ( nn.Module ):
def __init__( self : str , lowercase__ : List[str] ):
super().__init__()
_lowerCAmelCase = config.output_attentions
_lowerCAmelCase = config.output_hidden_states
_lowerCAmelCase = nn.ModuleList([BertLayer(lowercase__ ) for _ in range(config.num_hidden_layers )] )
_lowerCAmelCase = nn.ModuleList([BertHighway(lowercase__ ) for _ in range(config.num_hidden_layers )] )
_lowerCAmelCase = [-1 for _ in range(config.num_hidden_layers )]
def SCREAMING_SNAKE_CASE__ ( self : int , lowercase__ : Any ):
if (type(lowercase__ ) is float) or (type(lowercase__ ) is int):
for i in range(len(self.early_exit_entropy ) ):
_lowerCAmelCase = x
else:
_lowerCAmelCase = x
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , lowercase__ : str ):
_lowerCAmelCase = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , lowercase__ : Any , lowercase__ : Optional[Any]=None , lowercase__ : List[str]=None , lowercase__ : str=None , lowercase__ : Optional[Any]=None , ):
_lowerCAmelCase = ()
_lowerCAmelCase = ()
_lowerCAmelCase = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
_lowerCAmelCase = all_hidden_states + (hidden_states,)
_lowerCAmelCase = layer_module(
lowercase__ , lowercase__ , head_mask[i] , lowercase__ , lowercase__ )
_lowerCAmelCase = layer_outputs[0]
if self.output_attentions:
_lowerCAmelCase = all_attentions + (layer_outputs[1],)
_lowerCAmelCase = (hidden_states,)
if self.output_hidden_states:
_lowerCAmelCase = current_outputs + (all_hidden_states,)
if self.output_attentions:
_lowerCAmelCase = current_outputs + (all_attentions,)
_lowerCAmelCase = self.highway[i](lowercase__ )
# logits, pooled_output
if not self.training:
_lowerCAmelCase = highway_exit[0]
_lowerCAmelCase = entropy(lowercase__ )
_lowerCAmelCase = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
_lowerCAmelCase = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
_lowerCAmelCase = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(lowercase__ , i + 1 )
else:
_lowerCAmelCase = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
_lowerCAmelCase = all_hidden_states + (hidden_states,)
_lowerCAmelCase = (hidden_states,)
if self.output_hidden_states:
_lowerCAmelCase = outputs + (all_hidden_states,)
if self.output_attentions:
_lowerCAmelCase = outputs + (all_attentions,)
_lowerCAmelCase = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
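# The entropy-based early-exit rule used above, standalone: after layer i, if
# the entropy of the highway classifier's softmax falls below that layer's
# threshold, inference stops and the highway logits are returned early.
def maybe_exit_sketch(highway_logits, threshold):
    probs = torch.softmax(highway_logits, dim=-1)
    layer_entropy = -(probs * probs.log()).sum(dim=-1)
    return bool((layer_entropy < threshold).all())

assert maybe_exit_sketch(torch.tensor([[10.0, -10.0]]), threshold=0.1)  # confident -> exit
assert not maybe_exit_sketch(torch.tensor([[0.0, 0.0]]), threshold=0.1)  # uncertain (H = ln 2) -> continue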
@add_start_docstrings(
"The Bert Model transformer with early exiting (DeeBERT). " ,UpperCAmelCase ,)
class lowerCamelCase__ ( UpperCAmelCase ):
def __init__( self : Optional[int] , lowercase__ : List[Any] ):
super().__init__(lowercase__ )
_lowerCAmelCase = config
_lowerCAmelCase = BertEmbeddings(lowercase__ )
_lowerCAmelCase = DeeBertEncoder(lowercase__ )
_lowerCAmelCase = BertPooler(lowercase__ )
self.init_weights()
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
self.encoder.init_highway_pooler(self.pooler )
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
return self.embeddings.word_embeddings
def SCREAMING_SNAKE_CASE__ ( self : int , lowercase__ : List[Any] ):
_lowerCAmelCase = value
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , lowercase__ : List[str] ):
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(lowercase__ )
@add_start_docstrings_to_model_forward(lowercase__ )
def SCREAMING_SNAKE_CASE__ ( self : int , lowercase__ : int=None , lowercase__ : Union[str, Any]=None , lowercase__ : str=None , lowercase__ : Any=None , lowercase__ : int=None , lowercase__ : Optional[int]=None , lowercase__ : Any=None , lowercase__ : int=None , ):
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' )
elif input_ids is not None:
_lowerCAmelCase = input_ids.size()
elif inputs_embeds is not None:
_lowerCAmelCase = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds' )
_lowerCAmelCase = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
_lowerCAmelCase = torch.ones(lowercase__ , device=lowercase__ )
if encoder_attention_mask is None:
_lowerCAmelCase = torch.ones(lowercase__ , device=lowercase__ )
if token_type_ids is None:
_lowerCAmelCase = torch.zeros(lowercase__ , dtype=torch.long , device=lowercase__ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
_lowerCAmelCase = self.get_extended_attention_mask(lowercase__ , lowercase__ , lowercase__ )
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
_lowerCAmelCase = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
_lowerCAmelCase = encoder_attention_mask[:, None, None, :]
_lowerCAmelCase = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
_lowerCAmelCase = (1.0 - encoder_extended_attention_mask) * -1_0_0_0_0.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
_lowerCAmelCase = self.get_head_mask(lowercase__ , self.config.num_hidden_layers )
_lowerCAmelCase = self.embeddings(
input_ids=lowercase__ , position_ids=lowercase__ , token_type_ids=lowercase__ , inputs_embeds=lowercase__ )
_lowerCAmelCase = self.encoder(
lowercase__ , attention_mask=lowercase__ , head_mask=lowercase__ , encoder_hidden_states=lowercase__ , encoder_attention_mask=lowercase__ , )
_lowerCAmelCase = encoder_outputs[0]
_lowerCAmelCase = self.pooler(lowercase__ )
_lowerCAmelCase = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class lowerCamelCase__ ( UpperCAmelCase ):
def __init__( self : List[Any] , lowercase__ : int , lowercase__ : Dict ):
_lowerCAmelCase = message
_lowerCAmelCase = exit_layer # start from 1!
class lowerCamelCase__ ( nn.Module ):
def __init__( self : int , lowercase__ : Optional[Any] ):
super().__init__()
_lowerCAmelCase = BertPooler(lowercase__ )
_lowerCAmelCase = nn.Dropout(config.hidden_dropout_prob )
_lowerCAmelCase = nn.Linear(config.hidden_size , config.num_labels )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , lowercase__ : Dict ):
# Pooler
_lowerCAmelCase = encoder_outputs[0]
_lowerCAmelCase = self.pooler(lowercase__ )
# "return" pooler_output
# BertModel
_lowerCAmelCase = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
_lowerCAmelCase = bmodel_output[1]
_lowerCAmelCase = self.dropout(lowercase__ )
_lowerCAmelCase = self.classifier(lowercase__ )
return logits, pooled_output
@add_start_docstrings(
"Bert Model (with early exiting - DeeBERT) with a classifier on top,\n also takes care of multi-layer training. " ,UpperCAmelCase ,)
class lowerCamelCase__ ( UpperCAmelCase ):
def __init__( self : Union[str, Any] , lowercase__ : Any ):
super().__init__(lowercase__ )
_lowerCAmelCase = config.num_labels
_lowerCAmelCase = config.num_hidden_layers
_lowerCAmelCase = DeeBertModel(lowercase__ )
_lowerCAmelCase = nn.Dropout(config.hidden_dropout_prob )
_lowerCAmelCase = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(lowercase__ )
def SCREAMING_SNAKE_CASE__ ( self : Dict , lowercase__ : Dict=None , lowercase__ : int=None , lowercase__ : Union[str, Any]=None , lowercase__ : Optional[Any]=None , lowercase__ : List[Any]=None , lowercase__ : Optional[Any]=None , lowercase__ : Tuple=None , lowercase__ : Optional[int]=-1 , lowercase__ : Optional[int]=False , ):
_lowerCAmelCase = self.num_layers
try:
_lowerCAmelCase = self.bert(
lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , position_ids=lowercase__ , head_mask=lowercase__ , inputs_embeds=lowercase__ , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
_lowerCAmelCase = outputs[1]
_lowerCAmelCase = self.dropout(lowercase__ )
_lowerCAmelCase = self.classifier(lowercase__ )
_lowerCAmelCase = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
_lowerCAmelCase = e.message
_lowerCAmelCase = e.exit_layer
_lowerCAmelCase = outputs[0]
if not self.training:
_lowerCAmelCase = entropy(lowercase__ )
_lowerCAmelCase = []
_lowerCAmelCase = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
_lowerCAmelCase = MSELoss()
_lowerCAmelCase = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
_lowerCAmelCase = CrossEntropyLoss()
_lowerCAmelCase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
_lowerCAmelCase = []
for highway_exit in outputs[-1]:
_lowerCAmelCase = highway_exit[0]
if not self.training:
highway_logits_all.append(lowercase__ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
_lowerCAmelCase = MSELoss()
_lowerCAmelCase = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
_lowerCAmelCase = CrossEntropyLoss()
_lowerCAmelCase = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(lowercase__ )
if train_highway:
_lowerCAmelCase = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
_lowerCAmelCase = (loss,) + outputs
if not self.training:
_lowerCAmelCase = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
_lowerCAmelCase = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 225 | 1 |
"""simple docstring"""
def snake_case ( A__ ):
    # if the collection is empty, return an empty list
if collection == []:
return []
# get some information about the collection
UpperCAmelCase_ : str = len(A__ )
UpperCAmelCase_ : Union[str, Any] = max(A__ )
UpperCAmelCase_ : Union[str, Any] = min(A__ )
# create the counting array
UpperCAmelCase_ : Dict = coll_max + 1 - coll_min
UpperCAmelCase_ : Dict = [0] * counting_arr_length
    # count how many times each number appears in the collection
for number in collection:
counting_arr[number - coll_min] += 1
    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i are in the collection
for i in range(1 ,A__ ):
UpperCAmelCase_ : Dict = counting_arr[i] + counting_arr[i - 1]
# create the output collection
UpperCAmelCase_ : Optional[Any] = [0] * coll_len
# place the elements in the output, respecting the original order (stable
# sort) from end to begin, updating counting_arr
for i in reversed(range(0 ,A__ ) ):
UpperCAmelCase_ : List[str] = collection[i]
counting_arr[collection[i] - coll_min] -= 1
return ordered
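# The same counting pass as a standalone, runnable restatement (names here
# are illustrative): prefix sums turn per-value counts into final positions,
# and the reversed placement pass keeps the sort stable.
def counting_sort_sketch(xs):
    lo = min(xs)
    counts = [0] * (max(xs) - lo + 1)
    for x in xs:
        counts[x - lo] += 1
    for i in range(1, len(counts)):  # prefix sums: counts[i] = #elements <= lo + i
        counts[i] += counts[i - 1]
    out = [0] * len(xs)
    for x in reversed(xs):  # backwards pass keeps equal keys in original order
        counts[x - lo] -= 1
        out[counts[x - lo]] = x
    return out

assert counting_sort_sketch([0, 5, 3, 2, 2]) == [0, 2, 2, 3, 5]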
def snake_case ( A__ ):
return "".join([chr(A__ ) for i in counting_sort([ord(A__ ) for c in string] )] )
if __name__ == "__main__":
# Test string sort
assert counting_sort_string('''thisisthestring''') == "eghhiiinrsssttt"
lowerCamelCase_ = input('''Enter numbers separated by a comma:\n''').strip()
lowerCamelCase_ = [int(item) for item in user_input.split(''',''')]
print(counting_sort(unsorted))
| 95 |
'''simple docstring'''
import cmath
import math
def SCREAMING_SNAKE_CASE ( lowercase_ : float , lowercase_ : float , lowercase_ : float , lowercase_ : float ):
lowercase = math.radians(lowercase_ )
lowercase = math.radians(lowercase_ )
# Convert voltage and current to rectangular form
lowercase = cmath.rect(lowercase_ , lowercase_ )
lowercase = cmath.rect(lowercase_ , lowercase_ )
# Calculate apparent power
return voltage_rect * current_rect
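# Usage sketch (the function name above is mangled, so the same math is
# inlined here): with both phase angles at 0 degrees, S = V * I is purely
# real, e.g. 100 V at 5 A gives 500 VA.
_v = cmath.rect(100, math.radians(0))
_i = cmath.rect(5, math.radians(0))
assert _v * _i == (500 + 0j)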
if __name__ == "__main__":
import doctest
doctest.testmod()
| 588 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class _snake_case :
"""simple docstring"""
def __init__( self : Tuple , UpperCamelCase_ : Any , UpperCamelCase_ : Dict=2 , UpperCamelCase_ : Optional[Any]=32 , UpperCamelCase_ : int=16 , UpperCamelCase_ : List[str]=3 , UpperCamelCase_ : Optional[Any]=True , UpperCamelCase_ : Dict=True , UpperCamelCase_ : Dict=32 , UpperCamelCase_ : List[Any]=4 , UpperCamelCase_ : Union[str, Any]=[0, 1, 2, 3] , UpperCamelCase_ : Tuple=4 , UpperCamelCase_ : Tuple=37 , UpperCamelCase_ : int="gelu" , UpperCamelCase_ : List[Any]=0.1 , UpperCamelCase_ : Tuple=0.1 , UpperCamelCase_ : List[Any]=0.0_2 , UpperCamelCase_ : str=3 , UpperCamelCase_ : Any=[1, 384, 24, 24] , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : Dict=None , ):
lowerCAmelCase_ : Optional[Any] =parent
lowerCAmelCase_ : List[Any] =batch_size
lowerCAmelCase_ : Union[str, Any] =image_size
lowerCAmelCase_ : Dict =patch_size
lowerCAmelCase_ : Optional[Any] =num_channels
lowerCAmelCase_ : Dict =is_training
lowerCAmelCase_ : str =use_labels
lowerCAmelCase_ : Tuple =hidden_size
lowerCAmelCase_ : Tuple =num_hidden_layers
lowerCAmelCase_ : Optional[int] =backbone_out_indices
lowerCAmelCase_ : Tuple =num_attention_heads
lowerCAmelCase_ : int =intermediate_size
lowerCAmelCase_ : Optional[int] =hidden_act
lowerCAmelCase_ : List[Any] =hidden_dropout_prob
lowerCAmelCase_ : int =attention_probs_dropout_prob
lowerCAmelCase_ : Optional[Any] =initializer_range
lowerCAmelCase_ : int =num_labels
lowerCAmelCase_ : Any =backbone_featmap_shape
lowerCAmelCase_ : str =scope
lowerCAmelCase_ : Any =is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
lowerCAmelCase_ : Tuple =(image_size // patch_size) ** 2
lowerCAmelCase_ : Any =num_patches + 1
def __A ( self : str ):
lowerCAmelCase_ : Union[str, Any] =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase_ : Optional[Any] =None
if self.use_labels:
lowerCAmelCase_ : Union[str, Any] =ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
lowerCAmelCase_ : Union[str, Any] =self.get_config()
return config, pixel_values, labels
def __A ( self : Optional[int] ):
lowerCAmelCase_ : str ={
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [96, 192, 384, 768],
'''num_groups''': 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=UpperCamelCase_ , backbone_featmap_shape=self.backbone_featmap_shape , )
def __A ( self : Tuple , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Optional[int] ):
lowerCAmelCase_ : Union[str, Any] =DPTModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowerCAmelCase_ : List[str] =model(UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self : List[str] , UpperCamelCase_ : Any , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Dict ):
lowerCAmelCase_ : Optional[Any] =self.num_labels
lowerCAmelCase_ : List[str] =DPTForDepthEstimation(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowerCAmelCase_ : List[Any] =model(UpperCamelCase_ )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def __A ( self : Any , UpperCamelCase_ : Tuple , UpperCamelCase_ : int , UpperCamelCase_ : str ):
lowerCAmelCase_ : Union[str, Any] =self.num_labels
lowerCAmelCase_ : Union[str, Any] =DPTForSemanticSegmentation(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowerCAmelCase_ : List[str] =model(UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def __A ( self : Optional[int] ):
lowerCAmelCase_ : Union[str, Any] =self.prepare_config_and_inputs()
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] =config_and_inputs
lowerCAmelCase_ : Optional[int] ={'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _snake_case ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase : Optional[Any] = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
_UpperCamelCase : Optional[Any] = (
{
'''depth-estimation''': DPTForDepthEstimation,
'''feature-extraction''': DPTModel,
'''image-segmentation''': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_UpperCamelCase : Optional[Any] = False
_UpperCamelCase : int = False
_UpperCamelCase : List[Any] = False
def __A ( self : List[str] ):
lowerCAmelCase_ : Any =DPTModelTester(self )
lowerCAmelCase_ : Dict =ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ , hidden_size=37 )
def __A ( self : Optional[int] ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''DPT does not use inputs_embeds''' )
def __A ( self : int ):
pass
def __A ( self : Any ):
lowerCAmelCase_ , lowerCAmelCase_ : List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ : List[Any] =model_class(UpperCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCAmelCase_ : Optional[int] =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase_ , nn.Linear ) )
def __A ( self : int ):
lowerCAmelCase_ , lowerCAmelCase_ : List[str] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ : Any =model_class(UpperCamelCase_ )
lowerCAmelCase_ : List[Any] =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase_ : Dict =[*signature.parameters.keys()]
lowerCAmelCase_ : List[str] =['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCamelCase_ )
def __A ( self : List[Any] ):
lowerCAmelCase_ : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def __A ( self : str ):
lowerCAmelCase_ : Optional[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*UpperCamelCase_ )
def __A ( self : Tuple ):
lowerCAmelCase_ : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*UpperCamelCase_ )
def __A ( self : Union[str, Any] ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
lowerCAmelCase_ , lowerCAmelCase_ : Any =self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ : Optional[Any] =True
if model_class in get_values(UpperCamelCase_ ):
continue
lowerCAmelCase_ : List[str] =model_class(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.train()
lowerCAmelCase_ : List[Any] =self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ , return_labels=UpperCamelCase_ )
lowerCAmelCase_ : Tuple =model(**UpperCamelCase_ ).loss
loss.backward()
def __A ( self : Tuple ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
lowerCAmelCase_ , lowerCAmelCase_ : Dict =self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ : Optional[Any] =False
lowerCAmelCase_ : str =True
if model_class in get_values(UpperCamelCase_ ) or not model_class.supports_gradient_checkpointing:
continue
lowerCAmelCase_ : Union[str, Any] =model_class(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.gradient_checkpointing_enable()
model.train()
lowerCAmelCase_ : Dict =self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ , return_labels=UpperCamelCase_ )
lowerCAmelCase_ : int =model(**UpperCamelCase_ ).loss
loss.backward()
def __A ( self : List[Any] ):
lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] =self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ : str =_config_zero_init(UpperCamelCase_ )
for model_class in self.all_model_classes:
lowerCAmelCase_ : Dict =model_class(config=UpperCamelCase_ )
# Skip the check for the backbone
lowerCAmelCase_ : Dict =[]
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
lowerCAmelCase_ : Dict =[F'{name}.{key}' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __A ( self : Dict ):
pass
@slow
def __A ( self : int ):
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
lowerCAmelCase_ : Union[str, Any] =DPTModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
def __A ( self : Optional[Any] ):
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
lowerCAmelCase_ , lowerCAmelCase_ : Tuple =self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ : List[Any] ='''add'''
with self.assertRaises(UpperCamelCase_ ):
lowerCAmelCase_ : Any =DPTForDepthEstimation(UpperCamelCase_ )
def SCREAMING_SNAKE_CASE__ ( ):
lowerCAmelCase_ : str =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
@slow
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : int ):
lowerCAmelCase_ : int =DPTImageProcessor.from_pretrained('''Intel/dpt-hybrid-midas''' )
lowerCAmelCase_ : Tuple =DPTForDepthEstimation.from_pretrained('''Intel/dpt-hybrid-midas''' ).to(UpperCamelCase_ )
lowerCAmelCase_ : Dict =prepare_img()
lowerCAmelCase_ : Any =image_processor(images=UpperCamelCase_ , return_tensors='''pt''' ).to(UpperCamelCase_ )
# forward pass
with torch.no_grad():
lowerCAmelCase_ : Optional[Any] =model(**UpperCamelCase_ )
lowerCAmelCase_ : Optional[int] =outputs.predicted_depth
# verify the predicted depth
lowerCAmelCase_ : str =torch.Size((1, 384, 384) )
self.assertEqual(predicted_depth.shape , UpperCamelCase_ )
lowerCAmelCase_ : int =torch.tensor(
[[[5.6_4_3_7, 5.6_1_4_6, 5.6_5_1_1], [5.4_3_7_1, 5.5_6_4_9, 5.5_9_5_8], [5.5_2_1_5, 5.5_1_8_4, 5.5_2_9_3]]] ).to(UpperCamelCase_ )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , UpperCamelCase_ , atol=1E-4 ) )
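# A common follow-up to the integration test above (not part of the test
# itself): normalizing a predicted depth map to an 8-bit image for visual
# inspection. Sketch with a random tensor standing in for
# `outputs.predicted_depth[0]`.
import numpy as _np
from PIL import Image as _Image

_depth = torch.rand(384, 384)
_depth = (_depth - _depth.min()) / (_depth.max() - _depth.min())  # normalize to [0, 1]
_img = _Image.fromarray((_depth.numpy() * 255).astype(_np.uint8))  # 8-bit grayscale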
| 305 |
'''simple docstring'''
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
__lowercase = logging.getLogger(__name__)
class _snake_case ( lowerCAmelCase_ ):
"""simple docstring"""
def __init__( self : str , UpperCamelCase_ : List[Any]=-1 ):
        # in NER datasets, the last column is usually reserved for the NER label
lowerCAmelCase_ : Tuple =label_idx
def __A ( self : List[str] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Union[Split, str] ):
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
lowerCAmelCase_ : Any =mode.value
lowerCAmelCase_ : List[str] =os.path.join(UpperCamelCase_ , F'{mode}.txt' )
lowerCAmelCase_ : Tuple =1
lowerCAmelCase_ : Dict =[]
with open(UpperCamelCase_ , encoding='''utf-8''' ) as f:
lowerCAmelCase_ : Optional[Any] =[]
lowerCAmelCase_ : Optional[Any] =[]
for line in f:
if line.startswith('''-DOCSTART-''' ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=F'{mode}-{guid_index}' , words=UpperCamelCase_ , labels=UpperCamelCase_ ) )
guid_index += 1
lowerCAmelCase_ : Dict =[]
lowerCAmelCase_ : int =[]
else:
lowerCAmelCase_ : Tuple =line.split(''' ''' )
words.append(splits[0] )
if len(UpperCamelCase_ ) > 1:
labels.append(splits[self.label_idx].replace('''\n''' , '''''' ) )
else:
# Examples could have no label for mode = "test"
labels.append('''O''' )
if words:
examples.append(InputExample(guid=F'{mode}-{guid_index}' , words=UpperCamelCase_ , labels=UpperCamelCase_ ) )
return examples
def __A ( self : List[str] , UpperCamelCase_ : TextIO , UpperCamelCase_ : TextIO , UpperCamelCase_ : List ):
lowerCAmelCase_ : Any =0
for line in test_input_reader:
if line.startswith('''-DOCSTART-''' ) or line == "" or line == "\n":
writer.write(UpperCamelCase_ )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
lowerCAmelCase_ : List[str] =line.split()[0] + ''' ''' + preds_list[example_id].pop(0 ) + '''\n'''
writer.write(UpperCamelCase_ )
else:
logger.warning('''Maximum sequence length exceeded: No prediction for \'%s\'.''' , line.split()[0] )
def __A ( self : int , UpperCamelCase_ : str ):
if path:
with open(UpperCamelCase_ , '''r''' ) as f:
lowerCAmelCase_ : int =f.read().splitlines()
if "O" not in labels:
lowerCAmelCase_ : str =['''O'''] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class _snake_case ( lowerCAmelCase_ ):
"""simple docstring"""
def __init__( self : List[str] ):
        # in the CoNLL-2003 dataset, the chunk column is second-to-last
super().__init__(label_idx=-2 )
def __A ( self : Optional[int] , UpperCamelCase_ : str ):
if path:
with open(UpperCamelCase_ , '''r''' ) as f:
lowerCAmelCase_ : Tuple =f.read().splitlines()
if "O" not in labels:
lowerCAmelCase_ : Optional[int] =['''O'''] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class _snake_case ( lowerCAmelCase_ ):
"""simple docstring"""
def __A ( self : Union[str, Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : Union[Split, str] ):
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
lowerCAmelCase_ : str =mode.value
lowerCAmelCase_ : Tuple =os.path.join(UpperCamelCase_ , F'{mode}.txt' )
lowerCAmelCase_ : Any =1
lowerCAmelCase_ : Union[str, Any] =[]
with open(UpperCamelCase_ , encoding='''utf-8''' ) as f:
for sentence in parse_incr(UpperCamelCase_ ):
lowerCAmelCase_ : int =[]
lowerCAmelCase_ : Tuple =[]
for token in sentence:
words.append(token['''form'''] )
labels.append(token['''upos'''] )
assert len(UpperCamelCase_ ) == len(UpperCamelCase_ )
if words:
examples.append(InputExample(guid=F'{mode}-{guid_index}' , words=UpperCamelCase_ , labels=UpperCamelCase_ ) )
guid_index += 1
return examples
def __A ( self : Dict , UpperCamelCase_ : TextIO , UpperCamelCase_ : TextIO , UpperCamelCase_ : List ):
lowerCAmelCase_ : Optional[Any] =0
for sentence in parse_incr(UpperCamelCase_ ):
lowerCAmelCase_ : List[str] =preds_list[example_id]
lowerCAmelCase_ : str =''''''
for token in sentence:
out += F'{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) '
out += "\n"
writer.write(UpperCamelCase_ )
example_id += 1
def __A ( self : Union[str, Any] , UpperCamelCase_ : str ):
if path:
with open(UpperCamelCase_ , '''r''' ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
| 305 | 1 |
from typing import Any
import numpy as np
def UpperCamelCase ( _a ) -> bool:
'''simple docstring'''
return np.array_equal(_a , matrix.conjugate().T )
def UpperCamelCase ( _a , _a ) -> Any:
'''simple docstring'''
lowercase_ :str = v.conjugate().T
lowercase_ :int = v_star.dot(_a )
assert isinstance(_a , np.ndarray )
return (v_star_dot.dot(_a )) / (v_star.dot(_a ))
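# Key property the tests below rely on: for a Hermitian matrix A and an
# eigenvector v, the Rayleigh quotient v*Av / v*v equals the corresponding
# eigenvalue. Standalone check, equivalent to the helper above:
_a = np.array([[2.0, 0.0], [0.0, 3.0]])
_v = np.array([[0.0], [1.0]])
_rq = (_v.conj().T @ _a @ _v) / (_v.conj().T @ _v)
assert np.isclose(_rq.item(), 3.0)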
def UpperCamelCase ( ) -> None:
'''simple docstring'''
lowercase_ :str = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
lowercase_ :Optional[int] = np.array([[1], [2], [3]] )
assert is_hermitian(_a ), f"{a} is not hermitian."
print(rayleigh_quotient(_a , _a ) )
lowercase_ :Any = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
assert is_hermitian(_a ), f"{a} is not hermitian."
assert rayleigh_quotient(_a , _a ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 257 |
def UpperCamelCase ( _a = 1_0_0 ) -> int:
'''simple docstring'''
lowercase_ :Any = (n * (n + 1) // 2) ** 2
lowercase_ :int = n * (n + 1) * (2 * n + 1) // 6
return sum_cubes - sum_squares
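# Closed forms used above: sum(i) = n(n+1)/2 and sum(i^2) = n(n+1)(2n+1)/6.
# Brute-force cross-check for a small n:
_n = 10
assert sum(range(1, _n + 1)) ** 2 - sum(i * i for i in range(1, _n + 1)) == 2640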
if __name__ == "__main__":
print(f"{solution() = }")
| 257 | 1 |
"""simple docstring"""
from __future__ import annotations
def __UpperCAmelCase ( UpperCAmelCase_ : list[int] ) -> list[int]:
'''simple docstring'''
if len(UpperCAmelCase_ ) == 0:
return array
    __snake_case , __snake_case : List[Any] = min(UpperCAmelCase_ ), max(UpperCAmelCase_ )
    # Compute the size of the value range (the number of holes)
    __snake_case : List[str] = _max - _min + 1
    __snake_case , __snake_case : Tuple = [0] * holes_range, [0] * holes_range
# Make the sorting.
for i in array:
__snake_case : Optional[int] = i - _min
__snake_case : int = i
holes_repeat[index] += 1
# Makes the array back by replacing the numbers.
__snake_case : Union[str, Any] = 0
for i in range(UpperCAmelCase_ ):
while holes_repeat[i] > 0:
__snake_case : Union[str, Any] = holes[i]
index += 1
holes_repeat[i] -= 1
# Returns the sorted array.
return array
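# Pigeonhole sort in brief: O(n + k) time and O(k) extra space for
# k = max - min + 1, so it only pays off when the value range is small.
# Standalone restatement of the hole/count scheme above:
def pigeon_sort_sketch(xs):
    lo = min(xs)
    counts = [0] * (max(xs) - lo + 1)
    for x in xs:
        counts[x - lo] += 1
    return [lo + i for i, c in enumerate(counts) for _ in range(c)]

assert pigeon_sort_sketch([8, 3, 2, 7, 4]) == [2, 3, 4, 7, 8]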
if __name__ == "__main__":
import doctest
doctest.testmod()
_a : Tuple= input("Enter numbers separated by comma:\n")
_a : Union[str, Any]= [int(x) for x in user_input.split(",")]
print(pigeon_sort(unsorted))
| 707 | """simple docstring"""
from typing import Any
class UpperCamelCase :
def __init__(self : List[str] , _A : Any) -> int:
__snake_case : Any = data
__snake_case : Dict = None
def __repr__(self : Tuple) -> str:
return f"Node({self.data})"
class UpperCamelCase :
def __init__(self : Union[str, Any]) -> Union[str, Any]:
__snake_case : Any = None
def __iter__(self : Tuple) -> Any:
__snake_case : List[str] = self.head
while node:
yield node.data
__snake_case : Any = node.next
def __len__(self : str) -> int:
return sum(1 for _ in self)
def __repr__(self : int) -> str:
return "->".join([str(_A) for item in self])
def __getitem__(self : List[Any] , _A : int) -> Any:
if not 0 <= index < len(self):
raise ValueError('list index out of range.')
for i, node in enumerate(self):
if i == index:
return node
return None
def __setitem__(self : int , _A : int , _A : Any) -> None:
if not 0 <= index < len(self):
raise ValueError('list index out of range.')
__snake_case : Optional[int] = self.head
for _ in range(_A):
__snake_case : Any = current.next
__snake_case : Dict = data
def _lowercase (self : List[Any] , _A : Any) -> None:
self.insert_nth(len(self) , _A)
def _lowercase (self : List[str] , _A : Any) -> None:
self.insert_nth(0 , _A)
def _lowercase (self : Optional[Any] , _A : int , _A : Any) -> None:
if not 0 <= index <= len(self):
raise IndexError('list index out of range')
__snake_case : str = Node(_A)
if self.head is None:
__snake_case : str = new_node
elif index == 0:
__snake_case : Union[str, Any] = self.head # link new_node to head
__snake_case : int = new_node
else:
__snake_case : Any = self.head
for _ in range(index - 1):
__snake_case : Any = temp.next
__snake_case : Dict = temp.next
__snake_case : str = new_node
def _lowercase (self : Optional[int]) -> None: # print every node data
print(self)
def _lowercase (self : Optional[Any]) -> Any:
return self.delete_nth(0)
def _lowercase (self : List[str]) -> Any: # delete from tail
return self.delete_nth(len(self) - 1)
def _lowercase (self : int , _A : int = 0) -> Any:
if not 0 <= index <= len(self) - 1: # test if index is valid
raise IndexError('List index out of range.')
__snake_case : int = self.head # default first node
if index == 0:
__snake_case : Any = self.head.next
else:
__snake_case : List[Any] = self.head
for _ in range(index - 1):
__snake_case : List[str] = temp.next
__snake_case : Union[str, Any] = temp.next
__snake_case : str = temp.next.next
return delete_node.data
def _lowercase (self : str) -> bool:
return self.head is None
def _lowercase (self : Tuple) -> None:
__snake_case : List[Any] = None
__snake_case : Optional[Any] = self.head
while current:
# Store the current node's next node.
__snake_case : List[str] = current.next
# Make the current node's next point backwards
__snake_case : Optional[Any] = prev
# Make the previous node be the current node
__snake_case : Optional[Any] = current
# Make the current node the next node (to progress iteration)
__snake_case : Any = next_node
# Return prev in order to put the head at the end
__snake_case : Optional[Any] = prev
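# The in-place reversal above on a three-node list 1->2->3: prev starts at
# None; each step rewires one node's next pointer backwards, ending with the
# head at 3 and the list reading 3->2->1. Standalone check with (data, next)
# tuples standing in for nodes:
def reverse_sketch(head):
    prev = None
    while head is not None:
        data, nxt = head
        prev, head = (data, prev), nxt
    return prev

assert reverse_sketch((1, (2, (3, None)))) == (3, (2, (1, None)))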
def __UpperCAmelCase ( ) -> None:
'''simple docstring'''
__snake_case : Union[str, Any] = LinkedList()
assert linked_list.is_empty() is True
assert str(UpperCAmelCase_ ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(UpperCAmelCase_ ) == i
linked_list.insert_nth(UpperCAmelCase_ , i + 1 )
assert str(UpperCAmelCase_ ) == "->".join(str(UpperCAmelCase_ ) for i in range(1 , 11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(UpperCAmelCase_ ) == "->".join(str(UpperCAmelCase_ ) for i in range(0 , 12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(UpperCAmelCase_ ) == 9
assert str(UpperCAmelCase_ ) == "->".join(str(UpperCAmelCase_ ) for i in range(1 , 10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
__snake_case : Tuple = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(UpperCAmelCase_ ) == "->".join(str(UpperCAmelCase_ ) for i in range(-8 , 1 ) )
def test_singly_linked_list_2() -> None:
    # This section tests that the LinkedList holds any data type.
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main() -> None:
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")


if __name__ == "__main__":
    main()
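# Editor's note: a minimal usage sketch of the API exercised above, assuming the
# Node/LinkedList definitions from the top of this snippet are in scope.
ll = LinkedList()
for value in (3, 1, 4):
    ll.insert_tail(value)
assert str(ll) == "3->1->4"
ll.reverse()
assert str(ll) == "4->1->3"
assert ll.delete_head() == 4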
| 192 | 0 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNet2DModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
class LDMSuperResolutionPipeline(DiffusionPipeline):
    r"""
    A pipeline for image super-resolution using latent diffusion.
    """

    def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler]):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(self, image: Union[torch.Tensor, PIL.Image.Image] = None, batch_size: Optional[int] = 1, num_inference_steps: Optional[int] = 100, eta: Optional[float] = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, output_type: Optional[str] = "pil", return_dict: bool = True):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")

        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype

        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)

        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
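# Editor's note: a hedged inference sketch for the pipeline above. The checkpoint
# name comes from the public diffusers docs, not from this file, and the input
# path is a placeholder.
from PIL import Image
from diffusers import LDMSuperResolutionPipeline

pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
low_res = Image.open("input_128x128.png").convert("RGB")  # placeholder path
upscaled = pipe(low_res, num_inference_steps=100, eta=1.0).images[0]
upscaled.save("upscaled.png")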
| 631 |
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
torch.backends.cuda.matmul.allow_tf32 = False


class TrainingTests(unittest.TestCase):
    def get_model_optimizer(self, resolution=32):
        set_seed(0)
        model = UNet2DModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer

    @slow
    def test_training_step_equality(self):
        device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule="linear", clip_sample=True)
        ddim_scheduler = DDIMScheduler(num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule="linear", clip_sample=True)

        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps

        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1000, (4,)).long().to(device) for _ in range(4)]

        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
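Editor's note: the final `allclose` checks pass because both schedulers implement the same closed-form forward process,

$$x_t = \sqrt{\bar{\alpha}_t}\, x_0 + \sqrt{1 - \bar{\alpha}_t}\, \epsilon, \qquad \epsilon \sim \mathcal{N}(0, I),$$

so `add_noise` is identical for DDPM and DDIM given the same beta schedule and seed.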
| 631 | 1 |
import math
def sieve(n):
    """Segmented Sieve: find all primes up to and including n."""
    prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    in_prime = []

    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)

    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each

            for j in range(t, high + 1, each):
                temp[j - low] = False

        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)

        low = high + 1
        high = min(high + end, n)

    return prime


print(sieve(10**6))
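# Editor's note: a quick cross-check of the segmented sieve against naive trial
# division (assumes the sieve() above is in scope).
def is_prime(k: int) -> bool:
    return k > 1 and all(k % d for d in range(2, int(k**0.5) + 1))

assert sieve(100) == [p for p in range(2, 101) if is_prime(p)]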
| 291 |
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kakaobrain/align-base": "https://huggingface.co/kakaobrain/align-base/resolve/main/config.json",
}
class AlignTextConfig(PretrainedConfig):
    model_type = "align_text_model"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, **kwargs):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class AlignVisionConfig(PretrainedConfig):
    model_type = "align_vision_model"

    def __init__(self, num_channels: int = 3, image_size: int = 600, width_coefficient: float = 2.0, depth_coefficient: float = 3.1, depth_divisor: int = 8, kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3], in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192], out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320], depthwise_padding: List[int] = [], strides: List[int] = [1, 2, 2, 2, 1, 2, 1], num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1], expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6], squeeze_expansion_ratio: float = 0.25, hidden_act: str = "swish", hidden_dim: int = 2560, pooling_type: str = "mean", initializer_range: float = 0.02, batch_norm_eps: float = 0.001, batch_norm_momentum: float = 0.99, drop_connect_rate: float = 0.2, **kwargs):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class AlignConfig(PretrainedConfig):
    model_type = "align"
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, projection_dim=640, temperature_init_value=1.0, initializer_range=0.02, **kwargs):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the AlignTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the AlignVisionConfig with default values.")

        self.text_config = AlignTextConfig(**text_config)
        self.vision_config = AlignVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range

    @classmethod
    def from_text_vision_configs(cls, text_config: AlignTextConfig, vision_config: AlignVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
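# Editor's note: a minimal sketch composing the three configs defined above.
text_config = AlignTextConfig(vocab_size=30522)
vision_config = AlignVisionConfig(image_size=600)
config = AlignConfig.from_text_vision_configs(text_config, vision_config, projection_dim=640)
assert config.to_dict()["model_type"] == "align"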
| 291 | 1 |
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


# here alpha is the learning rate, x is the feature matrix and y is the target matrix
def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta


# In[68]:

if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1

    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        # predicting the value of probability from the logistic regression algorithm
        return sigmoid_function(np.dot(x, theta))

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
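Editor's note: the loop in `logistic_reg` is plain batch gradient descent on the cross-entropy cost; the gradient it computes each iteration is

$$\nabla_{\theta} J(\theta) = \frac{1}{m} X^{\top}\left(\sigma(X\theta) - y\right),$$

with $m$ = `y.size` and $\sigma$ the `sigmoid_function` above.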
 | 35 |
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features", [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ], )
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features", [
        None,
        {"col_3": "float64", "col_1": "string", "col_2": "int64"},
    ], )
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path):
    # the file's actual features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
    _check_json_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)


def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features", [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ], )
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
    if split:
        path = {split: jsonl_path}
    else:
        split = "train"
        path = {"train": jsonl_path, "test": jsonl_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def load_json(buffer):
    return json.load(buffer)


def load_json_lines(buffer):
    return [json.loads(line) for line in buffer]


class TestJsonDatasetWriter:
    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at", [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ], )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at", [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ], )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    def test_dataset_to_json_orient_invalidproc(self, dataset):
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)

    @pytest.mark.parametrize("compression, extension", [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")])
    def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
        path = tmp_path_factory.mktemp("data") / f"test.json.{extension}"
        original_path = str(shared_datadir / f"test_file.json.{extension}")
        JsonDatasetWriter(dataset, path, compression=compression).write()

        with fsspec.open(path, "rb", compression="infer") as f:
            exported_content = f.read()
        with fsspec.open(original_path, "rb", compression="infer") as f:
            original_content = f.read()
        assert exported_content == original_content

 | 516 | 0 |
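# Editor's note: a minimal round-trip sketch of the reader/writer pair under
# test (the file name is a placeholder).
from datasets import Dataset
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter

ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2], "col_3": [0.5, 1.5]})
JsonDatasetWriter(ds, "roundtrip.jsonl", lines=True).write()
assert JsonDatasetReader("roundtrip.jsonl").read().column_names == ["col_1", "col_2", "col_3"]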
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}


class PoolFormerConfig(PretrainedConfig):
    model_type = "poolformer"

    def __init__(self, num_channels=3, patch_size=16, stride=16, pool_size=3, mlp_ratio=4.0, depths=[2, 2, 6, 2], hidden_sizes=[64, 128, 320, 512], patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], padding=[2, 1, 1, 1], num_encoder_blocks=4, drop_path_rate=0.0, hidden_act="gelu", use_layer_scale=True, layer_scale_init_value=1e-5, initializer_range=0.02, **kwargs):
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs)


class PoolFormerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 2e-3
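# Editor's note: quick instantiation check for the config above.
config = PoolFormerConfig()
assert config.model_type == "poolformer"
assert config.hidden_sizes == [64, 128, 320, 512]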
 | 720 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--mobilebert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained MobileBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
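# Editor's note: the script is meant to be run from the command line; calling
# the function directly looks like this (all three paths are placeholders).
convert_tf_checkpoint_to_pytorch(
    tf_checkpoint_path="path/to/mobilebert/model.ckpt",
    mobilebert_config_file="path/to/mobilebert_config.json",
    pytorch_dump_path="path/to/pytorch_model.bin",
)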
| 674 | 0 |
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250

        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)

        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)
        self.assertEqual(len(stopping_criteria), 1)
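# Editor's note: a hedged sketch of using these criteria with `generate`; the
# gpt2 checkpoint is an assumption, not something this test file uses.
from transformers import AutoModelForCausalLM, AutoTokenizer, MaxLengthCriteria, StoppingCriteriaList

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
inputs = tokenizer("Hello", return_tensors="pt")
output_ids = model.generate(**inputs, stopping_criteria=StoppingCriteriaList([MaxLengthCriteria(max_length=20)]))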
| 598 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
    "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json"
    # See all FNet models at https://huggingface.co/models?filter=fnet
}


class FNetConfig(PretrainedConfig):
    model_type = "fnet"

    def __init__(self, vocab_size=32000, hidden_size=768, num_hidden_layers=12, intermediate_size=3072, hidden_act="gelu_new", hidden_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=4, initializer_range=0.02, layer_norm_eps=1e-12, use_tpu_fourier_optimizations=False, tpu_short_seq_length=512, pad_token_id=3, bos_token_id=1, eos_token_id=2, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
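# Editor's note: FNet replaces self-attention with an unparameterized Fourier
# transform over the sequence and hidden dimensions; a sketch of that mixing
# step, for illustration only (not code from this config file).
import torch

def fourier_mixing(hidden_states: torch.Tensor) -> torch.Tensor:
    # 2D FFT, keeping only the real part, as in the FNet paper.
    return torch.fft.fft(torch.fft.fft(hidden_states, dim=-1), dim=-2).real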
| 532 | 0 |
def euclidean_distance_sqr(point1, point2):
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0):
    return sorted(array, key=lambda x: x[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    # brute force O(n^2): closest squared distance among the first `points_counts` points
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    # within the strip, each point only needs to be checked against its six neighbours
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    # base case
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid)
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_y, points_sorted_on_y[mid:], points_counts - mid)
    closest_pair_dis = min(closest_in_left, closest_in_right)

    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)

    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis)
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts):
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(
            points_sorted_on_x, points_sorted_on_y, points_counts)
    ) ** 0.5


if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print("Distance:", closest_pair_of_points(points, len(points)))
| 1 |
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
from nltk import word_tokenize
lowerCAmelCase__ = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"
lowerCAmelCase__ = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
lowerCAmelCase__ = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Meteor(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"],
            reference_urls=[
                "https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
                "https://en.wikipedia.org/wiki/METEOR",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        import nltk

        nltk.download("wordnet")
        if NLTK_VERSION >= version.Version("3.6.5"):
            nltk.download("punkt")
        if NLTK_VERSION >= version.Version("3.6.6"):
            nltk.download("omw-1.4")

    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        if NLTK_VERSION >= version.Version("3.6.5"):
            scores = [
                meteor_score.single_meteor_score(word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]

        return {"meteor": np.mean(scores)}

 | 1 | 1 |
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000, variance_type: str = "fixed_small_log", clip_sample: bool = True, clip_sample_range: Optional[float] = 1.0, prediction_type: str = "epsilon", beta_schedule: str = "squaredcos_cap_v2"):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")

        self.betas = betas_for_alpha_bar(num_train_timesteps)

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())

        self.variance_type = variance_type

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        # UnCLIP does not rescale model inputs; present for scheduler interchangeability.
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)

    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()

            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance

    def step(self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, prev_timestep: Optional[int] = None, generator=None, return_dict: bool = True) -> Union[UnCLIPSchedulerOutput, Tuple]:
        t = timestep

        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
                " for the UnCLIPScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range)

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device)

            variance = self._get_variance(t, predicted_variance=predicted_variance, prev_timestep=prev_timestep)

            if self.variance_type == "fixed_small_log":
                variance = variance  # already transformed in _get_variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
                    " for the UnCLIPScheduler."
                )

            variance = variance * variance_noise

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample,)

        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)

    # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.add_noise
    def add_noise(self, original_samples: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.IntTensor) -> torch.FloatTensor:
        # Make sure alphas_cumprod and timesteps have the same device and dtype as original_samples
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
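# Editor's note: a minimal denoising-loop sketch for the scheduler above; the
# random tensor stands in for a real UNet prediction.
import torch

scheduler = UnCLIPScheduler()
scheduler.set_timesteps(25, device="cpu")
sample = torch.randn(1, 3, 64, 64)
for t in scheduler.timesteps:
    noise_pred = torch.randn_like(sample)  # placeholder for model(sample, t).sample
    sample = scheduler.step(noise_pred, t, sample).prev_sample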
| 400 |
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)

name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names
def UpperCamelCase_( snake_case : int ):
'''simple docstring'''
snake_case_ = parser.add_argument_group("quant_trainer arguments" )
group.add_argument("--wprec" , type=snake_case , default=8 , help="weight precision" )
group.add_argument("--aprec" , type=snake_case , default=8 , help="activation precision" )
group.add_argument("--quant-per-tensor" , action="store_true" , help="per tensor weight scaling" )
group.add_argument("--quant-disable" , action="store_true" , help="disable all quantizers" )
group.add_argument("--quant-disable-embeddings" , action="store_true" , help="disable all embeddings quantizers" )
group.add_argument("--quant-disable-keyword" , type=snake_case , nargs="+" , help="disable quantizers by keyword" )
group.add_argument("--quant-disable-layer-module" , type=snake_case , help="disable quantizers by keyword under layer." )
group.add_argument("--quant-enable-layer-module" , type=snake_case , help="enable quantizers by keyword under layer" )
group.add_argument("--calibrator" , default="max" , help="which quantization range calibrator to use" )
group.add_argument("--percentile" , default=snake_case , type=snake_case , help="percentile for PercentileCalibrator" )
group.add_argument("--fuse-qkv" , action="store_true" , help="use the same scale factor for qkv" )
group.add_argument("--clip-gelu" , metavar="N" , type=snake_case , help="clip gelu output maximum value to N" )
group.add_argument(
"--recalibrate-weights" , action="store_true" , help=(
"recalibrate weight amaxes by taking the max of the weights."
" amaxes will be computed with the current quantization granularity (axis)."
) , )
def UpperCamelCase_( snake_case : List[str] ):
'''simple docstring'''
if args.calibrator == "max":
snake_case_ = "max"
elif args.calibrator == "percentile":
if args.percentile is None:
raise ValueError("Specify --percentile when using percentile calibrator" )
snake_case_ = "histogram"
elif args.calibrator == "mse":
snake_case_ = "histogram"
else:
raise ValueError(f'Invalid calibrator {args.calibrator}' )
snake_case_ = QuantDescriptor(num_bits=args.aprec , calib_method=snake_case )
snake_case_ = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
quant_nn.QuantLinear.set_default_quant_desc_input(snake_case )
quant_nn.QuantLinear.set_default_quant_desc_weight(snake_case )
def UpperCamelCase_( snake_case : List[str] , snake_case : Any , snake_case : Optional[int]=False , snake_case : List[Any]=False ):
'''simple docstring'''
logger.info("Configuring Model for Quantization" )
logger.info(f'using quantization package {pytorch_quantization.__file__}' )
if not calib:
if args.quant_disable_embeddings:
set_quantizer_by_name(snake_case , ["embeddings"] , which="weight" , _disabled=snake_case )
if args.quant_disable:
set_quantizer_by_name(snake_case , [""] , _disabled=snake_case )
if args.quant_disable_keyword:
set_quantizer_by_name(snake_case , args.quant_disable_keyword , _disabled=snake_case )
if args.quant_disable_layer_module:
set_quantizer_by_name(snake_case , [r"layer.\d+." + args.quant_disable_layer_module] , _disabled=snake_case )
if args.quant_enable_layer_module:
set_quantizer_by_name(snake_case , [r"layer.\d+." + args.quant_enable_layer_module] , _disabled=snake_case )
if args.recalibrate_weights:
recalibrate_weights(snake_case )
if args.fuse_qkv:
fuse_qkv(snake_case , snake_case )
if args.clip_gelu:
clip_gelu(snake_case , args.clip_gelu )
# if args.local_rank in [-1, 0] and not calib:
print_quant_summary(snake_case )
def enable_calibration(model):
    """Enable calibration of all "*_quantizer" modules in the model."""
    logger.info("Enabling Calibration")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                module.disable_quant()
                module.enable_calib()
            else:
                module.disable()
            logger.info(f"{name:80}: {module}")


def finish_calibration(model, args):
    """Disable calibration and load amax for all "*_quantizer" modules in the model."""
    logger.info("Loading calibrated amax")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                if isinstance(module._calibrator, calib.MaxCalibrator):
                    module.load_calib_amax()
                else:
                    module.load_calib_amax("percentile", percentile=args.percentile)
                module.enable_quant()
                module.disable_calib()
            else:
                module.enable()
    model.cuda()
    print_quant_summary(model)
def fuse_qkv(model, args):
    """Force the Q, K and V quantizers of each self-attention block to share a scale factor."""

    def fuse3(qq, qk, qv):
        for mod in [qq, qk, qv]:
            if not hasattr(mod, "_amax"):
                print("          WARNING: NO AMAX BUFFER")
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()
        amax = max(q, k, v)
        qq._amax.fill_(amax)
        qk._amax.fill_(amax)
        qv._amax.fill_(amax)
        logger.info(f"          q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}")

    for name, mod in model.named_modules():
        if name.endswith(".attention.self"):
            logger.info(f"FUSE_QKV: {name:{name_width}}")
            fuse3(mod.matmul_q_input_quantizer, mod.matmul_k_input_quantizer, mod.matmul_v_input_quantizer)
            if args.quant_per_tensor:
                fuse3(mod.query._weight_quantizer, mod.key._weight_quantizer, mod.value._weight_quantizer)
def clip_gelu(model, maxval):
    """Clip the amax of *.output.dense input quantizers (i.e. the GELU output) to maxval."""
    for name, mod in model.named_modules():
        if name.endswith(".output.dense") and not name.endswith("attention.output.dense"):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval)
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(f"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}")


def expand_amax(model):
    """Expand per-tensor amax to per-channel, assigning each channel the per-tensor amax."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer") and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k, dtype=amax.dtype, device=amax.device) * amax
            print(f"expanding {name} {amax} -> {mod._weight_quantizer._amax}")


def recalibrate_weights(model):
    """Recalibrate weight amaxes by taking the max of the weights at the current granularity."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer"):
            if not hasattr(mod._weight_quantizer, "_amax"):
                print(f"RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER")
                continue
            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis)
            reduce_axis = set(range(len(mod.weight.size()))) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight, axis=reduce_axis, keepdims=True).detach()
            logger.info(f"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}")
            mod._weight_quantizer._amax = amax
def print_model_summary(model, name_width=25, line_width=180, ignore=None):
    """Print the quantization configuration of every weight-bearing module."""
    if ignore is None:
        ignore = []
    elif not isinstance(ignore, list):
        ignore = [ignore]
    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod, "weight"):
            continue
        name_width = max(name_width, len(name))
    for name, mod in model.named_modules():
        input_q = getattr(mod, "_input_quantizer", None)
        weight_q = getattr(mod, "_weight_quantizer", None)
        if not hasattr(mod, "weight"):
            continue
        if type(mod) in ignore:
            continue
        if [True for s in ignore if type(s) is str and s in name]:
            continue
        act_str = f"Act:{input_q.extra_repr()}"
        wgt_str = f"Wgt:{weight_q.extra_repr()}"
        s = f"{name:{name_width}} {act_str} {wgt_str}"
        if len(s) <= line_width:
            logger.info(s)
        else:
            logger.info(f"{name:{name_width}} {act_str}")
            logger.info(f"{' ':{name_width}} {wgt_str}")
def print_quant_summary(model):
    """Print a summary of all TensorQuantizer modules in the model."""
    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod, pytorch_quantization.nn.TensorQuantizer):
            print(f"{name:80} {mod}")
            count += 1
    print(f"{count} TensorQuantizers found in model")


def set_quantizer(name, mod, quantizer, k, v):
    """Set attribute k of mod's quantizer submodule to v."""
    quantizer_mod = getattr(mod, quantizer, None)
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod, k)
        setattr(quantizer_mod, k, v)
    else:
        logger.warning(f"{name} has no {quantizer}")


def set_quantizers(name, mod, which="both", **kwargs):
    """Set quantizer attributes for mod."""
    s = f"Warning: changing {which} quantizers of {name:{qname_width}}"
    for k, v in kwargs.items():
        s += f" {k}={v}"
        if which in ["input", "both"]:
            set_quantizer(name, mod, "_input_quantizer", k, v)
        if which in ["weight", "both"]:
            set_quantizer(name, mod, "_weight_quantizer", k, v)
    logger.info(s)


def set_quantizer_by_name(model, names, **kwargs):
    """Set quantizer attributes for layers whose name matches any regex in names."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_input_quantizer") or hasattr(mod, "_weight_quantizer"):
            for n in names:
                if re.search(n, name):
                    set_quantizers(name, mod, **kwargs)
        elif name.endswith("_quantizer"):
            for n in names:
                if re.search(n, name):
                    s = f"Warning: changing {name:{name_width}}"
                    for k, v in kwargs.items():
                        s += f" {k}={v}"
                        setattr(mod, k, v)
                    logger.info(s)
| 400 | 1 |
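# Minimal wiring sketch for the quantization helpers above. Assumptions: the
# pytorch-quantization toolkit is installed, a quantization-aware model (here QDQBERT,
# whose Linear layers are quant_nn.QuantLinear) is used, and `calibration_batches` is a
# hypothetical stand-in for a few representative tokenized input dicts.
import argparse

import torch
from transformers import QDQBertForQuestionAnswering

args = argparse.Namespace(
    calibrator="percentile", percentile=99.99, aprec=8, wprec=8, quant_per_tensor=False,
    quant_disable=False, quant_disable_embeddings=False, quant_disable_keyword=None,
    quant_disable_layer_module=None, quant_enable_layer_module=None,
    recalibrate_weights=False, fuse_qkv=False, clip_gelu=None,
)
set_default_quantizers(args)  # must run before the quantized model is instantiated
model = QDQBertForQuestionAnswering.from_pretrained("bert-base-uncased")
configure_model(model, args, calib=True)

enable_calibration(model)
with torch.no_grad():
    for batch in calibration_batches:  # hypothetical: a few forward passes to collect ranges
        model(**batch)
finish_calibration(model, args)  # freezes amax values and re-enables quantization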
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: "Sunday",
    1: "Monday",
    2: "Tuesday",
    3: "Wednesday",
    4: "Thursday",
    5: "Friday",
    6: "Saturday",
}


def get_week_day(year: int, month: int, day: int) -> str:
    """Returns the week-day name of a given date, using the Doomsday algorithm.

    >>> get_week_day(2020, 10, 24)
    'Saturday'
    """
    # minimal input checks:
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = ((centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod() | 94 |
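# Quick sanity checks for the routine above, runnable alongside the doctest block:
if __name__ == "__main__":
    print(get_week_day(2020, 10, 24))  # Saturday
    print(get_week_day(2000, 1, 1))  # Saturday -- century leap year, handled by the `year % 400` branch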
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
    SecondaryLearner,
    collect_objective_set,
    compute_perplexity,
    generate_datasets,
    load_gpt2,
    recopy_gpt2,
    set_seed,
    train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPT2LMHeadModel
def generate_n_pairs(
    context_len=32, max_steps=10, size_objective_set=100, min_len=1026, trim=True, data_file="data/tokenized_stories_train_wikitext103.jbl", igf_data_file="igf_context_pairs.jbl",
):
    """Collect (context, information gain) pairs for training the secondary learner."""
    set_seed(3)
    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len, data_file, number=size_objective_set, min_len=1026, trim=trim
    )
    # keeps model same across runs
    set_seed(4)
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # load pretrained model
    model = load_gpt2("gpt2").to(device)
    print("computing perplexity on objective set")
    orig_perp = compute_perplexity(model, objective_set, context_len).item()
    print("perplexity on objective set:", orig_perp)
    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model, orig_perp, context_len, train_data, objective_set, max_steps, device, igf_data_file)
    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()
def training_secondary_learner(
    secondary_learner_train_data, secondary_learner_max_epochs=15, secondary_learner_batch_size=128, eval_freq=100, igf_model_path="igf_model.pt",
):
    """Train the secondary learner on the collected (context, IG(X)) pairs."""
    set_seed(42)
    # Load pre-trained model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model)
    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner, secondary_learner_train_data, max_epochs=secondary_learner_max_epochs, batch_size=secondary_learner_batch_size, eval_freq=100, igf_model_path=igf_model_path,
    )
    del model, secondary_learner_train_data
    torch.cuda.empty_cache()
    return secondary_learner
def finetune(
    model, train_dataset, test_dataset, context_len=32, max_steps=1000, batch_size=16, threshold=1.0, recopy_model=recopy_gpt2, secondary_learner=None, eval_interval=10, finetuned_model_name="gpt2_finetuned.pt",
):
    """Fine-tune GPT-2, optionally skipping low-information batches flagged by the secondary learner."""
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler)
    num_train_epochs = max_steps // (len(train_dataset)) + 1
    global_step = 0
    context = torch.zeros((1, context_len), dtype=torch.long, device=device)
    model, lm_optimizer, lm_scheduler = recopy_model(model, device, max_steps)
    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device)
        secondary_learner.eval()
    contexts = []
    examples = 0
    observed_qs = []
    test_perps = []
    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model, test_dataset, context_len)
    test_perps.append(real_perp)
    print("Test perplexity, step", global_step, ":", real_perp)
    for epoch in range(int(num_train_epochs)):
        for step, example in enumerate(train_dataloader):
            torch.cuda.empty_cache()
            start = random.randint(0, example.size(2) - context_len - 1)
            context = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context, labels=context)
            do_backprop = True
            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context, dtype=torch.long, device=device).unsqueeze(0)
                )[0].item()
                observed_qs.append(float(predicted_q))
                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False
            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu()))
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1
            del outputs
            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0)
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model, test_dataset, context_len)
                    test_perps.append(real_perp)
                    print("Test perplexity, step", global_step, ":", real_perp)
            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break
    # save finetuned transformer model
    torch.save(model.state_dict(), finetuned_model_name)
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model
def main():
    parser = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task")
    # Required parameters
    parser.add_argument("--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain data files for WikiText.")
    parser.add_argument("--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models")
    parser.add_argument("--data_file", type=str, default=None, help=("A jbl file containing tokenized data which can be split as objective dataset, " "train_dataset and test_dataset."))
    parser.add_argument("--igf_data_file", type=str, default=None, help="A jbl file containing the context and information gain pairs to train secondary learner.")
    parser.add_argument("--output_dir", default=None, type=str, required=True, help="The output directory where the final fine-tuned model is stored.")
    parser.add_argument("--tokenizer_name", default=None, type=str, help="Pretrained tokenizer name or path if not the same as model_name")
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument("--context_len", default=32, type=int, help=("The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded."))
    parser.add_argument("--size_objective_set", default=100, type=int, help="number of articles that are long enough to be used as our objective set")
    parser.add_argument("--eval_freq", default=100, type=int, help="secondary model evaluation is triggered at eval_freq")
    parser.add_argument("--max_steps", default=1000, type=int, help="To calculate training epochs")
    parser.add_argument("--secondary_learner_batch_size", default=128, type=int, help="batch size of training data for secondary learner")
    parser.add_argument("--batch_size", default=16, type=int, help="batch size of training data of language model(gpt2) ")
    parser.add_argument("--eval_interval", default=10, type=int, help=("decay the selectivity of our secondary learner filter from" "1 standard deviation above average to 1 below average after 10 batches"))
    parser.add_argument("--number", default=100, type=int, help="The number of examples split to be used as objective_set/test_data")
    parser.add_argument("--min_len", default=1026, type=int, help="The minimum length of the article to be used as objective set")
    parser.add_argument("--secondary_learner_max_epochs", default=15, type=int, help="number of epochs to train secondary learner")
    parser.add_argument("--trim", default=True, type=bool, help="truncate the example if it exceeds context length")
    parser.add_argument("--threshold", default=1.0, type=float, help=("The threshold value used by secondary learner to filter the train_data and allow only" " informative data as input to the model"))
    parser.add_argument("--finetuned_model_name", default="gpt2_finetuned.pt", type=str, help="finetuned_model_name")
    parser.add_argument("--recopy_model", default=recopy_gpt2, type=str, help="Reset the model to the original pretrained GPT-2 weights after each iteration")
    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32, max_steps=10, size_objective_set=100, min_len=1026, trim=True, data_file="data/tokenized_stories_train_wikitext103.jbl", igf_data_file="igf_context_pairs.jbl",
    )
    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load("data/IGF_values.jbl")
    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data, secondary_learner_max_epochs=15, secondary_learner_batch_size=128, eval_freq=100, igf_model_path="igf_model.pt",
    )
    # load pretrained gpt2 model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    set_seed(42)
    # Generate train and test data to train and evaluate gpt2 model
    train_dataset, test_dataset = generate_datasets(
        context_len=32, file="data/tokenized_stories_train_wikitext103.jbl", number=100, min_len=1026, trim=True
    )
    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model, train_dataset, test_dataset, context_len=32, max_steps=1000, batch_size=16, threshold=1.0, recopy_model=recopy_gpt2, secondary_learner=secondary_learner, eval_interval=10, finetuned_model_name="gpt2_finetuned.pt",
    )
if __name__ == "__main__":
main() | 94 | 1 |
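# Toy illustration (not part of the script) of the batch-filtering rule used in `finetune`
# above: contexts whose predicted information gain falls below the (decaying) threshold are
# skipped; after a fixed number of steps the threshold drops and filtering is disabled.
threshold = 1.0
predicted_qs = [1.4, 0.7, 1.1, 0.2]
kept = []
for step, q in enumerate(predicted_qs):
    if step == 2:  # in the real loop the threshold drops after 10 global steps
        threshold = -1  # effectively disables filtering
    if q >= threshold:
        kept.append(step)
print(kept)  # [0, 2, 3]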
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
A = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_fnet"] = [
"FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FNetForMaskedLM",
"FNetForMultipleChoice",
"FNetForNextSentencePrediction",
"FNetForPreTraining",
"FNetForQuestionAnswering",
"FNetForSequenceClassification",
"FNetForTokenClassification",
"FNetLayer",
"FNetModel",
"FNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 475 |
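# Behavior sketch for the lazy-module pattern above: submodules are imported only when an
# attribute is first accessed, so `import transformers` stays cheap. Assumes a transformers
# build that ships FNet.
import importlib

fnet = importlib.import_module("transformers.models.fnet")
config = fnet.FNetConfig()  # first attribute access triggers the real submodule import
print(type(config).__name__)  # FNetConfig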
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class T5FilmDecoder(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        input_dims: int = 128,
        targets_length: int = 256,
        max_decoder_noise_time: float = 2000.0,
        d_model: int = 768,
        num_layers: int = 12,
        num_heads: int = 12,
        d_kv: int = 64,
        d_ff: int = 2048,
        dropout_rate: float = 0.1,
    ):
        super().__init__()
        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False), nn.SiLU(), nn.Linear(d_model * 4, d_model * 4, bias=False), nn.SiLU(),
        )
        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False
        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)
        self.dropout = nn.Dropout(p=dropout_rate)
        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)
        self.decoder_norm = T5LayerNorm(d_model)
        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)

    def encoder_decoder_mask(self, query_input, key_input):
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)

    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)
        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time, embedding_dim=self.config.d_model, max_period=self.config.max_decoder_noise_time,
        ).to(dtype=self.dtype)
        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)
        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
        seq_length = decoder_input_tokens.shape[1]
        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device), (batch, seq_length),
        )
        position_encodings = self.position_encoding(decoder_positions)
        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        hidden_states = self.dropout(inputs)
        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype
        )
        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]
        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)
        for lyr in self.decoders:
            hidden_states = lyr(
                hidden_states, conditioning_emb=conditioning_emb, encoder_hidden_states=encoded, encoder_attention_mask=encoder_decoder_mask,
            )[0]
        hidden_states = self.decoder_norm(hidden_states)
        hidden_states = self.post_dropout(hidden_states)
        spec_out = self.spec_out(hidden_states)
        return spec_out
class DecoderLayer(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()
        # cond self attention: layer 0
        self.layer.append(
            T5LayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate)
        )
        # cross attention: layer 1
        self.layer.append(
            T5LayerCrossAttention(
                d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon,
            )
        )
        # Film Cond MLP + dropout: last layer
        self.layer.append(
            T5LayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon)
        )

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None):
        hidden_states = self.layer[0](
            hidden_states, conditioning_emb=conditioning_emb, attention_mask=attention_mask,
        )
        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype
            )
            hidden_states = self.layer[1](
                hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_extended_attention_mask,
            )
        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)
        return (hidden_states,)
class T5LayerSelfAttentionCond(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = T5LayerNorm(d_model)
        self.FiLMLayer = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)
        # Self-attention block
        attention_output = self.attention(normed_hidden_states)
        hidden_states = hidden_states + self.dropout(attention_output)
        return hidden_states
class T5LayerCrossAttention(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states, encoder_hidden_states=key_value_states, attention_mask=attention_mask.squeeze(1),
        )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output
class T5LayerFFCond(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = T5DenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)
        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states
class T5DenseGatedActDense(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states
class T5LayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """Construct a layernorm module in the T5 style: no bias and no subtraction of mean."""
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # T5 layer norm only scales; variance is computed in fp32 for stability.
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)
        return self.weight * hidden_states
class NewGELUActivation(nn.Module):
    """Tanh-approximation GELU, as used in Google BERT and OpenAI GPT."""

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))
class T5FiLMLayer(nn.Module):
    """FiLM layer: projects a conditioning embedding to per-channel (scale, shift)."""

    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
| 475 | 1 |
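# FiLM in isolation (a shape sketch for the T5FiLMLayer defined above): the conditioning
# embedding is projected to per-channel (scale, shift) and applied element-wise.
import torch

film = T5FiLMLayer(in_features=128, out_features=32)
x = torch.randn(2, 16, 32)
cond = torch.randn(2, 1, 128)
print(film(x, cond).shape)  # torch.Size([2, 16, 32])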
"""simple docstring"""
INSTALL_CONTENT = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and\n# uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]
black_avoid_patterns = {
    '{processor_class}': 'FakeProcessorClass',
    '{model_class}': 'FakeModelClass',
    '{object_class}': 'FakeObjectClass',
}
| 122 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DiffusionPipeline,
    EulerDiscreteScheduler,
    StableDiffusionXLImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionXLImg2ImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionXLImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, cross_attention_dim=64,
        )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear", timestep_spacing="leading",
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=32,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=False)
        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=False)
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_2,
            "tokenizer_2": tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }
        return inputs
    def test_stable_diffusion_xl_img2img_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_save_load_optional_components(self):
        pass

    def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]
        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]
        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt")]
        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)
        output = sd_pipe(
            **inputs, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
        )
        image_slice_2 = output.images[0, -3:, -3:, -1]
        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@slow
@require_torch_gpu
class StableDiffusionXLImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion(self):
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3
| 122 | 1 |
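# Call-pattern sketch for the img2img pipeline exercised above. `components` is a stand-in
# for the dict returned by get_dummy_components(); with the tiny dummy UNet/VAE the output
# is 32x32. `strength` controls how far the input image is re-noised before denoising.
import torch

pipe = StableDiffusionXLImg2ImgPipeline(**components)  # components: hypothetical fixture dict
out = pipe(
    prompt="A painting of a squirrel eating a burger",
    image=torch.rand(1, 3, 32, 32),
    strength=0.75,
    num_inference_steps=2,
    guidance_scale=5.0,
    output_type="numpy",
)
print(out.images.shape)  # (1, 32, 32, 3)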
"""simple docstring"""
import os
from pathlib import Path
def load_cuda_kernels():
    """JIT-compile and import the multi-scale deformable attention CUDA kernels."""
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]
    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )
    import MultiScaleDeformableAttention as MSDA

    return MSDA
| 93 |
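# Guarded-call sketch: in modeling code this kind of loader is typically wrapped so a failed
# JIT build (no compiler, no CUDA toolkit) degrades to the pure-PyTorch attention path.
try:
    MultiScaleDeformableAttention = load_cuda_kernels()
except Exception as e:  # build errors surface as various exception types
    MultiScaleDeformableAttention = None
    print(f"Could not load custom CUDA kernels, falling back to PyTorch: {e}")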
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    """
    Args:
        sample (`torch.FloatTensor`): The hidden states output conditioned on `encoder_hidden_states` input.
    """

    sample: torch.FloatTensor
class TransformerTemporalModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        out_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        activation_fn: str = "geglu",
        norm_elementwise_affine: bool = True,
        double_self_attention: bool = True,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.in_channels = in_channels
        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)
        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim, num_attention_heads, attention_head_dim, dropout=dropout, cross_attention_dim=cross_attention_dim, activation_fn=activation_fn, attention_bias=attention_bias, double_self_attention=double_self_attention, norm_elementwise_affine=norm_elementwise_affine,
                )
                for d in range(num_layers)
            ]
        )
        self.proj_out = nn.Linear(inner_dim, in_channels)

    def forward(self, hidden_states, encoder_hidden_states=None, timestep=None, class_labels=None, num_frames=1, cross_attention_kwargs=None, return_dict: bool = True):
        # 1. Input: (batch * frames, channel, height, width) -> (batch * h * w, frames, channel)
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames
        residual = hidden_states
        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)
        hidden_states = self.norm(hidden_states)
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)
        hidden_states = self.proj_in(hidden_states)
        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states, encoder_hidden_states=encoder_hidden_states, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, class_labels=class_labels,
            )
        # 3. Output
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, channel, num_frames)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)
        output = hidden_states + residual
        if not return_dict:
            return (output,)
        return TransformerTemporalModelOutput(sample=output)
| 93 | 1 |
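# Shape-only smoke test for the temporal transformer above: it mixes information across
# frames while keeping the (batch * frames, C, H, W) layout at the interface.
import torch

model = TransformerTemporalModel(num_attention_heads=2, attention_head_dim=8, in_channels=16, norm_num_groups=4)
frames = torch.randn(2 * 4, 16, 8, 8)  # batch=2, num_frames=4
out = model(frames, num_frames=4).sample
print(out.shape)  # torch.Size([8, 16, 8, 8])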
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None
        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision
        )
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
[17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
[8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
[3, 10_00, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
] )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)
        sample = model.apply(
            {"params": params}, latents, jnp.array(timestep, dtype=jnp.int32), encoder_hidden_states=encoder_hidden_states,
        ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
[17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
[8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
[3, 10_00, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)
        sample = model.apply(
            {"params": params}, latents, jnp.array(timestep, dtype=jnp.int32), encoder_hidden_states=encoder_hidden_states,
        ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
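# Load-pattern sketch for the Flax checkpoints used above (assumes network access): the
# bf16 weights live under the "bf16" revision of the repo, mirroring get_unet_model().
import jax.numpy as jnp
from diffusers import FlaxUNet2DConditionModel

model, params = FlaxUNet2DConditionModel.from_pretrained(
    "CompVis/stable-diffusion-v1-4", subfolder="unet", revision="bf16", dtype=jnp.bfloat16
)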
| 584 | from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import GPT2Tokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester:
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, embed_dim=16, word_embed_proj_dim=16,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        config = self.config_cls(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, embed_dim=self.embed_dim, word_embed_proj_dim=self.word_embed_proj_dim, is_encoder_decoder=False, **self.config_updates,
        )
        inputs_dict = prepare_opt_inputs_dict(config, input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFOPTModel(config=config)
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10
    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_resize_token_embeddings(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, "weight"):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer, "weight"):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())
                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())
                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0], assert_size)
                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)
                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)
                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
@require_tf
class TFOPTHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2
        input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1)
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size, hidden_size=24, num_hidden_layers=2, num_attention_heads=2, ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0,
        )
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class TFOPTModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFOPTModel.from_pretrained("facebook/opt-350m")
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.not_equal(input_ids, model.config.pad_token_id)
        with tf.GradientTape():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]]
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3))
        xla_generate = tf.function(model, jit_compile=True)
        output = xla_generate(input_ids, attention_mask)[0]
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2))
@require_tf
@slow
class a__ ( unittest.TestCase ):
    def setUp(self):
        """simple docstring"""
        super().setUp()
        self.path_model = "facebook/opt-350m"
def lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
        model = TFOPTForCausalLM.from_pretrained(self.path_model)
        tokenizer = GPT2Tokenizer.from_pretrained(self.path_model)
        prompts = [
            "Today is a beautiful day and I want to",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts, return_tensors="tf", padding=True, add_special_tokens=False)
        logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ])
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))
        xla_generate = tf.function(model, jit_compile=True)
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))
@require_tf
@slow
class a__ ( unittest.TestCase ):
    @property
    def prompts(self):
        """simple docstring"""
        return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def lowerCAmelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_: Dict = """facebook/opt-125m"""
lowerCamelCase_: Optional[int] = [
"""Today is a beautiful day and I want to""",
"""In the city of New York, the city""",
"""Paris is the capital of France and the capital""",
"""Computers and mobile phones have taken over the""",
]
lowerCamelCase_: Union[str, Any] = []
lowerCamelCase_: str = GPTaTokenizer.from_pretrained(A_ )
lowerCamelCase_: Union[str, Any] = TFOPTForCausalLM.from_pretrained(A_ )
for prompt in self.prompts:
lowerCamelCase_: int = tokenizer(A_ , return_tensors="""tf""" ).input_ids
lowerCamelCase_: Optional[Any] = model.generate(A_ , max_length=10 )
lowerCamelCase_: List[Any] = tokenizer.batch_decode(A_ , skip_special_tokens=A_ )
predicted_outputs += generated_string
self.assertListEqual(A_ , A_ )
def lowerCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
lowerCamelCase_: Optional[Any] = """facebook/opt-350m"""
lowerCamelCase_: Optional[int] = GPTaTokenizer.from_pretrained(A_ )
lowerCamelCase_: Union[str, Any] = TFOPTForCausalLM.from_pretrained(A_ )
lowerCamelCase_: Optional[int] = """left"""
# use different length sentences to test batching
lowerCamelCase_: str = [
"""Hello, my dog is a little""",
"""Today, I""",
]
lowerCamelCase_: Any = tokenizer(A_ , return_tensors="""tf""" , padding=A_ )
lowerCamelCase_: int = inputs["""input_ids"""]
lowerCamelCase_: List[str] = model.generate(input_ids=A_ , attention_mask=inputs["""attention_mask"""] )
lowerCamelCase_: Tuple = tokenizer(sentences[0] , return_tensors="""tf""" ).input_ids
lowerCamelCase_: Optional[int] = model.generate(input_ids=A_ )
lowerCamelCase_: Union[str, Any] = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs["""attention_mask"""][-1] , tf.intaa ) )
lowerCamelCase_: Union[str, Any] = tokenizer(sentences[1] , return_tensors="""tf""" ).input_ids
lowerCamelCase_: Dict = model.generate(input_ids=A_ , max_length=model.config.max_length - num_paddings )
lowerCamelCase_: int = tokenizer.batch_decode(A_ , skip_special_tokens=A_ )
lowerCamelCase_: Optional[Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=A_ )
lowerCamelCase_: Tuple = tokenizer.decode(output_padded[0] , skip_special_tokens=A_ )
lowerCamelCase_: Any = [
"""Hello, my dog is a little bit of a dork.\nI'm a little bit""",
"""Today, I was in the middle of a conversation with a friend about the""",
]
self.assertListEqual(A_ , A_ )
self.assertListEqual(A_ , [non_padded_sentence, padded_sentence] )
def lowerCAmelCase ( self : int ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_: Dict = """facebook/opt-350m"""
lowerCamelCase_: Any = [
"""Today is a beautiful day and I want to""",
"""In the city of San Francisco, the city""",
"""Paris is the capital of France and the capital""",
"""Computers and mobile phones have taken over the""",
]
lowerCamelCase_: Union[str, Any] = []
lowerCamelCase_: Dict = GPTaTokenizer.from_pretrained(A_ )
lowerCamelCase_: Union[str, Any] = TFOPTForCausalLM.from_pretrained(A_ )
for prompt in self.prompts:
lowerCamelCase_: List[str] = tokenizer(A_ , return_tensors="""tf""" ).input_ids
lowerCamelCase_: Dict = model.generate(A_ , max_length=10 )
lowerCamelCase_: Optional[Any] = tokenizer.batch_decode(A_ , skip_special_tokens=A_ )
predicted_outputs += generated_string
self.assertListEqual(A_ , A_ )
| 584 | 1 |
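
The batched-generation test above depends on left padding: a decoder-only model like OPT continues from the last token of the prompt, so padding must sit on the left of each sequence. A minimal standalone sketch of that setup (checkpoint and tokenizer class match the test; the generated text itself will vary with model version):

import tensorflow as tf  # noqa: F401  (TF backend for the model below)
from transformers import GPT2Tokenizer, TFOPTForCausalLM

tokenizer = GPT2Tokenizer.from_pretrained("facebook/opt-350m")
tokenizer.padding_side = "left"  # pads go before the prompt, not after
model = TFOPTForCausalLM.from_pretrained("facebook/opt-350m")
inputs = tokenizer(["Hello, my dog is a little", "Today, I"], return_tensors="tf", padding=True)
outputs = model.generate(input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"])
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))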
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
GLUE_TASKS_NUM_LABELS = {
"""cola""": 2,
"""mnli""": 3,
"""mrpc""": 2,
"""sst-2""": 2,
"""sts-b""": 1,
"""qqp""": 2,
"""qnli""": 2,
"""rte""": 2,
"""wnli""": 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(tf_checkpoint_path, xlnet_config_file, pytorch_dump_folder_path, finetuning_task=None):
    # Initialise PyTorch model
    config = XLNetConfig.from_json_file(xlnet_config_file)
    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)
    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--xlnet_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained XLNet model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the folder to store the PyTorch model or dataset/vocab.""",
)
parser.add_argument(
"""--finetuning_task""",
default=None,
type=str,
help="""Name of a task on which the XLNet TensorFlow model was fine-tuned""",
)
    args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| 235 |
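
For reference, a one-off programmatic call to the converter defined above; every path here is a placeholder, not a real file:

convert_xlnet_checkpoint_to_pytorch(
    "checkpoints/xlnet_model.ckpt",      # TF checkpoint prefix (placeholder)
    "checkpoints/xlnet_config.json",     # XLNet config JSON (placeholder)
    "converted-xlnet/",                  # output folder (placeholder)
    finetuning_task="sst-2",             # any key of GLUE_TASKS_NUM_LABELS
)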
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spm_char.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/speecht5_asr": "https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model",
        "microsoft/speecht5_tts": "https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model",
        "microsoft/speecht5_vc": "https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/speecht5_asr": 1024,
    "microsoft/speecht5_tts": 1024,
    "microsoft/speecht5_vc": 1024,
}
class SpeechT5Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", unk_token="<unk>", pad_token="<pad>", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        """simple docstring"""
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        """simple docstring"""
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        """simple docstring"""
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        """simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """simple docstring"""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """simple docstring"""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """simple docstring"""
        token = self.sp_model.IdToPiece(index)
        return token
    def convert_tokens_to_string(self, tokens):
        """simple docstring"""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """simple docstring"""
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        suffix_ones = [1]
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + suffix_ones
        return ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
| 235 | 1 |
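
A quick round trip through the character-level tokenizer defined above. The checkpoint id comes from PRETRAINED_VOCAB_FILES_MAP; the decoded string may differ from the input by whatever normalization the underlying SentencePiece model applies:

from transformers import SpeechT5Tokenizer

tok = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_tts")
ids = tok("Hello world").input_ids             # roughly one piece per character, plus </s>
print(tok.decode(ids, skip_special_tokens=True))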
'''simple docstring'''
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)

MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md"
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"


def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"; torch/{_torch_version}"
    if is_flax_available():
        ua += f"; jax/{_jax_version}"
        ua += f"; flax/{_flax_version}"
    if is_onnx_available():
        ua += f"; onnxruntime/{_onnxruntime_version}"
    # CI will set this value to True
    if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua


def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None) -> str:
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f"{username}/{model_id}"
    return f"{organization}/{model_id}"
def create_model_card(args, model_name):
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`.")
    if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]:
        return
    hub_token = args.hub_token if hasattr(args, "hub_token") else None
    repo_name = get_full_repo_name(model_name, token=hub_token)
    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language="en", license="apache-2.0", library_name="diffusers", tags=[], datasets=args.dataset_name, metrics=[], ),
        template_path=MODEL_CARD_TEMPLATE_PATH,
        model_name=model_name,
        repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None,
        learning_rate=args.learning_rate,
        train_batch_size=args.train_batch_size,
        eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, "gradient_accumulation_steps") else None
        ),
        adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None,
        adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None,
        ema_power=args.ema_power if hasattr(args, "ema_power") else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None,
        mixed_precision=args.mixed_precision, )
    card_path = os.path.join(args.output_dir, "README.md")
    model_card.save(card_path)


def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None):
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r"snapshots/([^/]+)/", resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
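

# Worked example for `extract_commit_hash` above: the path mimics the Hub
# cache layout ("models--org--name" and the 40-hex commit are made up).
_commit = "a" * 40
_path = f"/cache/models--org--name/snapshots/{_commit}/config.json"
assert extract_commit_hash(_path) == _commit
assert extract_commit_hash(_path, commit_hash="given") == "given"  # explicit hash wins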
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")
def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None:
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache
    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*"):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
except OSError:
logger.warning(
'''Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.''' )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt")
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0
if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '
'existing cached models. This is a one-time operation, you can interrupt it or run it '
'later by calling `diffusers.utils.hub_utils.move_cache()`.'
)
try:
move_cache()
except Exception as e:
            trace = "\n".join(traceback.format_tb(e.__traceback__))
logger.error(
f'''There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease '''
'file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '
'message and we will do our best to help.'
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, 'w') as f:
f.write('1')
except Exception:
logger.warning(
f'''There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure '''
'the directory exists and can be written to.'
)
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)
    return weights_name
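

# `_add_variant` splices the variant in just before the file extension:
assert _add_variant("diffusion_pytorch_model.bin", "fp16") == "diffusion_pytorch_model.fp16.bin"
assert _add_variant("model.safetensors") == "model.safetensors"  # no variant: unchanged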
def _get_model_file(
    pretrained_model_name_or_path, *,
    weights_name, subfolder, cache_dir, force_download, proxies, resume_download,
    local_files_only, use_auth_token, user_agent, revision, commit_hash=None, ):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
else:
raise EnvironmentError(
F"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}." )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse("0.20.0")
):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path, filename=_add_variant(weights_name, revision), cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, subfolder=subfolder, revision=revision or commit_hash, )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.", FutureWarning, )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.", FutureWarning, )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path, filename=weights_name, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, subfolder=subfolder, revision=revision or commit_hash, )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
F"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
'''listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '''
'''token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '''
'''login`.''' )
except RevisionNotFoundError:
raise EnvironmentError(
F"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
'''this model name. Check the model page at '''
F"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions." )
except EntryNotFoundError:
raise EnvironmentError(
F"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}." )
except HTTPError as err:
raise EnvironmentError(
F"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}" )
except ValueError:
raise EnvironmentError(
F"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
F" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
F" directory containing a file named {weights_name} or"
''' \nCheckout your internet connection or see how to run the library in'''
''' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.''' )
except EnvironmentError:
raise EnvironmentError(
F"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
'''\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '''
F"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
F"containing a file named {weights_name}" )
| 216 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyImg2ImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
@property
def A ( self : Optional[Any] ) -> Tuple:
return 32
@property
def A ( self : Tuple ) -> Tuple:
return 32
@property
def A ( self : str ) -> List[str]:
return self.time_input_dim
@property
def A ( self : List[str] ) -> int:
return self.time_input_dim * 4
@property
def A ( self : int ) -> str:
return 1_00
@property
def A ( self : Dict ) -> Optional[int]:
UpperCAmelCase_ : Union[str, Any] = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
return tokenizer
@property
def A ( self : Optional[Any] ) -> int:
torch.manual_seed(0 )
UpperCAmelCase_ : int = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , )
UpperCAmelCase_ : str = MultilingualCLIP(_A )
UpperCAmelCase_ : Tuple = text_encoder.eval()
return text_encoder
@property
def A ( self : int ) -> Optional[Any]:
torch.manual_seed(0 )
UpperCAmelCase_ : Union[str, Any] = {
'''in_channels''': 4,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
        UpperCAmelCase_ : Tuple = UNet2DConditionModel(**_A )
return model
@property
def A ( self : List[str] ) -> Tuple:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def A ( self : str ) -> List[Any]:
torch.manual_seed(0 )
UpperCAmelCase_ : Any = VQModel(**self.dummy_movq_kwargs )
return model
def A ( self : Any ) -> Tuple:
UpperCAmelCase_ : Union[str, Any] = self.dummy_text_encoder
UpperCAmelCase_ : Optional[int] = self.dummy_tokenizer
UpperCAmelCase_ : Optional[int] = self.dummy_unet
UpperCAmelCase_ : Optional[Any] = self.dummy_movq
UpperCAmelCase_ : Optional[int] = {
'''num_train_timesteps''': 10_00,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.00_085,
'''beta_end''': 0.012,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
UpperCAmelCase_ : Tuple = DDIMScheduler(**_A )
UpperCAmelCase_ : str = {
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def A ( self : str , _A : Optional[int] , _A : Union[str, Any]=0 ) -> str:
UpperCAmelCase_ : Union[str, Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_A ) ).to(_A )
UpperCAmelCase_ : Union[str, Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_A )
# create init_image
UpperCAmelCase_ : List[str] = floats_tensor((1, 3, 64, 64) , rng=random.Random(_A ) ).to(_A )
UpperCAmelCase_ : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        UpperCAmelCase_ : Optional[Any] = Image.fromarray(np.uint8(_A ) ).convert('''RGB''' ).resize((2_56, 2_56) )
if str(_A ).startswith('''mps''' ):
UpperCAmelCase_ : Tuple = torch.manual_seed(_A )
else:
UpperCAmelCase_ : Union[str, Any] = torch.Generator(device=_A ).manual_seed(_A )
UpperCAmelCase_ : Union[str, Any] = {
'''prompt''': '''horse''',
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def A ( self : Dict ) -> int:
UpperCAmelCase_ : str = '''cpu'''
UpperCAmelCase_ : List[Any] = self.get_dummy_components()
UpperCAmelCase_ : Optional[int] = self.pipeline_class(**_A )
UpperCAmelCase_ : Tuple = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase_ : List[str] = pipe(**self.get_dummy_inputs(_A ) )
UpperCAmelCase_ : Optional[int] = output.images
UpperCAmelCase_ : List[Any] = pipe(
**self.get_dummy_inputs(_A ) , return_dict=_A , )[0]
UpperCAmelCase_ : List[Any] = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ : Tuple = np.array(
[0.61_474_943, 0.6_073_539, 0.43_308_544, 0.5_928_269, 0.47_493_595, 0.46_755_973, 0.4_613_838, 0.45_368_797, 0.50_119_233] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class snake_case__ ( unittest.TestCase):
def A ( self : Tuple ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A ( self : Optional[Any] ) -> Optional[int]:
UpperCAmelCase_ : Any = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinsky/kandinsky_img2img_frog.npy''' )
UpperCAmelCase_ : Optional[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
UpperCAmelCase_ : Tuple = '''A red cartoon frog, 4k'''
        UpperCAmelCase_ : Dict = KandinskyPriorPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.float16 )
        pipe_prior.to(_A )
        UpperCAmelCase_ : Any = KandinskyImg2ImgPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-1''' , torch_dtype=torch.float16 )
UpperCAmelCase_ : Optional[int] = pipeline.to(_A )
pipeline.set_progress_bar_config(disable=_A )
UpperCAmelCase_ : Dict = torch.Generator(device='''cpu''' ).manual_seed(0 )
UpperCAmelCase_ , UpperCAmelCase_ : str = pipe_prior(
_A , generator=_A , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
UpperCAmelCase_ : Any = pipeline(
_A , image=_A , image_embeds=_A , negative_image_embeds=_A , generator=_A , num_inference_steps=1_00 , height=7_68 , width=7_68 , strength=0.2 , output_type='''np''' , )
UpperCAmelCase_ : List[str] = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(_A , _A )
| 216 | 1 |
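
The prior -> img2img hand-off exercised by the slow test above, condensed into plain usage. Checkpoint ids come from the test; the image URL is a placeholder and a CUDA device is assumed:

import torch
from diffusers import KandinskyImg2ImgPipeline, KandinskyPriorPipeline
from diffusers.utils import load_image

prior = KandinskyPriorPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16).to("cuda")
pipe = KandinskyImg2ImgPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16).to("cuda")
init_image = load_image("https://example.com/cat.png")  # placeholder URL
prompt = "A red cartoon frog, 4k"
image_embeds, negative_image_embeds = prior(prompt, negative_prompt="").to_tuple()
image = pipe(prompt, image=init_image, image_embeds=image_embeds,
             negative_image_embeds=negative_image_embeds, strength=0.2,
             output_type="np").images[0]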
"""simple docstring"""
from __future__ import annotations
def pigeon_sort(array: list[int]) -> list[int]:
    """simple docstring"""
    if len(array) == 0:
        return array
    _min, _max = min(array), max(array)
    # Compute the variables
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range
    # Make the sorting.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1
    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1
    # Returns the sorted array.
    return array
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n")
    unsorted = [int(x) for x in user_input.split(",")]
print(pigeon_sort(unsorted))
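    # Pigeonhole sort runs in O(n + value_range) time and O(value_range) extra
    # space, so it only pays off when max(array) - min(array) is small compared
    # to len(array). Quick self-check (added example, not from the original):
    assert pigeon_sort([8, 3, 2, 7, 4, 6, 8]) == [2, 3, 4, 6, 7, 8, 8]
    assert pigeon_sort([]) == []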
| 232 | """simple docstring"""
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result, args):
    """simple docstring"""
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])
    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")
    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])
    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)
    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)
    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"
        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")
            result.map(write_to_file, with_indices=True)
def normalize_text(text: str) -> str:
    """simple docstring"""
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, "", text.lower())
    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]
    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))
    return text
def main(args):
    """simple docstring"""
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)
    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))
    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate
    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))
    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s)
        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)
    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
)
parser.add_argument(
"--dataset",
type=str,
required=True,
help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
)
parser.add_argument(
"--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
)
parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
parser.add_argument(
"--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
)
parser.add_argument(
"--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
)
parser.add_argument(
"--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
)
parser.add_argument(
"--device",
type=int,
default=None,
help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
)
    args = parser.parse_args()
main(args)
| 232 | 1 |
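
In isolation, the WER metric loaded by the script above compares whitespace-split words, so one substitution out of two reference words scores 0.5 (toy strings, not from the script):

from datasets import load_metric

wer = load_metric("wer")
print(wer.compute(references=["hello world"], predictions=["hello word"]))  # 0.5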
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class YolosModelTester:
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__=13 , UpperCamelCase__=[30, 30] , UpperCamelCase__=2 , UpperCamelCase__=3 , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=32 , UpperCamelCase__=5 , UpperCamelCase__=4 , UpperCamelCase__=37 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=10 , UpperCamelCase__=0.0_2 , UpperCamelCase__=3 , UpperCamelCase__=None , UpperCamelCase__=8 , UpperCamelCase__=10 , ):
A__ : Optional[int] = parent
A__ : List[Any] = batch_size
A__ : Dict = image_size
A__ : Any = patch_size
A__ : Dict = num_channels
A__ : List[Any] = is_training
A__ : int = use_labels
A__ : Any = hidden_size
A__ : List[str] = num_hidden_layers
A__ : Optional[int] = num_attention_heads
A__ : Optional[Any] = intermediate_size
A__ : str = hidden_act
A__ : str = hidden_dropout_prob
A__ : Optional[int] = attention_probs_dropout_prob
A__ : Optional[int] = type_sequence_label_size
A__ : Any = initializer_range
A__ : Optional[int] = num_labels
A__ : Union[str, Any] = scope
A__ : Union[str, Any] = n_targets
A__ : Dict = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
A__ : int = (image_size[1] // patch_size) * (image_size[0] // patch_size)
A__ : List[str] = num_patches + 1 + self.num_detection_tokens
def __snake_case ( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
        labels = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            labels = []
            for i in range(self.batch_size ):
                target = {}
                target["class_labels"] = torch.randint(
                    high=self.num_labels , size=(self.n_targets,) , device=torch_device )
                target["boxes"] = torch.rand(self.n_targets , 4 , device=torch_device )
                labels.append(target )
        config = self.get_config()
return config, pixel_values, labels
def __snake_case ( self ):
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : Tuple = YolosModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A__ : Optional[Any] = model(UpperCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : Any = YolosForObjectDetection(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A__ : Union[str, Any] = model(pixel_values=UpperCamelCase__ )
A__ : Optional[int] = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
A__ : Union[str, Any] = model(pixel_values=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def __snake_case ( self ):
A__ : Optional[int] = self.prepare_config_and_inputs()
A__ , A__ , A__ : Optional[Any] = config_and_inputs
A__ : Optional[int] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class YolosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
    '''simple docstring'''

    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
    )
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=False ):
A__ : Optional[int] = super()._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
                labels = []
                for i in range(self.model_tester.batch_size ):
                    target = {}
                    target["class_labels"] = torch.ones(
                        size=(self.model_tester.n_targets,) , device=torch_device , dtype=torch.long )
                    target["boxes"] = torch.ones(
                        self.model_tester.n_targets , 4 , device=torch_device , dtype=torch.float )
                    labels.append(target )
                inputs_dict["labels"] = labels
return inputs_dict
def __snake_case ( self ):
        self.model_tester = YolosModelTester(self )
        self.config_tester = ConfigTester(self , config_class=YolosConfig , has_text_modality=False , hidden_size=37 )
def __snake_case ( self ):
self.config_tester.run_common_tests()
def __snake_case ( self ):
# YOLOS does not use inputs_embeds
pass
def __snake_case ( self ):
A__ , A__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : Any = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A__ : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) )
def __snake_case ( self ):
A__ , A__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : List[str] = model_class(UpperCamelCase__ )
A__ : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ : Optional[int] = [*signature.parameters.keys()]
A__ : Optional[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def __snake_case ( self ):
A__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def __snake_case ( self ):
A__ , A__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A__ : Tuple = True
# in YOLOS, the seq_len is different
A__ : List[Any] = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
A__ : Any = True
A__ : Optional[int] = False
A__ : Optional[Any] = True
A__ : int = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : List[str] = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A__ : Optional[int] = outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
A__ : Tuple = True
A__ : Optional[Any] = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : Tuple = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A__ : Tuple = outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
A__ : List[Any] = len(UpperCamelCase__ )
# Check attention is always last and order is fine
A__ : List[str] = True
A__ : List[Any] = True
A__ : int = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : Tuple = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A__ : Tuple = 1
self.assertEqual(out_len + added_hidden_states , len(UpperCamelCase__ ) )
A__ : List[str] = outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def __snake_case ( self ):
def check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
A__ : str = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : int = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A__ : Optional[Any] = outputs.hidden_states
A__ : int = getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
# YOLOS has a different seq_length
A__ : Union[str, Any] = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
A__ , A__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : int = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A__ : Optional[int] = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def __snake_case ( self ):
A__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*UpperCamelCase__ )
@slow
def __snake_case ( self ):
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : Union[str, Any] = YolosModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE ( ) -> List[str]:
"""simple docstring"""
A__ : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __snake_case ( self ):
return AutoImageProcessor.from_pretrained('''hustvl/yolos-small''' ) if is_vision_available() else None
@slow
def __snake_case ( self ):
A__ : Tuple = YolosForObjectDetection.from_pretrained('''hustvl/yolos-small''' ).to(UpperCamelCase__ )
A__ : str = self.default_image_processor
A__ : Tuple = prepare_img()
A__ : Tuple = image_processor(images=UpperCamelCase__ , return_tensors='''pt''' ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
A__ : Any = model(inputs.pixel_values )
# verify outputs
A__ : List[Any] = torch.Size((1, 100, 92) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
A__ : Optional[int] = torch.tensor(
[[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] , device=UpperCamelCase__ , )
A__ : Optional[int] = torch.tensor(
[[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] , device=UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , UpperCamelCase__ , atol=1e-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , UpperCamelCase__ , atol=1e-4 ) )
# verify postprocessing
A__ : Dict = image_processor.post_process_object_detection(
UpperCamelCase__ , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
A__ : int = torch.tensor([0.9_9_9_4, 0.9_7_9_0, 0.9_9_6_4, 0.9_9_7_2, 0.9_8_6_1] ).to(UpperCamelCase__ )
A__ : str = [75, 75, 17, 63, 17]
A__ : Tuple = torch.tensor([3_3_5.0_6_0_9, 7_9.3_8_4_8, 3_7_5.4_2_1_6, 1_8_7.2_4_9_5] ).to(UpperCamelCase__ )
self.assertEqual(len(results['''scores'''] ) , 5 )
self.assertTrue(torch.allclose(results['''scores'''] , UpperCamelCase__ , atol=1e-4 ) )
self.assertSequenceEqual(results['''labels'''].tolist() , UpperCamelCase__ )
        self.assertTrue(torch.allclose(results['''boxes'''][0, :] , UpperCamelCase__ ) )
| 55 |
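
The post-processing step verified above, as standalone usage (the image path and threshold are illustrative):

import torch
from PIL import Image
from transformers import AutoImageProcessor, YolosForObjectDetection

processor = AutoImageProcessor.from_pretrained("hustvl/yolos-small")
model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small")
image = Image.open("cats.png")  # placeholder image file
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
results = processor.post_process_object_detection(
    outputs, threshold=0.9, target_sizes=[image.size[::-1]])[0]
for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
    print(model.config.id2label[label.item()], round(score.item(), 3), box.tolist())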
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    """simple docstring"""
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value
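
# Typical use of the helper above: gate opt-in tests behind an environment
# flag ("RUN_CUSTOM_TESTS" is a hypothetical variable name).
_run_custom_tests = parse_flag_from_env("RUN_CUSTOM_TESTS", default=False)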
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] ) -> Any:
"""simple docstring"""
return unittest.skip('''Test was skipped''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Tuple ) -> Union[str, Any]:
"""simple docstring"""
return unittest.skipUnless(_run_slow_tests , '''test is slow''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : str ) -> int:
"""simple docstring"""
return unittest.skipUnless(not torch.cuda.is_available() , '''test requires only a CPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] ) -> Tuple:
"""simple docstring"""
return unittest.skipUnless(torch.cuda.is_available() , '''test requires a GPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Dict ) -> List[str]:
"""simple docstring"""
return unittest.skipUnless(is_xpu_available() , '''test requires a XPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Dict ) -> Any:
"""simple docstring"""
return unittest.skipUnless(is_mps_available() , '''test requires a `mps` backend support in `torch`''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Optional[Any]:
"""simple docstring"""
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , '''test requires the Hugging Face suite''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Tuple ) -> Tuple:
"""simple docstring"""
return unittest.skipUnless(is_bnb_available() , '''test requires the bitsandbytes library''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] ) -> List[Any]:
"""simple docstring"""
return unittest.skipUnless(is_tpu_available() , '''test requires TPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Tuple:
"""simple docstring"""
return unittest.skipUnless(torch.cuda.device_count() == 1 , '''test requires a GPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Dict:
"""simple docstring"""
return unittest.skipUnless(torch.xpu.device_count() == 1 , '''test requires a XPU''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any ) -> str:
"""simple docstring"""
return unittest.skipUnless(torch.cuda.device_count() > 1 , '''test requires multiple GPUs''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> Any:
"""simple docstring"""
return unittest.skipUnless(torch.xpu.device_count() > 1 , '''test requires multiple XPUs''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] ) -> int:
"""simple docstring"""
return unittest.skipUnless(is_safetensors_available() , '''test requires safetensors''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[Any] ) -> Optional[Any]:
"""simple docstring"""
return unittest.skipUnless(is_deepspeed_available() , '''test requires DeepSpeed''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any ) -> List[Any]:
"""simple docstring"""
return unittest.skipUnless(is_torch_version('''>=''' , '''1.12.0''' ) , '''test requires torch version >= 1.12.0''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[int]=None , __UpperCamelCase : List[Any]=None ) -> Optional[Any]:
"""simple docstring"""
if test_case is None:
return partial(__UpperCamelCase , version=__UpperCamelCase )
return unittest.skipUnless(is_torch_version('''>=''' , __UpperCamelCase ) , F"test requires torch version >= {version}" )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
return unittest.skipUnless(is_tensorboard_available() , '''test requires Tensorboard''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any ) -> Tuple:
"""simple docstring"""
return unittest.skipUnless(is_wandb_available() , '''test requires wandb''' )(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Tuple ) -> Any:
"""simple docstring"""
return unittest.skipUnless(is_comet_ml_available() , '''test requires comet_ml''' )(__UpperCamelCase )
_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
return unittest.skipUnless(
_atleast_one_tracker_available , '''test requires at least one tracker to be available and for `comet_ml` to not be installed''' , )(__UpperCamelCase )
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = True
    @classmethod
    def setUpClass(cls):
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)
class AccelerateTestCase(unittest.TestCase):
    '''simple docstring'''
    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()
class MockingTestCase(unittest.TestCase):
    '''simple docstring'''
    def add_mocks(self, mocks):
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def are_the_same_tensors(tensor):
    """simple docstring"""
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True
class _RunOutput:
    '''simple docstring'''
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    """simple docstring"""
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    """simple docstring"""
    if echo:
        print('''\nRunning: ''', ''' '''.join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env, )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee(line, sink, pipe, label=""):
        line = line.decode('''utf-8''').rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label='''stdout:'''))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label='''stderr:'''))),
        ], timeout=timeout, )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=1_80, quiet=False, echo=True) -> _RunOutput:
    """simple docstring"""
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo))
    cmd_str = ''' '''.join(cmd)
    if result.returncode > 0:
        stderr = '''\n'''.join(result.stderr)
        raise RuntimeError(
            F"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            F"The combined stderr from workers follows:\n{stderr}")
    return result
class SubprocessCallException(Exception):
    '''simple docstring'''
    pass
def run_command(command, return_stdout=False):
    """simple docstring"""
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, '''decode'''):
                output = output.decode('''utf-8''')
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            F"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}") from e
"""simple docstring"""
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def get_configs(model):
    """simple docstring"""
    original_config = model.config
    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size, patch_size=4, depths=original_config.encoder_layer, num_heads=[4, 8, 16, 32], window_size=original_config.window_size, embed_dim=128, )
    decoder_config = MBartConfig(
        is_decoder=True, is_encoder_decoder=False, add_cross_attention=True, decoder_layers=original_config.decoder_layer, max_position_embeddings=original_config.max_position_embeddings, vocab_size=len(
            model.decoder.tokenizer), scale_embedding=True, add_final_layer_norm=True, )
    return encoder_config, decoder_config
def rename_key(name):
    """simple docstring"""
    if "encoder.model" in name:
        name = name.replace('''encoder.model''', '''encoder''')
    if "decoder.model" in name:
        name = name.replace('''decoder.model''', '''decoder''')
    if "patch_embed.proj" in name:
        name = name.replace('''patch_embed.proj''', '''embeddings.patch_embeddings.projection''')
    if "patch_embed.norm" in name:
        name = name.replace('''patch_embed.norm''', '''embeddings.norm''')
    if name.startswith('''encoder'''):
        if "layers" in name:
            name = '''encoder.''' + name
        if "attn.proj" in name:
            name = name.replace('''attn.proj''', '''attention.output.dense''')
        if "attn" in name and "mask" not in name:
            name = name.replace('''attn''', '''attention.self''')
        if "norm1" in name:
            name = name.replace('''norm1''', '''layernorm_before''')
        if "norm2" in name:
            name = name.replace('''norm2''', '''layernorm_after''')
        if "mlp.fc1" in name:
            name = name.replace('''mlp.fc1''', '''intermediate.dense''')
        if "mlp.fc2" in name:
            name = name.replace('''mlp.fc2''', '''output.dense''')
        if name == "encoder.norm.weight":
            name = '''encoder.layernorm.weight'''
        if name == "encoder.norm.bias":
            name = '''encoder.layernorm.bias'''
    return name
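# Worked example, traced through the branches above:
#   rename_key("encoder.model.layers.0.blocks.0.attn.proj.weight")
#   -> "encoder.encoder.layers.0.blocks.0.attention.output.dense.weight"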
def convert_state_dict(orig_state_dict, model):
    """simple docstring"""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split('''.''')
            layer_num = int(key_split[3])
            block_num = int(key_split[5])
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
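# A minimal numeric sketch of the fused-QKV split performed above: any fused
# projection weight of shape (3 * dim, dim) is cut into equal query/key/value
# thirds along the first axis (the tensor here is made up for illustration).
if __name__ == "__main__":
    dim = 4
    qkv_weight = torch.arange(3 * dim * dim).reshape(3 * dim, dim)
    query, key, value = qkv_weight[:dim, :], qkv_weight[dim : dim * 2, :], qkv_weight[-dim:, :]
    assert query.shape == key.shape == value.shape == (dim, dim)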
def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """simple docstring"""
    # load original model
    original_model = DonutModel.from_pretrained(model_name).eval()
    # load HuggingFace model
    encoder_config, decoder_config = get_configs(original_model)
    encoder = DonutSwinModel(encoder_config)
    decoder = MBartForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()
    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)
    # verify results on scanned document
    dataset = load_dataset('''hf-internal-testing/example-documents''')
    image = dataset['''test'''][0]['''image'''].convert('''RGB''')
    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True)
    image_processor = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1])
    processor = DonutProcessor(image_processor, tokenizer)
    pixel_values = processor(image, return_tensors='''pt''').pixel_values
    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        task_prompt = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>'''
        question = '''When is the coffee break?'''
        task_prompt = task_prompt.replace('''{user_input}''', question)
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        task_prompt = '''<s_rvlcdip>'''
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        task_prompt = '''<s_cord>'''
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        task_prompt = '''s_cord-v2>'''
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        task_prompt = '''<s_zhtrainticket>'''
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        task_prompt = '''hello world'''
    else:
        raise ValueError('''Model name not supported''')
    prompt_tensors = original_model.decoder.tokenizer(task_prompt, add_special_tokens=False, return_tensors='''pt''')[
        '''input_ids'''
    ]
    original_patch_embed = original_model.encoder.model.patch_embed(pixel_values)
    patch_embeddings, _ = model.encoder.embeddings(pixel_values)
    assert torch.allclose(original_patch_embed, patch_embeddings, atol=1e-3)
    # verify encoder hidden states
    original_last_hidden_state = original_model.encoder(pixel_values)
    last_hidden_state = model.encoder(pixel_values).last_hidden_state
    assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1e-2)
    # verify decoder hidden states
    original_logits = original_model(pixel_values, prompt_tensors, None).logits
    logits = model(pixel_values, decoder_input_ids=prompt_tensors).logits
    assert torch.allclose(original_logits, logits, atol=1e-3)
    print('''Looks ok!''')
    if pytorch_dump_folder_path is not None:
        print(f'Saving model and processor to {pytorch_dump_folder_path}')
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        model.push_to_hub('''nielsr/''' + model_name.split('''/''')[-1], commit_message='''Update model''')
        processor.push_to_hub('''nielsr/''' + model_name.split('''/''')[-1], commit_message='''Update model''')
if __name__ == "__main__":
UpperCAmelCase_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""naver-clova-ix/donut-base-finetuned-docvqa""",
required=False,
type=str,
help="""Name of the original model you\'d like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
required=False,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether or not to push the converted model and processor to the 🤗 hub.""",
)
UpperCAmelCase_ : Dict = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
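# Example invocation (a sketch; the script filename and output path are
# placeholders, not taken from this file):
#   python convert_donut_to_pytorch.py \
#       --model_name naver-clova-ix/donut-base-finetuned-docvqa \
#       --pytorch_dump_folder_path ./donut-base-finetuned-docvqa \
#       --push_to_hub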
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'TsinghuaAI/CPM-Generate': 'https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model',
    }
}
class CpmTokenizer(PreTrainedTokenizer):
    """simple docstring"""
    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                '''You need to install jieba to use CpmTokenizer or CpmTokenizerFast. '''
                '''See https://pypi.org/project/jieba/ for installation.''')
        self.jieba = jieba
        self.translator = str.maketrans(''' \n''', '''\u2582\u2583''')
    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self) -> int:
        return len(self.sp_model)
    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state
    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, '''sp_model_kwargs'''):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = ''' '''.join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace('''``''', '''"''').replace('''\'\'''', '''"''')
        if not self.keep_accents:
            outputs = unicodedata.normalize('''NFKD''', outputs)
            outputs = ''''''.join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize(self, text: str) -> List[str]:
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(''',''') and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ''''''))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces
    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)
    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)
    def convert_tokens_to_string(self, tokens):
        out_string = ''''''.join(tokens).replace(SPIECE_UNDERLINE, ''' ''').strip()
        return out_string
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, '''wb''') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
    def _decode(self, *args, **kwargs):
        text = super()._decode(*args, **kwargs)
        text = text.replace(''' ''', '''''').replace('''\u2582''', ''' ''').replace('''\u2583''', '''\n''')
        return text
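# A minimal sketch of the whitespace round-trip CpmTokenizer relies on: the
# translator built in __init__ maps spaces/newlines to U+2582/U+2583 before
# SentencePiece sees the text, and `_decode` above maps them back. The sample
# string is illustrative only.
if __name__ == "__main__":
    translator = str.maketrans(''' \n''', '''\u2582\u2583''')
    encoded = '''你好 世界\n'''.translate(translator)
    assert encoded == '''你好\u2582世界\u2583'''
    assert encoded.replace('''\u2582''', ''' ''').replace('''\u2583''', '''\n''') == '''你好 世界\n'''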
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
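# Usage sketch for `floats_list` above: a (batch, length) nested list of floats
# drawn from the module-level RNG, each value in [0, scale).
if __name__ == "__main__":
    sample = floats_list((2, 5), scale=2.0)
    assert len(sample) == 2 and len(sample[0]) == 5
    assert all(0.0 <= v < 2.0 for row in sample for v in row)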
@require_torch
class SpeechTaFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2_000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16_000,
        do_normalize=True,
        num_mel_bins=80,
        hop_length=16,
        win_length=64,
        win_function="hann_window",
        fmin=80,
        fmax=7_600,
        mel_floor=1e-10,
        return_attention_mask=True,
    ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask
    def prepare_feat_extract_dict(self):
        '''simple docstring'''
        return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        '''simple docstring'''
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))
        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
    def prepare_inputs_for_target(self, equal_length=False, numpify=False):
        '''simple docstring'''
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
class SpeechTaFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = SpeechTaFeatureExtractor
    def setUp(self):
        '''simple docstring'''
        self.feat_extract_tester = SpeechTaFeatureExtractionTester(self)
    def _check_zero_mean_unit_variance(self, input_vector):
        '''simple docstring'''
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
    def test_call(self):
        '''simple docstring'''
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))
        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
def __snake_case( self : Any ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
SCREAMING_SNAKE_CASE = ["longest", "max_length", "do_not_pad"]
SCREAMING_SNAKE_CASE = [None, 1_600, None]
for max_length, padding in zip(_UpperCamelCase , _UpperCamelCase ):
SCREAMING_SNAKE_CASE = feat_extract(_UpperCamelCase , padding=_UpperCamelCase , max_length=_UpperCamelCase , return_tensors="np" )
SCREAMING_SNAKE_CASE = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[1][:1_000] )
self.assertTrue(input_values[0][1_000:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[2][:1_200] )
def __snake_case( self : Dict ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE = range(800 , 1_400 , 200 )
SCREAMING_SNAKE_CASE = [floats_list((1, x) )[0] for x in lengths]
SCREAMING_SNAKE_CASE = ["longest", "max_length", "do_not_pad"]
SCREAMING_SNAKE_CASE = [None, 1_600, None]
for max_length, padding in zip(_UpperCamelCase , _UpperCamelCase ):
SCREAMING_SNAKE_CASE = feat_extract(_UpperCamelCase , max_length=_UpperCamelCase , padding=_UpperCamelCase )
SCREAMING_SNAKE_CASE = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1_000] )
self._check_zero_mean_unit_variance(input_values[2][:1_200] )
def __snake_case( self : List[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
SCREAMING_SNAKE_CASE = feat_extract(
_UpperCamelCase , truncation=_UpperCamelCase , max_length=1_000 , padding="max_length" , return_tensors="np" )
SCREAMING_SNAKE_CASE = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def __snake_case( self : Dict ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
SCREAMING_SNAKE_CASE = feat_extract(
_UpperCamelCase , truncation=_UpperCamelCase , max_length=1_000 , padding="longest" , return_tensors="np" )
SCREAMING_SNAKE_CASE = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1_000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1_000) )
SCREAMING_SNAKE_CASE = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
SCREAMING_SNAKE_CASE = feat_extract(
_UpperCamelCase , truncation=_UpperCamelCase , max_length=2_000 , padding="longest" , return_tensors="np" )
SCREAMING_SNAKE_CASE = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1_000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1_200) )
def __snake_case( self : Any ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE = np.random.rand(100 ).astype(np.floataa )
SCREAMING_SNAKE_CASE = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
SCREAMING_SNAKE_CASE = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
SCREAMING_SNAKE_CASE = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def __snake_case( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
SCREAMING_SNAKE_CASE = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
SCREAMING_SNAKE_CASE = [np.asarray(_UpperCamelCase ) for speech_input in speech_inputs]
# Test feature size
SCREAMING_SNAKE_CASE = feature_extractor(audio_target=_UpperCamelCase , padding=_UpperCamelCase , return_tensors="np" ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
SCREAMING_SNAKE_CASE = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_values
SCREAMING_SNAKE_CASE = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_values
self.assertTrue(np.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-3 ) )
# Test batched
SCREAMING_SNAKE_CASE = feature_extractor(_UpperCamelCase , return_tensors="np" ).input_values
SCREAMING_SNAKE_CASE = feature_extractor(_UpperCamelCase , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(_UpperCamelCase , _UpperCamelCase ):
self.assertTrue(np.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
SCREAMING_SNAKE_CASE = [floats_list((1, x) )[0] for x in (800, 800, 800)]
SCREAMING_SNAKE_CASE = np.asarray(_UpperCamelCase )
SCREAMING_SNAKE_CASE = feature_extractor(_UpperCamelCase , return_tensors="np" ).input_values
SCREAMING_SNAKE_CASE = feature_extractor(_UpperCamelCase , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(_UpperCamelCase , _UpperCamelCase ):
self.assertTrue(np.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-3 ) )
def __snake_case( self : Optional[int] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.feat_extract_tester.prepare_inputs_for_target()
SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(_UpperCamelCase ) == len(_UpperCamelCase ) for x, y in zip(_UpperCamelCase , processed_features[input_name] ) ) )
SCREAMING_SNAKE_CASE = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_UpperCamelCase )
SCREAMING_SNAKE_CASE = BatchFeature({input_name: speech_inputs} , tensor_type="np" )
SCREAMING_SNAKE_CASE = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def __snake_case( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_UpperCamelCase )
SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE = BatchFeature({input_name: speech_inputs} , tensor_type="pt" )
SCREAMING_SNAKE_CASE = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def __snake_case( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE = self.feat_extract_tester.prepare_inputs_for_target()
SCREAMING_SNAKE_CASE = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE = BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE = feat_extract.num_mel_bins # hack!
SCREAMING_SNAKE_CASE = feat_extract.pad(_UpperCamelCase , padding="longest" , return_tensors="np" )[input_name]
SCREAMING_SNAKE_CASE = feat_extract.pad(_UpperCamelCase , padding="longest" , return_tensors="pt" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
def __snake_case( self : str ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.feat_extract_dict
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = self.feature_extraction_class(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = self.feat_extract_tester.prepare_inputs_for_target()
SCREAMING_SNAKE_CASE = [len(_UpperCamelCase ) for x in speech_inputs]
SCREAMING_SNAKE_CASE = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE = BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE = feat_extract.num_mel_bins # hack!
SCREAMING_SNAKE_CASE = feat_extract.pad(_UpperCamelCase , padding="longest" , return_tensors="np" )
self.assertIn("attention_mask" , _UpperCamelCase )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , _UpperCamelCase )
def __snake_case( self : int ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.feat_extract_dict
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = self.feature_extraction_class(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = self.feat_extract_tester.prepare_inputs_for_target()
SCREAMING_SNAKE_CASE = [len(_UpperCamelCase ) for x in speech_inputs]
SCREAMING_SNAKE_CASE = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE = BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE = min(_UpperCamelCase )
SCREAMING_SNAKE_CASE = feat_extract.num_mel_bins # hack!
SCREAMING_SNAKE_CASE = feat_extract.pad(
_UpperCamelCase , padding="max_length" , max_length=_UpperCamelCase , truncation=_UpperCamelCase , return_tensors="np" )
self.assertIn("attention_mask" , _UpperCamelCase )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
    def _load_datasamples(self, num_samples):
        '''simple docstring'''
        from datasets import load_dataset
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
def __snake_case( self : str ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = torch.tensor(
[2.3_804e-03, 2.0_752e-03, 1.9_836e-03, 2.1_057e-03, 1.6_174e-03,
3.0_518e-04, 9.1_553e-05, 3.3_569e-04, 9.7_656e-04, 1.8_311e-03,
2.0_142e-03, 2.1_057e-03, 1.7_395e-03, 4.5_776e-04, -3.9_673e-04,
4.5_776e-04, 1.0_071e-03, 9.1_553e-05, 4.8_828e-04, 1.1_597e-03,
7.3_242e-04, 9.4_604e-04, 1.8_005e-03, 1.8_311e-03, 8.8_501e-04,
4.2_725e-04, 4.8_828e-04, 7.3_242e-04, 1.0_986e-03, 2.1_057e-03] )
# fmt: on
SCREAMING_SNAKE_CASE = self._load_datasamples(1 )
SCREAMING_SNAKE_CASE = SpeechTaFeatureExtractor()
SCREAMING_SNAKE_CASE = feature_extractor(_UpperCamelCase , return_tensors="pt" ).input_values
        self.assertEqual(input_values.shape, (1, 93_680))
self.assertTrue(torch.allclose(input_values[0, :30] , _UpperCamelCase , atol=1e-6 ) )
def __snake_case( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = torch.tensor(
[-2.6_8_7_0, -3.0_1_0_4, -3.1_3_5_6, -3.5_3_5_2, -3.0_0_4_4, -3.0_3_5_3, -3.4_7_1_9, -3.6_7_7_7,
-3.1_5_2_0, -2.9_4_3_5, -2.6_5_5_3, -2.8_7_9_5, -2.9_9_4_4, -2.5_9_2_1, -3.0_2_7_9, -3.0_3_8_6,
-3.0_8_6_4, -3.1_2_9_1, -3.2_3_5_3, -2.7_4_4_4, -2.6_8_3_1, -2.7_2_8_7, -3.1_7_6_1, -3.1_5_7_1,
-3.2_7_2_6, -3.0_5_8_2, -3.1_0_0_7, -3.4_5_3_3, -3.4_6_9_5, -3.0_9_9_8] )
# fmt: on
SCREAMING_SNAKE_CASE = self._load_datasamples(1 )
SCREAMING_SNAKE_CASE = SpeechTaFeatureExtractor()
SCREAMING_SNAKE_CASE = feature_extractor(audio_target=_UpperCamelCase , return_tensors="pt" ).input_values
        self.assertEqual(input_values.shape, (1, 366, 80))
self.assertTrue(torch.allclose(input_values[0, 0, :30] , _UpperCamelCase , atol=1e-4 ) )
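# A minimal sketch of the property `_check_zero_mean_unit_variance` verifies in
# the tests above: per-utterance standardization of the input values. The
# numbers below are illustrative only.
if __name__ == "__main__":
    x = np.random.randn(400) * 3.0 + 5.0
    normalized = (x - x.mean()) / np.sqrt(x.var() + 1e-7)
    assert abs(normalized.mean()) < 1e-3
    assert abs(normalized.var() - 1) < 1e-3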
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
logger = logging.get_logger(__name__)
class SequenceFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        '''simple docstring'''
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)
        super().__init__(**kwargs)
    def pad(
        self,
        processed_features,
        padding=True,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_tensors=None,
    ) -> BatchFeature:
        '''simple docstring'''
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }
        # The model's main input name, usually `input_values`, has to be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys())}")
        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )
        if len(required_input) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features
        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]
        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = "tf"
            elif is_torch_tensor(first_element):
                return_tensors = "pt"
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = "np"
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element)}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object.")
        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]
        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)
        required_input = processed_features[self.model_input_names[0]]
        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")
        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, truncation=truncation, )
            truncated_inputs.append(inputs_slice)
        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH
        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i], max_length=max_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, )
            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)
        return BatchFeature(batch_outputs, tensor_type=return_tensors)
    def _pad(
        self,
        processed_features,
        max_length=None,
        padding_strategy=PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of=None,
        return_attention_mask=None,
    ) -> dict:
        '''simple docstring'''
        required_input = processed_features[self.model_input_names[0]]
        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length
        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)
        if needs_to_be_padded:
            difference = max_length - len(required_input)
            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference))
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value)
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0))
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value)
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))
        return processed_features
    def _truncate(
        self,
        processed_features,
        max_length=None,
        pad_to_multiple_of=None,
        truncation=None,
    ):
        '''simple docstring'''
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")
        required_input = processed_features[self.model_input_names[0]]
        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
        needs_to_be_truncated = len(required_input) > max_length
        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]
        return processed_features
    def _get_padding_strategies(self, padding=False, max_length=None):
        '''simple docstring'''
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD
        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined")
        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.")
        return padding_strategy
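# A small worked example of the `pad_to_multiple_of` rounding used by `_pad`
# and `_truncate` above: the target length is rounded up to the next multiple.
if __name__ == "__main__":
    max_length, pad_to_multiple_of = 1000, 128
    if max_length % pad_to_multiple_of != 0:
        max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
    assert max_length == 1024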
"""simple docstring"""
import re
def indian_phone_validator(phone: str) -> bool:
    pat = re.compile(R"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    if match := re.search(pat, phone):
        return match.string == phone
    return False
if __name__ == "__main__":
print(indian_phone_validator("+918827897895"))
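    # A few more illustrative cases, traced through the regex above:
    print(indian_phone_validator("+91 9876543210"))  # True: optional "+91 " prefix
    print(indian_phone_validator("09876543210"))     # True: optional leading 0
    print(indian_phone_validator("1234567890"))      # False: must start with 7, 8 or 9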
"""simple docstring"""
from functools import lru_cache
def unique_prime_factors(n: int) -> set:
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors
@lru_cache
def upf_len(num: int) -> int:
    return len(unique_prime_factors(num))
def equality(iterable: list) -> bool:
    return len(set(iterable)) in (0, 1)
def run(n: int) -> list:
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]
        # Run elements through our unique_prime_factors function.
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)
        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group
        # Increment our base variable by 1
        base += 1
def solution(n: int = 4) -> int:
    results = run(n)
    return results[0] if len(results) else None
if __name__ == "__main__":
print(solution())
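# Well-known sanity checks for this problem (Project Euler 47): the first pair
# of consecutive integers with 2 distinct prime factors each is (14, 15), and
# the first such triple is (644, 645, 646).
if __name__ == "__main__":
    assert run(2) == [14, 15]
    assert run(3) == [644, 645, 646]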
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1_28,
        max_relative_position=32,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.0_2,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        '''simple docstring'''
        return NezhaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
    def prepare_config_and_inputs_for_decoder(self):
        '''simple docstring'''
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = NezhaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        '''simple docstring'''
        config.add_cross_attention = True
        model = NezhaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, )
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = NezhaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = NezhaForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))
    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = NezhaForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, next_sentence_label=sequence_labels, )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))
    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = NezhaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = NezhaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = NezhaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        config.num_choices = self.num_choices
        model = NezhaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NezhaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': NezhaModel,
'fill-mask': NezhaForMaskedLM,
'question-answering': NezhaForQuestionAnswering,
'text-classification': NezhaForSequenceClassification,
'token-classification': NezhaForTokenClassification,
'zero-shot': NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = NezhaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NezhaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)
    def test_model_as_decoder_with_default_input_mask(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@slow
@require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "bert.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "bert.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class NezhaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_nezha_model(self):
        model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))

    @slow
    def test_inference_nezha_masked_lm(self):
        model = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 21128))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4)) | 712 | """simple docstring"""
"""simple docstring"""
def gcd(a: int, b: int) -> int:
    while a != 0:
        a, b = b % a, a
    return b
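

# The function below uses the iterative extended Euclidean algorithm: the
# triples (u1, u2, u3) and (v1, v2, v3) maintain the invariants
# u1*a + u2*m == u3 and v1*a + v2*m == v3. When v3 reaches 0 we have
# u3 == gcd(a, m) == 1, hence u1*a == 1 (mod m) and u1 % m is the inverse.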
def find_mod_inverse(a: int, m: int) -> int:
    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        u1, u2, u3, v1, v2, v3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m | 621 | 0 |
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
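

# Sanity check: with a freshly initialized register every qubit starts in |0>,
# so measuring qubit 0 into classical bit 0 on the noiseless simulator should
# put all 1000 shots on the all-zero outcome, e.g. counts like {"0": 1000}.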
if __name__ == "__main__":
print(F'''Total count for various states are: {single_qubit_measure(1, 1)}''')
| 148 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''andreasmadsen/efficient_mlm_m0.40''': (
'''https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json'''
),
}
class RobertaPreLayerNormConfig(PretrainedConfig):
    model_type = "roberta-prelayernorm"

    def __init__(
        self,
        vocab_size=50_265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 148 | 1 |
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMSNModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMSNModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        print(f"Pixel and labels shape: {pixel_values.shape}, {labels.shape}")
        print(f"Labels: {labels}")
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMSNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = ViTMSNModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="ViTMSN does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img() -> Image.Image:
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTMSNModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-msn-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        torch.manual_seed(2)
        model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 208 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class VivitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        num_frames=10,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}

        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size
    def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class VivitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = VivitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = VivitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL videos
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], Image.Image)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], np.ndarray)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], torch.Tensor)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 208 | 1 |
"""simple docstring"""
def a ( __UpperCAmelCase : str ) -> str:
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 96 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 650, "eval_accuracy": 0.6, "eval_loss": 0.9},
},
{
"framework": "tensorflow",
"script": "run_tf.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.3, "eval_loss": 0.9},
},
] )
class SingleNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count=1):
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-single",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            py_version="py36",
        )
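
    # Note: metric_definitions holds the regexes SageMaker uses to scrape values
    # such as eval_accuracy and eval_loss out of the training-job logs; they are
    # what the assertions in test_glue below read back via TrainingJobAnalytics.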
    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")
    def test_glue(self):
        # create estimator
        estimator = self.create_estimator()

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 487 | 0 |
"""simple docstring"""
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250

        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores
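
    # A stopping criterion is invoked as criteria(input_ids, scores) and returns
    # True once generation should stop, so the dummy tensors above only need
    # plausible shapes, not meaningful contents.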
    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )

        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)

        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)

        self.assertEqual(len(stopping_criteria), 1) | 713 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_falcon"] = [
"FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
"FalconForCausalLM",
"FalconModel",
"FalconPreTrainedModel",
"FalconForSequenceClassification",
"FalconForTokenClassification",
"FalconForQuestionAnswering",
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 93 | 0 |
'''simple docstring'''
from __future__ import annotations
solution = []
def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """
    Returns True if it is safe to place a queen at (row, column) given the
    current state of the board.
    """
    # check this row
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    # check this column
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    # check the upper-left diagonal
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    # check the upper-right diagonal
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    """
    Places queens row by row, backtracking whenever no safe column exists.
    """
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()
# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('''The total no. of solutions are :''', len(solution))
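# Each complete placement is printed as it is found; for the default 8x8 board
# this backtracking search enumerates all 92 solutions of the classic puzzle.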
| 41 |
'''simple docstring'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar("KEY")
VAL = TypeVar("VAL")
@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()
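

# Deletions write the _deleted tombstone instead of None: with open addressing
# and linear probing, writing None back would break the probe chain for other
# keys that collided on the same bucket, while a tombstone lets lookups scan
# past the slot.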
class HashMap(MutableMapping[KEY, VAL]):
    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        # set the value if the bucket is empty or holds the same key;
        # report a collision otherwise
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = " ,".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
| 41 | 1 |
'''simple docstring'''
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)

name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names
def add_arguments(parser):
    """Add arguments to parser for functions defined in quant_trainer."""

    group = parser.add_argument_group("quant_trainer arguments")
    group.add_argument("--wprec", type=int, default=8, help="weight precision")
    group.add_argument("--aprec", type=int, default=8, help="activation precision")
    group.add_argument("--quant-per-tensor", action="store_true", help="per tensor weight scaling")
    group.add_argument("--quant-disable", action="store_true", help="disable all quantizers")
    group.add_argument("--quant-disable-embeddings", action="store_true", help="disable all embeddings quantizers")
    group.add_argument("--quant-disable-keyword", type=str, nargs="+", help="disable quantizers by keyword")
    group.add_argument("--quant-disable-layer-module", type=str, help="disable quantizers by keyword under layer.")
    group.add_argument("--quant-enable-layer-module", type=str, help="enable quantizers by keyword under layer")
    group.add_argument("--calibrator", default="max", help="which quantization range calibrator to use")
    group.add_argument("--percentile", default=None, type=float, help="percentile for PercentileCalibrator")
    group.add_argument("--fuse-qkv", action="store_true", help="use the same scale factor for qkv")
    group.add_argument("--clip-gelu", metavar="N", type=float, help="clip gelu output maximum value to N")
    group.add_argument(
        "--recalibrate-weights",
        action="store_true",
        help=(
            "recalibrate weight amaxes by taking the max of the weights."
            " amaxes will be computed with the current quantization granularity (axis)."
        ),
    )
def set_default_quantizers(args):
    """Set default quantizers before creating the model."""

    if args.calibrator == "max":
        calib_method = "max"
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError("Specify --percentile when using percentile calibrator")
        calib_method = "histogram"
    elif args.calibrator == "mse":
        calib_method = "histogram"
    else:
        raise ValueError(f"Invalid calibrator {args.calibrator}")

    input_desc = QuantDescriptor(num_bits=args.aprec, calib_method=calib_method)
    weight_desc = QuantDescriptor(num_bits=args.wprec, axis=(None if args.quant_per_tensor else (0,)))
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc)
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc)
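

# With axis=(0,) the weight quantizer keeps one amax per output channel (one
# scale per row of the weight matrix); passing --quant-per-tensor sets
# axis=None and collapses this to a single scale for the whole tensor.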
def configure_model(model, args, calib=False, eval=False):
    """Function called before the training loop."""

    logger.info("Configuring Model for Quantization")
    logger.info(f"using quantization package {pytorch_quantization.__file__}")

    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model, ["embeddings"], which="weight", _disabled=True)

        if args.quant_disable:
            set_quantizer_by_name(model, [""], _disabled=True)

        if args.quant_disable_keyword:
            set_quantizer_by_name(model, args.quant_disable_keyword, _disabled=True)

        if args.quant_disable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_disable_layer_module], _disabled=True)

        if args.quant_enable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_enable_layer_module], _disabled=False)

        if args.recalibrate_weights:
            recalibrate_weights(model)

        if args.fuse_qkv:
            fuse_qkv(model, args)

    if args.clip_gelu:
        clip_gelu(model, args.clip_gelu)

    # if args.local_rank in [-1, 0] and not calib:
    print_quant_summary(model)
def enable_calibration(model):
logger.info('''Enabling Calibration''' )
for name, module in model.named_modules():
if name.endswith('''_quantizer''' ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(f"{name:80}: {module}" )
def finish_calibration(model, args):
logger.info('''Loading calibrated amax''' )
for name, module in model.named_modules():
if name.endswith('''_quantizer''' ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax('''percentile''' , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
    print_quant_summary(model)
def fuse_qkv(model, args):
    """Adjust quantization ranges to match an implementation where Q, K and V are computed by a single fused GEMM."""

    def fusea(qq, qk, qv):
        for mod in [qq, qk, qv]:
            if not hasattr(mod, "_amax"):
                print("          WARNING: NO AMAX BUFFER")
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()

        amax = max(q, k, v)
        qq._amax.fill_(amax)
        qk._amax.fill_(amax)
        qv._amax.fill_(amax)
        logger.info(f"          q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}")

    for name, mod in model.named_modules():
        if name.endswith(".attention.self"):
            logger.info(f"FUSE_QKV: {name:{name_width}}")
            fusea(mod.matmul_q_input_quantizer, mod.matmul_k_input_quantizer, mod.matmul_v_input_quantizer)
            if args.quant_per_tensor:
                fusea(mod.query._weight_quantizer, mod.key._weight_quantizer, mod.value._weight_quantizer)
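

# Rationale: forcing Q, K and V onto the max of their three amaxes gives the
# projections one common quantization scale (as a fused INT8 QKV kernel
# typically assumes) while guaranteeing that no tensor is clipped harder than
# it was before the fusion.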
def clip_gelu(model, maxval):
    """Clip activations generated by GELU to maxval by adjusting the amax of the following input quantizer."""

    for name, mod in model.named_modules():
        if name.endswith(".output.dense") and not name.endswith("attention.output.dense"):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval)
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(f"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}")
def expand_amax(model):
    """Expand per-tensor amax to be per channel, where each channel is assigned the per-tensor amax."""

    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer") and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k, dtype=amax.dtype, device=amax.device) * amax
            print(f"expanding {name} {amax} -> {mod._weight_quantizer._amax}")
def recalibrate_weights(model):
    """Performs max calibration on the weights and updates amax."""

    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer"):
            if not hasattr(mod.weight_quantizer, "_amax"):
                print(f"RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER")
                continue

            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis)
            reduce_axis = set(range(len(mod.weight.size()))) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight, axis=reduce_axis, keepdims=True).detach()
            logger.info(f"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}")
            mod._weight_quantizer._amax = amax
def print_model_summary(model, name_width=25, line_width=180, ignore=None):
    """Print model quantization configuration."""

    if ignore is None:
        ignore = []
    elif not isinstance(ignore, list):
        ignore = [ignore]

    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod, "weight"):
            continue
        name_width = max(name_width, len(name))

    for name, mod in model.named_modules():
        input_q = getattr(mod, "_input_quantizer", None)
        weight_q = getattr(mod, "_weight_quantizer", None)
        if not hasattr(mod, "weight"):
            continue
        if type(mod) in ignore:
            continue
        if [True for s in ignore if type(s) is str and s in name]:
            continue
        act_str = f"Act:{input_q.extra_repr()}"
        wgt_str = f"Wgt:{weight_q.extra_repr()}"
        line = f"{name:{name_width}} {act_str} {wgt_str}"
        if len(line) <= line_width:
            logger.info(line)
        else:
            logger.info(f"{name:{name_width}} {act_str}")
            logger.info(f"{' ':{name_width}} {wgt_str}")
def print_quant_summary(model):
    """Print summary of all quantizer modules in the model."""

    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod, pytorch_quantization.nn.TensorQuantizer):
            print(f"{name:80} {mod}")
            count += 1
    print(f"{count} TensorQuantizers found in model")
def set_quantizer(name, mod, quantizer, k, v):
    """Set attributes for mod.quantizer."""

    quantizer_mod = getattr(mod, quantizer, None)
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod, k)
        setattr(quantizer_mod, k, v)
    else:
        logger.warning(f"{name} has no {quantizer}")
def set_quantizers(name, mod, which="both", **kwargs):
    """Set quantizer attributes for mod."""

    s = f"Warning: changing {which} quantizers of {name:{qname_width}}"
    for k, v in kwargs.items():
        s += f" {k}={v}"
        if which in ["input", "both"]:
            set_quantizer(name, mod, "_input_quantizer", k, v)
        if which in ["weight", "both"]:
            set_quantizer(name, mod, "_weight_quantizer", k, v)
    logger.info(s)
def set_quantizer_by_name(model, names, **kwargs):
    """Set quantizer attributes for layers whose name contains a substring in names."""

    for name, mod in model.named_modules():
        if hasattr(mod, "_input_quantizer") or hasattr(mod, "_weight_quantizer"):
            for n in names:
                if re.search(n, name):
                    set_quantizers(name, mod, **kwargs)
        elif name.endswith("_quantizer"):
            for n in names:
                if re.search(n, name):
                    s = f"Warning: changing {name:{name_width}}"
                    for k, v in kwargs.items():
                        s += f" {k}={v}"
                        setattr(mod, k, v)
                    logger.info(s)
| 712 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
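
    # This mirrors the processor's shortest-edge resizing: the smaller side is
    # scaled to size["shortest_edge"] and the other side keeps the aspect ratio,
    # so expected output shapes can be computed without running the processor.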
@require_torch
@require_vision
class YolosImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = YolosImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = YolosImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_equivalence_padding(self):
        # Initialize image_processings
        image_processing_1 = self.image_processing_class(**self.image_processor_dict)
        image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_1.pad(image_inputs, return_tensors="pt")
        encoded_images = image_processing_2(image_inputs, return_tensors="pt")

        self.assertTrue(
            torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4)
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
@slow
def lowerCAmelCase (self : Optional[int] ):
# prepare image, target and masks_path
__a : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
__a : int = json.loads(f.read() )
__a : Optional[int] = {'''file_name''': '''000000039769.png''', '''image_id''': 3_9_7_6_9, '''segments_info''': target}
__a : List[str] = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
__a : Any = YolosImageProcessor(format='''coco_panoptic''' )
__a : Any = image_processing(images=snake_case_ , annotations=snake_case_ , masks_path=snake_case_ , return_tensors='''pt''' )
# verify pixel values
__a : Tuple = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding['''pixel_values'''].shape , snake_case_ )
__a : str = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , snake_case_ , atol=1E-4 ) )
# verify area
__a : Optional[Any] = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , snake_case_ ) )
# verify boxes
__a : int = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , snake_case_ )
__a : Tuple = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , snake_case_ , atol=1E-3 ) )
# verify image_id
__a : Dict = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , snake_case_ ) )
# verify is_crowd
__a : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , snake_case_ ) )
# verify class_labels
__a : Any = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , snake_case_ ) )
# verify masks
__a : Tuple = 8_2_2_8_7_3
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , snake_case_ )
# verify orig_size
__a : Any = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , snake_case_ ) )
# verify size
__a : List[str] = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , snake_case_ ) )
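# Added note (not in the original test file): the detection test above consumes
# COCO-style targets shaped as {"image_id": int, "annotations": [per-object dicts
# with "bbox", "category_id", "area", "iscrowd"]}, while the panoptic test passes
# {"file_name": str, "image_id": int, "segments_info": [...]} together with a
# masks_path pointing at the folder of panoptic segmentation PNGs.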
| 326 | 0 |
'''simple docstring'''
def is_pangram(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    '''simple docstring'''
    frequency = set()
    # Ignore all the whitespace in our sentence
    input_str = input_str.replace(" ", "")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26


def is_pangram_faster(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    '''simple docstring'''
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - ord("a")] = True
        elif char.isupper():
            flag[ord(char) - ord("A")] = True
    return all(flag)


def is_pangram_fastest(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    '''simple docstring'''
    return len({char for char in input_str.lower() if char.isalpha()}) == 26


def benchmark() -> None:
    '''simple docstring'''
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
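    # Added sanity checks (illustrative, not in the original module): all three
    # implementations must agree.
    assert is_pangram() and is_pangram_faster() and is_pangram_fastest()
    assert not is_pangram("Hello world")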
| 38 |
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time(t) -> str:
    "Format `t` (in seconds) to (h):mm:ss"
    t = int(t)
    h, m, s = t // 3600, (t // 60) % 60, t % 60
    return F'''{h}:{m:02d}:{s:02d}''' if h != 0 else F'''{m:02d}:{s:02d}'''
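# Illustrative behavior (added comment): format_time(75) returns "01:15" and
# format_time(3661) returns "1:01:01"; the hours field is shown only when non-zero.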
def html_progress_bar(value, total, prefix, label, width=300) -> str:
    # docstyle-ignore
    return F'''
    <div>
      {prefix}
      <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
      {label}
    </div>
    '''
def text_to_html_table(items) -> str:
    "Put the texts in `items` in an HTML table."
    html_code = '<table border="1" class="dataframe">\n'
    html_code += """  <thead>\n    <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += F'''      <th>{i}</th>\n'''
    html_code += "    </tr>\n  </thead>\n  <tbody>\n"
    for line in items[1:]:
        html_code += "    <tr>\n"
        for elt in line:
            elt = F'''{elt:.6f}''' if isinstance(elt, float) else str(elt)
            html_code += F'''      <td>{elt}</td>\n'''
        html_code += "    </tr>\n"
    html_code += "  </tbody>\n</table><p>"
    return html_code
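# Illustrative behavior (added comment): text_to_html_table([["Step", "Training Loss"],
# [10, 0.52345]]) renders a two-column HTML table; float cells are formatted to six
# decimal places, everything else via str().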
class NotebookProgressBar:
    warmup = 5
    update_every = 0.2

    def __init__(
        self,
        total: int,
        prefix: Optional[str] = None,
        leave: bool = True,
        parent: Optional["NotebookTrainingTracker"] = None,
        width: int = 300,
    ):
        """simple docstring"""
        self.total = total
        self.prefix = '' if prefix is None else prefix
        self.leave = leave
        self.parent = parent
        self.width = width
        self.last_value = None
        self.comment = None
        self.output = None
    def update(self, value: int, force_update: bool = False, comment: str = None):
        """simple docstring"""
        self.value = value
        if comment is not None:
            self.comment = comment
        if self.last_value is None:
            self.start_time = self.last_time = time.time()
            self.start_value = self.last_value = value
            self.elapsed_time = self.predicted_remaining = None
            self.first_calls = self.warmup
            self.wait_for = 1
            self.update_bar(value)
        elif value <= self.last_value and not force_update:
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total):
            if self.first_calls > 0:
                self.first_calls -= 1
            current_time = time.time()
            self.elapsed_time = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
            if value > self.start_value:
                self.average_time_per_item = self.elapsed_time / (value - self.start_value)
            else:
                self.average_time_per_item = None
            if value >= self.total:
                value = self.total
                self.predicted_remaining = None
                if not self.leave:
                    self.close()
            elif self.average_time_per_item is not None:
                self.predicted_remaining = self.average_time_per_item * (self.total - value)
            self.update_bar(value)
            self.last_value = value
            self.last_time = current_time
            if self.average_time_per_item is None:
                self.wait_for = 1
            else:
                self.wait_for = max(int(self.update_every / self.average_time_per_item), 1)
    def update_bar(self, value, comment=None):
        """simple docstring"""
        spaced_value = ' ' * (len(str(self.total)) - len(str(value))) + str(value)
        if self.elapsed_time is None:
            self.label = F'''[{spaced_value}/{self.total} : < :'''
        elif self.predicted_remaining is None:
            self.label = F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time)}'''
        else:
            self.label = (
                F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <'''
                F''' {format_time(self.predicted_remaining)}'''
            )
            self.label += F''', {1/self.average_time_per_item:.2f} it/s'''
        self.label += "]" if self.comment is None or len(self.comment) == 0 else F''', {self.comment}]'''
        self.display()
    def display(self):
        """simple docstring"""
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.parent is not None:
            # If this is a child bar, the parent will take care of the display.
            self.parent.display()
            return
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))
    def close(self):
        """simple docstring"""
        if self.parent is None and self.output is not None:
            self.output.update(disp.HTML(''))
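# Hedged usage sketch (added; not part of the original module). Outside a Trainer,
# the bar can be driven by hand; `update` throttles redraws through `wait_for`,
# derived from the measured average time per item and the `update_every` budget:
#
#     pbar = NotebookProgressBar(100, prefix="Processing")
#     for step in range(100):
#         ...  # do work
#         pbar.update(step + 1, comment=f"step {step + 1}")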
class NotebookTrainingTracker(NotebookProgressBar):
    def __init__(self, num_steps, column_names=None):
        """simple docstring"""
        super().__init__(num_steps)
        self.inner_table = None if column_names is None else [column_names]
        self.child_bar = None

    def display(self):
        """simple docstring"""
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.inner_table is not None:
            self.html_code += text_to_html_table(self.inner_table)
        if self.child_bar is not None:
            self.html_code += self.child_bar.html_code
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def write_line(self, values):
        """simple docstring"""
        if self.inner_table is None:
            self.inner_table = [list(values.keys()), list(values.values())]
        else:
            columns = self.inner_table[0]
            if len(self.inner_table) == 1:
                # We give a chance to update the column names at the first iteration
                for key in values.keys():
                    if key not in columns:
                        columns.append(key)
                self.inner_table[0] = columns
            self.inner_table.append([values[c] for c in columns])

    def add_child(self, total, prefix=None, width=300):
        """simple docstring"""
        self.child_bar = NotebookProgressBar(total, prefix=prefix, parent=self, width=width)
        return self.child_bar

    def remove_child(self):
        """simple docstring"""
        self.child_bar = None
        self.display()
class NotebookProgressCallback(TrainerCallback):
    def __init__(self):
        """simple docstring"""
        self.training_tracker = None
        self.prediction_bar = None
        self._force_next_update = False
    def on_train_begin(self, args, state, control, **kwargs):
        """simple docstring"""
        self.first_column = 'Epoch' if args.evaluation_strategy == IntervalStrategy.EPOCH else 'Step'
        self.training_loss = 0
        self.last_log = 0
        column_names = [self.first_column] + ['Training Loss']
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append('Validation Loss')
        self.training_tracker = NotebookTrainingTracker(state.max_steps, column_names)
    def on_step_end(self, args, state, control, **kwargs):
        """simple docstring"""
        epoch = int(state.epoch) if int(state.epoch) == state.epoch else F'''{state.epoch:.2f}'''
        self.training_tracker.update(
            state.global_step + 1, comment=F'''Epoch {epoch}/{state.num_train_epochs}''', force_update=self._force_next_update, )
        self._force_next_update = False
    def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs):
        """simple docstring"""
        if not has_length(eval_dataloader):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader))
            else:
                self.prediction_bar = NotebookProgressBar(len(eval_dataloader))
            self.prediction_bar.update(1)
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1)
    def on_predict(self, args, state, control, **kwargs):
        """simple docstring"""
        if self.prediction_bar is not None:
            self.prediction_bar.close()
        self.prediction_bar = None
    def on_log(self, args, state, control, logs=None, **kwargs):
        """simple docstring"""
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            values = {'Training Loss': logs['loss']}
            # First column is necessarily Step since we're not in epoch eval strategy
            values['Step'] = state.global_step
            self.training_tracker.write_line(values)
    def on_evaluate(self, args, state, control, metrics=None, **kwargs):
        """simple docstring"""
        if self.training_tracker is not None:
            values = {'Training Loss': 'No log', 'Validation Loss': 'No log'}
            for log in reversed(state.log_history):
                if "loss" in log:
                    values['Training Loss'] = log['loss']
                    break
            if self.first_column == "Epoch":
                values['Epoch'] = int(state.epoch)
            else:
                values['Step'] = state.global_step
            metric_key_prefix = 'eval'
            for k in metrics:
                if k.endswith('_loss'):
                    metric_key_prefix = re.sub(R'\_loss$', '', k)
            _ = metrics.pop('total_flos', None)
            _ = metrics.pop('epoch', None)
            _ = metrics.pop(F'''{metric_key_prefix}_runtime''', None)
            _ = metrics.pop(F'''{metric_key_prefix}_samples_per_second''', None)
            _ = metrics.pop(F'''{metric_key_prefix}_steps_per_second''', None)
            _ = metrics.pop(F'''{metric_key_prefix}_jit_compilation_time''', None)
            for k, v in metrics.items():
                if k == F'''{metric_key_prefix}_loss''':
                    values['Validation Loss'] = v
                else:
                    splits = k.split('_')
                    name = ' '.join([part.capitalize() for part in splits[1:]])
                    values[name] = v
            self.training_tracker.write_line(values)
            self.training_tracker.remove_child()
            self.prediction_bar = None
            # Evaluation takes a long time so we should force the next update.
            self._force_next_update = True
    def on_train_end(self, args, state, control, **kwargs):
        """simple docstring"""
        self.training_tracker.update(
            state.global_step, comment=F'''Epoch {int(state.epoch)}/{state.num_train_epochs}''', force_update=True)
        self.training_tracker = None
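# Hedged usage sketch (added): this is the progress callback transformers selects
# automatically in notebook environments, but it can also be attached explicitly
# (`model`, `training_args`, and `train_ds` are hypothetical stand-ins):
#
#     from transformers import Trainer
#
#     trainer = Trainer(model=model, args=training_args, train_dataset=train_ds,
#                       callbacks=[NotebookProgressCallback()])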
| 89 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A ( __snake_case , unittest.TestCase ):
__magic_name__ = KandinskyInpaintPipeline
__magic_name__ = ['''prompt''', '''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image''']
__magic_name__ = [
'''prompt''',
'''negative_prompt''',
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
'''mask_image''',
]
__magic_name__ = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''negative_prompt''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
__magic_name__ = False
@property
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
return 32
@property
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
return 32
@property
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
return self.time_input_dim
@property
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
return self.time_input_dim * 4
@property
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
return 100
@property
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
A : Optional[Any] = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
return tokenizer
@property
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
torch.manual_seed(0 )
A : List[str] = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
A : List[str] = MultilingualCLIP(SCREAMING_SNAKE_CASE )
A : List[str] = text_encoder.eval()
return text_encoder
@property
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
torch.manual_seed(0 )
A : str = {
'''in_channels''': 9,
            # Out channels is double the in channels because the model predicts both mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
A : Union[str, Any] = UNetaDConditionModel(**SCREAMING_SNAKE_CASE )
return model
@property
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
torch.manual_seed(0 )
A : int = VQModel(**self.dummy_movq_kwargs )
return model
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
A : Optional[Any] = self.dummy_text_encoder
A : Optional[int] = self.dummy_tokenizer
A : Optional[Any] = self.dummy_unet
A : str = self.dummy_movq
A : Dict = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule='''linear''' , beta_start=0.00_085 , beta_end=0.012 , clip_sample=SCREAMING_SNAKE_CASE , set_alpha_to_one=SCREAMING_SNAKE_CASE , steps_offset=1 , prediction_type='''epsilon''' , thresholding=SCREAMING_SNAKE_CASE , )
A : int = {
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=0 ) -> int:
"""simple docstring"""
A : Union[str, Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(SCREAMING_SNAKE_CASE ) ).to(SCREAMING_SNAKE_CASE )
A : List[Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(SCREAMING_SNAKE_CASE )
# create init_image
A : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(SCREAMING_SNAKE_CASE ) ).to(SCREAMING_SNAKE_CASE )
A : Tuple = image.cpu().permute(0 , 2 , 3 , 1 )[0]
A : Optional[Any] = Image.fromarray(np.uinta(SCREAMING_SNAKE_CASE ) ).convert('''RGB''' ).resize((256, 256) )
# create mask
A : Union[str, Any] = np.ones((64, 64) , dtype=np.floataa )
A : List[Any] = 0
if str(SCREAMING_SNAKE_CASE ).startswith('''mps''' ):
A : str = torch.manual_seed(SCREAMING_SNAKE_CASE )
else:
A : List[str] = torch.Generator(device=SCREAMING_SNAKE_CASE ).manual_seed(SCREAMING_SNAKE_CASE )
A : Optional[Any] = {
'''prompt''': '''horse''',
'''image''': init_image,
'''mask_image''': mask,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 2,
'''guidance_scale''': 4.0,
'''output_type''': '''np''',
}
return inputs
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
A : Optional[Any] = '''cpu'''
A : Tuple = self.get_dummy_components()
A : Any = self.pipeline_class(**SCREAMING_SNAKE_CASE )
A : Any = pipe.to(SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE )
A : Dict = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE ) )
A : Tuple = output.images
A : List[Any] = pipe(
**self.get_dummy_inputs(SCREAMING_SNAKE_CASE ) , return_dict=SCREAMING_SNAKE_CASE , )[0]
A : Optional[int] = image[0, -3:, -3:, -1]
A : List[str] = image_from_tuple[0, -3:, -3:, -1]
print(F'image.shape {image.shape}' )
assert image.shape == (1, 64, 64, 3)
A : List[str] = np.array(
[0.8_326_919, 0.73_790_467, 0.20_918_581, 0.9_309_612, 0.5_511_791, 0.43_713_328, 0.5_513_321, 0.49_922_934, 0.59_497_786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
A : Union[str, Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy''' )
A : List[str] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
A : List[str] = np.ones((768, 768) , dtype=np.floataa )
A : Union[str, Any] = 0
A : Dict = '''a hat'''
A : List[str] = KandinskyPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(SCREAMING_SNAKE_CASE )
A : List[str] = KandinskyInpaintPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-inpaint''' , torch_dtype=torch.floataa )
A : Tuple = pipeline.to(SCREAMING_SNAKE_CASE )
pipeline.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE )
A : int = torch.Generator(device='''cpu''' ).manual_seed(0 )
A : Dict = pipe_prior(
SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
A : Dict = pipeline(
SCREAMING_SNAKE_CASE , image=SCREAMING_SNAKE_CASE , mask_image=SCREAMING_SNAKE_CASE , image_embeds=SCREAMING_SNAKE_CASE , negative_image_embeds=SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , num_inference_steps=100 , height=768 , width=768 , output_type='''np''' , )
A : Optional[Any] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
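# Hedged end-to-end sketch (added): the slow test chains a prior pipeline (text ->
# image embeddings) into the inpaint pipeline (embeddings + image + mask -> image).
# A compact version using the same checkpoints as above; `init_image` (a PIL image)
# and `mask` (a float array built as in the test: ones with one region set to 0)
# are assumed to exist:
#
#     prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior")
#     image_emb, negative_emb = prior("a hat").to_tuple()
#     pipe = KandinskyInpaintPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-inpaint")
#     out = pipe("a hat", image=init_image, mask_image=mask, image_embeds=image_emb,
#                negative_image_embeds=negative_emb, height=768, width=768)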
| 718 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
lowercase : Dict = {'LayoutLMv2Config', 'LayoutLMv3Config'}
@is_pipeline_test
class A ( unittest.TestCase ):
__magic_name__ = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
__magic_name__ = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
__magic_name__ = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
__magic_name__ = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
A : Any = ZeroShotClassificationPipeline(
            model=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , candidate_labels=['''politics''', '''health'''] )
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
A : Optional[int] = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics''' )
self.assertEqual(SCREAMING_SNAKE_CASE , {'''sequence''': ANY(SCREAMING_SNAKE_CASE ), '''labels''': [ANY(SCREAMING_SNAKE_CASE )], '''scores''': [ANY(SCREAMING_SNAKE_CASE )]} )
# No kwarg
A : Dict = classifier('''Who are you voting for in 2020?''' , ['''politics'''] )
self.assertEqual(SCREAMING_SNAKE_CASE , {'''sequence''': ANY(SCREAMING_SNAKE_CASE ), '''labels''': [ANY(SCREAMING_SNAKE_CASE )], '''scores''': [ANY(SCREAMING_SNAKE_CASE )]} )
A : str = classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics'''] )
self.assertEqual(SCREAMING_SNAKE_CASE , {'''sequence''': ANY(SCREAMING_SNAKE_CASE ), '''labels''': [ANY(SCREAMING_SNAKE_CASE )], '''scores''': [ANY(SCREAMING_SNAKE_CASE )]} )
A : str = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics, public health''' )
self.assertEqual(
SCREAMING_SNAKE_CASE , {'''sequence''': ANY(SCREAMING_SNAKE_CASE ), '''labels''': [ANY(SCREAMING_SNAKE_CASE ), ANY(SCREAMING_SNAKE_CASE )], '''scores''': [ANY(SCREAMING_SNAKE_CASE ), ANY(SCREAMING_SNAKE_CASE )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 )
A : Optional[int] = classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health'''] )
self.assertEqual(
SCREAMING_SNAKE_CASE , {'''sequence''': ANY(SCREAMING_SNAKE_CASE ), '''labels''': [ANY(SCREAMING_SNAKE_CASE ), ANY(SCREAMING_SNAKE_CASE )], '''scores''': [ANY(SCREAMING_SNAKE_CASE ), ANY(SCREAMING_SNAKE_CASE )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 )
A : Any = classifier(
'''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''This text is about {}''' )
self.assertEqual(SCREAMING_SNAKE_CASE , {'''sequence''': ANY(SCREAMING_SNAKE_CASE ), '''labels''': [ANY(SCREAMING_SNAKE_CASE )], '''scores''': [ANY(SCREAMING_SNAKE_CASE )]} )
# https://github.com/huggingface/transformers/issues/13846
A : List[str] = classifier(['''I am happy'''] , ['''positive''', '''negative'''] )
self.assertEqual(
SCREAMING_SNAKE_CASE , [
{'''sequence''': ANY(SCREAMING_SNAKE_CASE ), '''labels''': [ANY(SCREAMING_SNAKE_CASE ), ANY(SCREAMING_SNAKE_CASE )], '''scores''': [ANY(SCREAMING_SNAKE_CASE ), ANY(SCREAMING_SNAKE_CASE )]}
for i in range(1 )
] , )
A : Dict = classifier(['''I am happy''', '''I am sad'''] , ['''positive''', '''negative'''] )
self.assertEqual(
SCREAMING_SNAKE_CASE , [
{'''sequence''': ANY(SCREAMING_SNAKE_CASE ), '''labels''': [ANY(SCREAMING_SNAKE_CASE ), ANY(SCREAMING_SNAKE_CASE )], '''scores''': [ANY(SCREAMING_SNAKE_CASE ), ANY(SCREAMING_SNAKE_CASE )]}
for i in range(2 )
] , )
with self.assertRaises(SCREAMING_SNAKE_CASE ):
classifier('''''' , candidate_labels='''politics''' )
with self.assertRaises(SCREAMING_SNAKE_CASE ):
classifier(SCREAMING_SNAKE_CASE , candidate_labels='''politics''' )
with self.assertRaises(SCREAMING_SNAKE_CASE ):
classifier('''Who are you voting for in 2020?''' , candidate_labels='''''' )
with self.assertRaises(SCREAMING_SNAKE_CASE ):
classifier('''Who are you voting for in 2020?''' , candidate_labels=SCREAMING_SNAKE_CASE )
with self.assertRaises(SCREAMING_SNAKE_CASE ):
classifier(
'''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''Not formatting template''' , )
with self.assertRaises(SCREAMING_SNAKE_CASE ):
classifier(
'''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template=SCREAMING_SNAKE_CASE , )
self.run_entailment_id(SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
A : List[Any] = zero_shot_classifier.model.config
A : int = config.labelaid
A : Union[str, Any] = zero_shot_classifier.entailment_id
A : str = {'''LABEL_0''': 0, '''LABEL_1''': 1, '''LABEL_2''': 2}
self.assertEqual(zero_shot_classifier.entailment_id , -1 )
A : Optional[Any] = {'''entailment''': 0, '''neutral''': 1, '''contradiction''': 2}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
A : List[str] = {'''ENTAIL''': 0, '''NON-ENTAIL''': 1}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
A : List[str] = {'''ENTAIL''': 2, '''NEUTRAL''': 1, '''CONTR''': 0}
self.assertEqual(zero_shot_classifier.entailment_id , 2 )
A : Any = original_labelaid
self.assertEqual(SCREAMING_SNAKE_CASE , zero_shot_classifier.entailment_id )
@require_torch
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
A : Optional[int] = pipeline(
'''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
'''Who are you voting for in 2020?''' * 100 , candidate_labels=['''politics''', '''public health''', '''science'''] )
@require_torch
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
A : Tuple = pipeline(
'''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , )
A : Optional[int] = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''science''', '''public health''', '''politics'''],
'''scores''': [0.333, 0.333, 0.333],
} , )
@require_tf
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
A : Optional[Any] = pipeline(
'''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''tf''' , )
A : Union[str, Any] = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''science''', '''public health''', '''politics'''],
'''scores''': [0.333, 0.333, 0.333],
} , )
@slow
@require_torch
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
A : str = pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''pt''' )
A : Tuple = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''politics''', '''public health''', '''science'''],
'''scores''': [0.976, 0.015, 0.009],
} , )
A : List[str] = zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=SCREAMING_SNAKE_CASE , )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE ) , {
'''sequence''': (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
'''scores''': [0.817, 0.713, 0.018, 0.018],
} , )
@slow
@require_tf
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
A : List[str] = pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''tf''' )
A : List[Any] = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''politics''', '''public health''', '''science'''],
'''scores''': [0.976, 0.015, 0.009],
} , )
A : Tuple = zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=SCREAMING_SNAKE_CASE , )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE ) , {
'''sequence''': (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
'''scores''': [0.817, 0.713, 0.018, 0.018],
} , )
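# Minimal usage sketch (added; mirrors the API exercised by the tests above):
#
#     from transformers import pipeline
#
#     classifier = pipeline("zero-shot-classification", model="roberta-large-mnli")
#     result = classifier("Who are you voting for in 2020?",
#                         candidate_labels=["politics", "public health", "science"])
#     # result -> {"sequence": ..., "labels": [...], "scores": [...]},
#     # with labels sorted by descending score.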
| 343 | 0 |
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
lowerCAmelCase : Optional[int] = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class _A ( __magic_name__):
def __init__( self , *_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ):
"""simple docstring"""
super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Optional[int] = eval_examples
SCREAMING_SNAKE_CASE_ : Dict = post_process_function
SCREAMING_SNAKE_CASE_ : Tuple = quant_trainer_args
SCREAMING_SNAKE_CASE_ : List[Any] = 128 # default number of calibration samples
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE=None ):
"""simple docstring"""
if calib_dataset is None and self.calib_dataset is None:
            raise ValueError('Trainer: calibration requires a calib_dataset.' )
SCREAMING_SNAKE_CASE_ : Optional[int] = calib_dataset if calib_dataset is not None else self.calib_dataset
SCREAMING_SNAKE_CASE_ : List[str] = self._remove_unused_columns(_SCREAMING_SNAKE_CASE , description='Calibration' )
return DataLoader(
_SCREAMING_SNAKE_CASE , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=_SCREAMING_SNAKE_CASE , )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = self.train_dataset if calib_dataset is None else calib_dataset
SCREAMING_SNAKE_CASE_ : List[str] = self.get_calib_dataloader(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : List[str] = self.model
quant_trainer.configure_model(_SCREAMING_SNAKE_CASE , self.quant_trainer_args , calib=_SCREAMING_SNAKE_CASE )
model.eval()
quant_trainer.enable_calibration(_SCREAMING_SNAKE_CASE )
logger.info('***** Running calibration *****' )
logger.info(f" Num examples = {self.calib_num}" )
logger.info(f" Batch size = {calib_dataloader.batch_size}" )
for step, inputs in enumerate(_SCREAMING_SNAKE_CASE ):
# Prediction step
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = self.prediction_step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , prediction_loss_only=_SCREAMING_SNAKE_CASE )
if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
break
quant_trainer.finish_calibration(_SCREAMING_SNAKE_CASE , self.quant_trainer_args )
SCREAMING_SNAKE_CASE_ : Optional[int] = model
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE = "eval" ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.eval_dataset if eval_dataset is None else eval_dataset
SCREAMING_SNAKE_CASE_ : List[Any] = self.get_eval_dataloader(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Optional[int] = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
SCREAMING_SNAKE_CASE_ : Tuple = self.compute_metrics
SCREAMING_SNAKE_CASE_ : Union[str, Any] = None
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
SCREAMING_SNAKE_CASE_ : Optional[Any] = eval_loop(
_SCREAMING_SNAKE_CASE , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_SCREAMING_SNAKE_CASE , )
finally:
SCREAMING_SNAKE_CASE_ : Optional[int] = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
SCREAMING_SNAKE_CASE_ : List[str] = self.post_process_function(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , output.predictions )
SCREAMING_SNAKE_CASE_ : Dict = self.compute_metrics(_SCREAMING_SNAKE_CASE )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"{metric_key_prefix}_" ):
SCREAMING_SNAKE_CASE_ : List[Any] = metrics.pop(_SCREAMING_SNAKE_CASE )
self.log(_SCREAMING_SNAKE_CASE )
else:
SCREAMING_SNAKE_CASE_ : Optional[int] = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
SCREAMING_SNAKE_CASE_ : List[str] = self.callback_handler.on_evaluate(self.args , self.state , self.control , _SCREAMING_SNAKE_CASE )
return metrics
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE = "test" ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = self.get_test_dataloader(_SCREAMING_SNAKE_CASE )
# Temporarily disable metric computation, we will do it in the loop here.
SCREAMING_SNAKE_CASE_ : Tuple = self.compute_metrics
SCREAMING_SNAKE_CASE_ : Dict = None
SCREAMING_SNAKE_CASE_ : List[str] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
SCREAMING_SNAKE_CASE_ : List[Any] = eval_loop(
_SCREAMING_SNAKE_CASE , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_SCREAMING_SNAKE_CASE , )
finally:
SCREAMING_SNAKE_CASE_ : Optional[int] = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
SCREAMING_SNAKE_CASE_ : str = self.post_process_function(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , output.predictions , 'predict' )
SCREAMING_SNAKE_CASE_ : Tuple = self.compute_metrics(_SCREAMING_SNAKE_CASE )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"{metric_key_prefix}_" ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = metrics.pop(_SCREAMING_SNAKE_CASE )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=_SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE="./" ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.eval_dataset
SCREAMING_SNAKE_CASE_ : Dict = self.get_eval_dataloader(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : List[str] = next(iter(_SCREAMING_SNAKE_CASE ) )
# saving device - to make it consistent
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
# convert to tuple
SCREAMING_SNAKE_CASE_ : int = tuple(v.to(_SCREAMING_SNAKE_CASE ) for k, v in batch.items() )
logger.info('Converting model to be onnx compatible' )
from pytorch_quantization.nn import TensorQuantizer
SCREAMING_SNAKE_CASE_ : List[Any] = True
SCREAMING_SNAKE_CASE_ : Tuple = self.model.to(_SCREAMING_SNAKE_CASE )
model.eval()
model.float()
SCREAMING_SNAKE_CASE_ : Any = model.module if hasattr(_SCREAMING_SNAKE_CASE , 'module' ) else model
quant_trainer.configure_model(_SCREAMING_SNAKE_CASE , self.quant_trainer_args )
SCREAMING_SNAKE_CASE_ : int = os.path.join(_SCREAMING_SNAKE_CASE , 'model.onnx' )
logger.info(f"exporting model to {output_model_file}" )
SCREAMING_SNAKE_CASE_ : List[Any] = {0: 'batch_size', 1: 'seq_len'}
torch.onnx.export(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , export_params=_SCREAMING_SNAKE_CASE , opset_version=13 , do_constant_folding=_SCREAMING_SNAKE_CASE , input_names=['input_ids', 'attention_mask', 'token_type_ids'] , output_names=['output_start_logits', 'output_end_logits'] , dynamic_axes={
'input_ids': axes,
'attention_mask': axes,
'token_type_ids': axes,
'output_start_logits': axes,
'output_end_logits': axes,
} , verbose=_SCREAMING_SNAKE_CASE , )
logger.info('onnx export finished' )
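# Hedged standalone sketch (added; `model` and the input tensors are hypothetical
# stand-ins): the dynamic_axes mapping above makes batch size and sequence length
# symbolic, so one exported graph serves any input shape:
#
#     axes = {0: "batch_size", 1: "seq_len"}
#     torch.onnx.export(
#         model,
#         (input_ids, attention_mask, token_type_ids),
#         "model.onnx",
#         opset_version=13,
#         input_names=["input_ids", "attention_mask", "token_type_ids"],
#         output_names=["output_start_logits", "output_end_logits"],
#         dynamic_axes={name: axes for name in ["input_ids", "attention_mask",
#                       "token_type_ids", "output_start_logits", "output_end_logits"]},
#     )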
| 511 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class _A ( __magic_name__):
SCREAMING_SNAKE_CASE : List[Any] = (UniPCMultistepScheduler,)
SCREAMING_SNAKE_CASE : Union[str, Any] = (('''num_inference_steps''', 25),)
def UpperCAmelCase ( self , **_SCREAMING_SNAKE_CASE ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = {
'num_train_timesteps': 1000,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'solver_order': 2,
'solver_type': 'bh2',
}
config.update(**_SCREAMING_SNAKE_CASE )
return config
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE=0 , **_SCREAMING_SNAKE_CASE ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = dict(self.forward_default_kwargs )
SCREAMING_SNAKE_CASE_ : Optional[Any] = kwargs.pop('num_inference_steps' , _SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Tuple = self.dummy_sample
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0.1 * sample
SCREAMING_SNAKE_CASE_ : str = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE_ : Dict = self.get_scheduler_config(**_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : List[Any] = scheduler_class(**_SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
SCREAMING_SNAKE_CASE_ : Dict = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : str = scheduler_class.from_pretrained(_SCREAMING_SNAKE_CASE )
new_scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
SCREAMING_SNAKE_CASE_ : str = dummy_past_residuals[: new_scheduler.config.solver_order]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = sample, sample
for t in range(_SCREAMING_SNAKE_CASE , time_step + scheduler.config.solver_order + 1 ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample
SCREAMING_SNAKE_CASE_ : Optional[Any] = new_scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE=0 , **_SCREAMING_SNAKE_CASE ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = dict(self.forward_default_kwargs )
SCREAMING_SNAKE_CASE_ : str = kwargs.pop('num_inference_steps' , _SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : List[str] = self.dummy_sample
SCREAMING_SNAKE_CASE_ : List[Any] = 0.1 * sample
SCREAMING_SNAKE_CASE_ : List[Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE_ : Any = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ : Dict = scheduler_class(**_SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
# copy over dummy past residuals (must be after setting timesteps)
SCREAMING_SNAKE_CASE_ : Dict = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : List[str] = scheduler_class.from_pretrained(_SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
new_scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
# copy over dummy past residual (must be after setting timesteps)
SCREAMING_SNAKE_CASE_ : List[Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
SCREAMING_SNAKE_CASE_ : Tuple = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample
SCREAMING_SNAKE_CASE_ : List[str] = new_scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if scheduler is None:
SCREAMING_SNAKE_CASE_ : Tuple = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_scheduler_config(**_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : str = scheduler_class(**_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ : int = self.get_scheduler_config(**_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : List[Any] = scheduler_class(**_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Dict = 10
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.dummy_model()
SCREAMING_SNAKE_CASE_ : Dict = self.dummy_sample_deter
scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE_ : Dict = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : str = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).prev_sample
return sample
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = dict(self.forward_default_kwargs )
SCREAMING_SNAKE_CASE_ : str = kwargs.pop('num_inference_steps' , _SCREAMING_SNAKE_CASE )
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE_ : str = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ : List[Any] = scheduler_class(**_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : int = self.dummy_sample
SCREAMING_SNAKE_CASE_ : Optional[Any] = 0.1 * sample
if num_inference_steps is not None and hasattr(_SCREAMING_SNAKE_CASE , 'set_timesteps' ):
scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
elif num_inference_steps is not None and not hasattr(_SCREAMING_SNAKE_CASE , 'set_timesteps' ):
SCREAMING_SNAKE_CASE_ : str = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
SCREAMING_SNAKE_CASE_ : Dict = [residual + 0.2, residual + 0.15, residual + 0.10]
SCREAMING_SNAKE_CASE_ : str = dummy_past_residuals[: scheduler.config.solver_order]
SCREAMING_SNAKE_CASE_ : Optional[Any] = scheduler.timesteps[5]
SCREAMING_SNAKE_CASE_ : Optional[Any] = scheduler.timesteps[6]
SCREAMING_SNAKE_CASE_ : List[Any] = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample
SCREAMING_SNAKE_CASE_ : List[Any] = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = UniPCMultistepScheduler(**self.get_scheduler_config() )
SCREAMING_SNAKE_CASE_ : List[str] = self.full_loop(scheduler=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : List[Any] = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.2464 ) < 1e-3
SCREAMING_SNAKE_CASE_ : List[Any] = DPMSolverSinglestepScheduler.from_config(scheduler.config )
SCREAMING_SNAKE_CASE_ : Any = DEISMultistepScheduler.from_config(scheduler.config )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = DPMSolverMultistepScheduler.from_config(scheduler.config )
SCREAMING_SNAKE_CASE_ : Dict = UniPCMultistepScheduler.from_config(scheduler.config )
SCREAMING_SNAKE_CASE_ : Tuple = self.full_loop(scheduler=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : List[str] = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.2464 ) < 1e-3
def UpperCAmelCase ( self ):
"""simple docstring"""
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=_SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self ):
"""simple docstring"""
self.check_over_configs(thresholding=_SCREAMING_SNAKE_CASE )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=_SCREAMING_SNAKE_CASE , prediction_type=_SCREAMING_SNAKE_CASE , sample_max_value=_SCREAMING_SNAKE_CASE , solver_order=_SCREAMING_SNAKE_CASE , solver_type=_SCREAMING_SNAKE_CASE , )
def UpperCAmelCase ( self ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self ):
"""simple docstring"""
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=_SCREAMING_SNAKE_CASE , solver_type=_SCREAMING_SNAKE_CASE , prediction_type=_SCREAMING_SNAKE_CASE , )
SCREAMING_SNAKE_CASE_ : Optional[int] = self.full_loop(
solver_order=_SCREAMING_SNAKE_CASE , solver_type=_SCREAMING_SNAKE_CASE , prediction_type=_SCREAMING_SNAKE_CASE , )
assert not torch.isnan(_SCREAMING_SNAKE_CASE ).any(), "Samples have nan numbers"
def UpperCAmelCase ( self ):
"""simple docstring"""
self.check_over_configs(lower_order_final=_SCREAMING_SNAKE_CASE )
self.check_over_configs(lower_order_final=_SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self ):
"""simple docstring"""
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=_SCREAMING_SNAKE_CASE , time_step=0 )
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = self.full_loop()
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.2464 ) < 1e-3
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = self.full_loop(prediction_type='v_prediction' )
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.1014 ) < 1e-3
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ : int = self.get_scheduler_config(thresholding=_SCREAMING_SNAKE_CASE , dynamic_thresholding_ratio=0 )
SCREAMING_SNAKE_CASE_ : Dict = scheduler_class(**_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : List[str] = 10
SCREAMING_SNAKE_CASE_ : int = self.dummy_model()
SCREAMING_SNAKE_CASE_ : int = self.dummy_sample_deter.half()
scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE_ : Optional[int] = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : int = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).prev_sample
assert sample.dtype == torch.floataa
def UpperCAmelCase ( self , **_SCREAMING_SNAKE_CASE ):
"""simple docstring"""
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE_ : Tuple = self.get_scheduler_config(**_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : int = scheduler_class(**_SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
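# Hedged usage sketch (added): outside the test harness, the scheduler drives a
# denoising loop like this (`unet` and `sample` are hypothetical stand-ins):
#
#     scheduler = UniPCMultistepScheduler(num_train_timesteps=1000, solver_order=2,
#                                         solver_type="bh2")
#     scheduler.set_timesteps(25)
#     for t in scheduler.timesteps:
#         noise_pred = unet(sample, t)
#         sample = scheduler.step(noise_pred, t, sample).prev_sample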
| 511 | 1 |
from collections.abc import Generator
from math import sin
def to_little_endian(string_aa: bytes) -> bytes:
    if len(string_aa) != 32:
        raise ValueError('Input must be of length 32')
    little_endian = b''
    for i in [3, 2, 1, 0]:
        little_endian += string_aa[8 * i : 8 * i + 8]
    return little_endian
def reformat_hex(i: int) -> bytes:
    if i < 0:
        raise ValueError('Input must be non-negative')
    hex_rep = format(i, '08x')[-8:]
    little_endian_hex = b''
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('utf-8')
    return little_endian_hex
def preprocess(message: bytes) -> bytes:
    bit_string = b''
    for char in message:
        bit_string += format(char, '08b').encode('utf-8')
    start_len = format(len(bit_string), '064b').encode('utf-8')
    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])
    return bit_string
def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    if len(bit_string) % 512 != 0:
        raise ValueError('Input must have length that\'s a multiple of 512')
    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words
def not_aa(i: int) -> int:
    if i < 0:
        raise ValueError('Input must be non-negative')
    i_str = format(i, '032b')
    new_str = ''
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)
def UpperCamelCase__ ( A__ , A__ ) -> int:
return (a + b) % 2**32
def UpperCamelCase__ ( A__ , A__ ) -> int:
if i < 0:
raise ValueError('Input must be non-negative' )
if shift < 0:
raise ValueError('Shift must be non-negative' )
return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def UpperCamelCase__ ( A__ ) -> bytes:
snake_case__ : Any = preprocess(A__ )
snake_case__ : Union[str, Any] = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
# Starting states
snake_case__ : Dict = 0x67_45_23_01
snake_case__ : List[str] = 0xEF_CD_AB_89
snake_case__ : List[str] = 0x98_BA_DC_FE
snake_case__ : int = 0x10_32_54_76
snake_case__ : Union[str, Any] = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
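
# Illustrative check (added for clarity; not part of the original module). These are
# the standard RFC 1321 test vectors, which md5_me above reproduces:
if __name__ == "__main__":
    assert md5_me(b"") == b"d41d8cd98f00b204e9800998ecf8427e"
    assert md5_me(b"abc") == b"900150983cd24fb0d6963f7d28e17f72"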
| 721 |
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Score `item` by the number of characters that match `main_target`."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice both parents at a random point and swap the tails."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    """Replace one random gene of `child` with probability MUTATION_PROBABILITY."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)


def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Pair `parent_1` with random parents and return the mutated children."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Evolve random strings toward `target`; return (generation, total_population, best)."""
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
lowerCAmelCase__ : str = (
'''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'''
)
lowerCAmelCase__ : Optional[Any] = list(
''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'''
'''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'''
)
lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ : List[str] = basic(target_str, genes_list)
print(
F'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'''
)
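
    # Smaller demonstration (added for illustration; not in the original script):
    # a short target over a four-letter gene pool converges in few generations.
    small_generation, small_population, small_best = basic("GATTACA", list("ACGT"), debug=False)
    print(f"\nSmall demo: {small_best!r} found in {small_generation} generations")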
| 699 | 0 |
'''simple docstring'''
def is_power_of_two(number: int) -> bool:
    """Return True if `number` is a power of two, via the n & (n - 1) trick.

    Note that 0 also returns True under this definition."""
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
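    # Illustration (added; not part of the original module): typical cases.
    assert is_power_of_two(1) and is_power_of_two(64)
    assert not is_power_of_two(6)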
| 330 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-50-one-to-many-mmt": (
            "https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-50-one-to-many-mmt": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]
# fmt: on
class MBart50Tokenizer(PreTrainedTokenizer):
    """Construct a MBart50 tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        src_lang=None,
        tgt_lang=None,
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (string) to a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by the translation pipeline to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Reset the special tokens to the source lang setting. prefix=[src_lang_code] and suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[src_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the special tokens to the target language setting. prefix=[tgt_lang_code] and suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]
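
# Minimal usage sketch (added for illustration; not part of the tokenizer module).
# The checkpoint name matches PRETRAINED_VOCAB_FILES_MAP above; this requires the
# `transformers` package and network access to the Hugging Face Hub:
#
#   tokenizer = MBart50Tokenizer.from_pretrained(
#       "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO"
#   )
#   batch = tokenizer("UN Chief Says There Is No Military Solution in Syria")
#   # input_ids start with the en_XX language-code id and end with </s>.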
| 330 | 1 |
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.is_decoder = True
        model = TFRoFormerForCausalLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        prediction_scores = model(inputs)["logits"]
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size]
        )

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFRoFormerForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFRoFormerForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFRoFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFRoFormerModel,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFRoFormerModel,
            "fill-mask": TFRoFormerForMaskedLM,
            "question-answering": TFRoFormerForQuestionAnswering,
            "text-classification": TFRoFormerForSequenceClassification,
            "text-generation": TFRoFormerForCausalLM,
            "token-classification": TFRoFormerForTokenClassification,
            "zero-shot": TFRoFormerForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )

    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True

        return False

    def setUp(self):
        self.model_tester = TFRoFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base")
        self.assertIsNotNone(model)
@require_tf
class TFRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        # TODO Replace vocab size
        vocab_size = 50000

        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape, expected_shape)

        print(output[:, :3, :3])

        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
[
[
[-0.12053341, -1.0264901, 0.29221946],
[-1.5133783, 0.197433, 0.15190607],
[-5.0135403, -3.900256, -0.84038764],
]
] )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_basic(self):
        input_ids = tf.constant([[4, 10]])
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6)
        emb = emb1(input_ids.shape)
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]]
        )
        tf.debugging.assert_near(emb, desired_weights, atol=self.tolerance)

    def test_positional_emb_weights_against_roformer(self):
        desired_weights = tf.constant(
[
[0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
[0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
] )
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512)
        emb1([2, 16, 512])
        weights = emb1.weight[:3, :5]
        tf.debugging.assert_near(weights, desired_weights, atol=self.tolerance)
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_apply_rotary_position_embeddings(self):
        # shape: (batch, heads, seq_len, head_dim) = (2, 12, 16, 64)
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100

        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64)
        sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :]

        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos, query_layer, key_layer
        )

        desired_query_layer = tf.constant(
[
[0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
[-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
[-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
[-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
[0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
[3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
] )
        desired_key_layer = tf.constant(
[
[0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
[0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
[1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
[2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
[-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
[-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
] )
        tf.debugging.assert_near(query_layer[0, 0, :6, :8], desired_query_layer, atol=self.tolerance)
        tf.debugging.assert_near(key_layer[0, 0, :6, :8], desired_key_layer, atol=self.tolerance)
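
# Note (added for illustration): apply_rotary_position_embeddings rotates each
# (even, odd) feature pair of q and k by a position-dependent angle theta_p, so the
# attention score q·k depends only on the relative position. Per feature pair,
# schematically:
#
#   q' = (q0 * cos(theta_p) - q1 * sin(theta_p), q1 * cos(theta_p) + q0 * sin(theta_p))
#   k' is rotated the same way, so <q'_m, k'_n> is a function of (m - n).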
| 709 |
from __future__ import annotations
lowerCamelCase__ = """Muhammad Umer Farooq"""
lowerCamelCase__ = """MIT"""
lowerCamelCase__ = """1.0.0"""
lowerCamelCase__ = """Muhammad Umer Farooq"""
lowerCamelCase__ = """contact@muhammadumerfarooq.me"""
lowerCamelCase__ = """Alpha"""
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser(HTMLParser):
    def __init__(self, domain: str) -> None:
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)
# Get main domain name (example.com)
def get_domain_name(url: str) -> str:
    return ".".join(get_sub_domain_name(url).split(".")[-2:])


# Get sub domain name (sub.example.com)
def get_sub_domain_name(url: str) -> str:
    return parse.urlparse(url).netloc
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : str = "https://github.com" ):
"""simple docstring"""
__a = get_domain_name(_SCREAMING_SNAKE_CASE )
# Initialize the parser
__a = Parser(_SCREAMING_SNAKE_CASE )
try:
# Open URL
__a = requests.get(_SCREAMING_SNAKE_CASE )
# pass the raw HTML to the parser to get links
parser.feed(r.text )
# Get links and loop through
__a = set()
for link in parser.urls:
# open URL.
# read = requests.get(link)
try:
__a = requests.get(_SCREAMING_SNAKE_CASE )
# Get the valid email.
__a = re.findall("""[a-zA-Z0-9]+@""" + domain , read.text )
# If not in list then append it.
for email in emails:
valid_emails.add(_SCREAMING_SNAKE_CASE )
except ValueError:
pass
except ValueError:
raise SystemExit(1 )
# Finally return a sorted list of email addresses with no duplicates.
return sorted(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowerCamelCase__ = emails_from_url("""https://github.com""")
print(F"""{len(emails)} emails found:""")
print("""\n""".join(sorted(emails)))
| 547 | 0 |
'''simple docstring'''
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def apply_forward_hook(method):
    """
    Decorator that applies a registered accelerate CpuOffload hook to an arbitrary
    method rather than only `forward`, so offloaded modules are moved to the
    execution device before the method runs.
    """
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse("0.17.0"):
        return method

    def wrapper(self, *args, **kwargs):
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
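
# Usage sketch (added for illustration; `MyAutoencoder.encode` is hypothetical and
# not part of this module). Decorating a non-`forward` entry point makes an installed
# accelerate hook move the module to the execution device first:
#
#   class MyAutoencoder(torch.nn.Module):
#       @apply_forward_hook
#       def encode(self, x):
#           ...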
| 430 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class DPMSolverSDEScheduler(metaclass=DummyObject):
    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])
| 143 | 0 |
"""simple docstring"""
from math import pow
def backtrack(
    needed_sum: int,
    power: int,
    current_number: int,
    current_sum: int,
    solutions_count: int,
) -> tuple[int, int]:
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
    return current_sum, solutions_count


def solve(needed_sum: int, power: int) -> int:
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10."
        )
    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod()
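    # Illustration (added; not part of the original module): 13 = 2**2 + 3**2 is the
    # only way to write 13 as a sum of squares of distinct natural numbers, so:
    assert solve(13, 2) == 1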
| 14 |
"""simple docstring"""
import string
def decrypt(message: str) -> None:
    """Print all 26 possible decryptions of a Caesar-ciphered message."""
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")


def main() -> None:
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
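
# Illustration (added; not in the original script): decrypt() prints every candidate
# shift and the reader picks the meaningful one, e.g. for "KHOOR" the key-3 line
# reads "HELLO".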
| 14 | 1 |
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class DownloadTests(unittest.TestCase):
    def test_download_only_pytorch(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
            )
            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], "snapshots"))]
            files = [item for sublist in all_root_files for item in sublist]

            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith(".bin") for f in files)
@slow
@require_flax
class FlaxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
    def test_dummy_all_tpus(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 4

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 4.1514745) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 49947.875) < 5e-1

        images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))

        assert len(images_pil) == num_samples

    def test_stable_diffusion_v1_4(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="flax", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.05652401)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2383808.2)) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16_with_safety(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16_ddim(self):
        scheduler = FlaxDDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            scheduler=scheduler,
            safety_checker=None,
        )
        scheduler_state = scheduler.create_state()

        params["scheduler"] = scheduler_state

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.045043945)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2347693.5)) < 5e-1

    def test_jax_memory_efficient_attention(self):
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples)

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        slice = images[2, 0, 256, 10:17, 1]

        # With memory efficient attention
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
            use_memory_efficient_attention=True,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images_eff = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        slice_eff = images_eff[2, 0, 256, 10:17, 1]

        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(slice_eff - slice).max() < 1e-2
| 192 |
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        q_groups=2,
        k_groups=2,
        v_groups=2,
        post_attention_groups=2,
        intermediate_groups=4,
        output_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return SqueezeBertConfig(
            embedding_size=self.hidden_size,
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            attention_probs_dropout_prob=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            q_groups=self.q_groups,
            k_groups=self.k_groups,
            v_groups=self.v_groups,
            post_attention_groups=self.post_attention_groups,
            intermediate_groups=self.intermediate_groups,
            output_groups=self.output_groups,
        )
    def create_and_check_squeezebert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_squeezebert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_squeezebert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_squeezebert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_squeezebert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_squeezebert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class SqueezeBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SqueezeBertModel,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SqueezeBertModel,
            "fill-mask": SqueezeBertForMaskedLM,
            "question-answering": SqueezeBertForQuestionAnswering,
            "text-classification": SqueezeBertForSequenceClassification,
            "token-classification": SqueezeBertForTokenClassification,
            "zero-shot": SqueezeBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False

    def setUp(self):
        self.model_tester = SqueezeBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_squeezebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs)

    def test_squeezebert_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs)

    def test_squeezebert_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs)

    def test_squeezebert_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs)

    def test_squeezebert_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs)

    def test_squeezebert_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_classification_head(self):
        model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")

        input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]])
        self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
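
# Note (added): the q/k/v, post_attention, intermediate and output "groups" arguments
# exercised above are SqueezeBert's grouped-convolution factors; a group count of 1
# makes that projection equivalent to an ordinary dense layer.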
| 192 | 1 |
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """Utility class for storing learned text embeddings for classifier free sampling."""

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"

            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
def __init__( self : int , __lowercase : VQModel , __lowercase : CLIPTextModel , __lowercase : CLIPTokenizer , __lowercase : TransformeraDModel , __lowercase : VQDiffusionScheduler , __lowercase : LearnedClassifierFreeSamplingEmbeddings , ):
"""simple docstring"""
super().__init__()
self.register_modules(
vqvae=__lowercase , transformer=__lowercase , text_encoder=__lowercase , tokenizer=__lowercase , scheduler=__lowercase , learned_classifier_free_sampling_embeddings=__lowercase , )
def snake_case__ ( self : Tuple , __lowercase : Dict , __lowercase : Optional[int] , __lowercase : Optional[Any] ):
"""simple docstring"""
snake_case_ = len(__lowercase ) if isinstance(__lowercase , __lowercase ) else 1
# get prompt text embeddings
snake_case_ = self.tokenizer(
__lowercase , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
snake_case_ = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
snake_case_ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f" {self.tokenizer.model_max_length} tokens: {removed_text}" )
snake_case_ = text_input_ids[:, : self.tokenizer.model_max_length]
snake_case_ = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
snake_case_ = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=__lowercase )
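# Illustrative sanity check (not part of the original pipeline): after this
# division every embedding row has unit L2 norm, e.g.
#   assert torch.allclose(prompt_embeds.norm(dim=-1), torch.ones(prompt_embeds.shape[:-1]), atol=1e-5)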
# duplicate text embeddings for each generation per prompt
snake_case_ = prompt_embeds.repeat_interleave(__lowercase , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
snake_case_ = self.learned_classifier_free_sampling_embeddings.embeddings
snake_case_ = negative_prompt_embeds.unsqueeze(0 ).repeat(__lowercase , 1 , 1 )
else:
snake_case_ = [""] * batch_size
snake_case_ = text_input_ids.shape[-1]
snake_case_ = self.tokenizer(
__lowercase , padding="max_length" , max_length=__lowercase , truncation=__lowercase , return_tensors="pt" , )
snake_case_ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
snake_case_ = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=__lowercase )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
snake_case_ = negative_prompt_embeds.shape[1]
snake_case_ = negative_prompt_embeds.repeat(1 , __lowercase , 1 )
snake_case_ = negative_prompt_embeds.view(batch_size * num_images_per_prompt , __lowercase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
snake_case_ = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self : Optional[int] , __lowercase : Union[str, List[str]] , __lowercase : int = 1_00 , __lowercase : float = 5.0 , __lowercase : float = 1.0 , __lowercase : int = 1 , __lowercase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __lowercase : Optional[torch.FloatTensor] = None , __lowercase : Optional[str] = "pil" , __lowercase : bool = True , __lowercase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __lowercase : int = 1 , ):
"""simple docstring"""
if isinstance(__lowercase , __lowercase ):
snake_case_ = 1
elif isinstance(__lowercase , __lowercase ):
snake_case_ = len(__lowercase )
else:
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(__lowercase )}" )
snake_case_ = batch_size * num_images_per_prompt
snake_case_ = guidance_scale > 1.0
snake_case_ = self._encode_prompt(__lowercase , __lowercase , __lowercase )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__lowercase , __lowercase ) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(__lowercase )}." )
# get the initial completely masked latents unless the user supplied it
snake_case_ = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
snake_case_ = self.transformer.num_vector_embeds - 1
snake_case_ = torch.full(__lowercase , __lowercase ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
"Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"
f" {self.transformer.num_vector_embeds - 1} (inclusive)." )
snake_case_ = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(__lowercase , device=self.device )
snake_case_ = self.scheduler.timesteps.to(self.device )
snake_case_ = latents
for i, t in enumerate(self.progress_bar(__lowercase ) ):
# expand the sample if we are doing classifier free guidance
snake_case_ = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
snake_case_ = self.transformer(__lowercase , encoder_hidden_states=__lowercase , timestep=__lowercase ).sample
if do_classifier_free_guidance:
snake_case_ , snake_case_ = model_output.chunk(2 )
snake_case_ = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
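# This is standard classifier-free guidance applied in log space:
#   log p_guided = log p_uncond + guidance_scale * (log p_text - log p_uncond)
# The logsumexp subtraction below renormalizes the combined scores back into a
# valid log-probability distribution over the codebook.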
model_output -= torch.logsumexp(__lowercase , dim=1 , keepdim=__lowercase )
snake_case_ = self.truncate(__lowercase , __lowercase )
# remove `log(0)`'s (`-inf`s)
snake_case_ = model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
snake_case_ = self.scheduler.step(__lowercase , timestep=__lowercase , sample=__lowercase , generator=__lowercase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__lowercase , __lowercase , __lowercase )
snake_case_ = self.vqvae.config.vq_embed_dim
snake_case_ = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
snake_case_ = self.vqvae.quantize.get_codebook_entry(__lowercase , shape=__lowercase )
snake_case_ = self.vqvae.decode(__lowercase , force_not_quantize=__lowercase ).sample
snake_case_ = (image / 2 + 0.5).clamp(0 , 1 )
snake_case_ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
snake_case_ = self.numpy_to_pil(__lowercase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__lowercase )
def snake_case__ ( self : List[str] , __lowercase : torch.FloatTensor , __lowercase : float ):
"""simple docstring"""
snake_case_ , snake_case_ = torch.sort(__lowercase , 1 , descending=__lowercase )
snake_case_ = torch.exp(__lowercase )
snake_case_ = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
snake_case_ = torch.full_like(keep_mask[:, 0:1, :] , __lowercase )
snake_case_ = torch.cat((all_true, keep_mask) , dim=1 )
snake_case_ = keep_mask[:, :-1, :]
snake_case_ = keep_mask.gather(1 , indices.argsort(1 ) )
snake_case_ = log_p_x_0.clone()
snake_case_ = -torch.inf # -inf = log(0)
return rv
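# Toy illustration of the truncation above (a sketch, not the pipeline API):
# with sorted probs [0.6, 0.3, 0.1] and truncation_rate 0.9, the cumulative
# sums are [0.6, 0.9, 1.0]; after the one-position shift performed above the
# keep mask is [True, True, False], and the dropped class is set to log(0).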
| 139 |
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = DistilBertTokenizer
lowerCAmelCase_ = DistilBertTokenizerFast
lowerCAmelCase_ = True
@slow
def snake_case__ ( self : Tuple ):
"""simple docstring"""
snake_case_ = DistilBertTokenizer.from_pretrained("distilbert-base-uncased" )
snake_case_ = tokenizer.encode("sequence builders" , add_special_tokens=__lowercase )
snake_case_ = tokenizer.encode("multi-sequence build" , add_special_tokens=__lowercase )
snake_case_ = tokenizer.build_inputs_with_special_tokens(__lowercase )
snake_case_ = tokenizer.build_inputs_with_special_tokens(__lowercase , __lowercase )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
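# For reference, these are the standard BERT-style layouts that the asserts
# above spell out token-by-token:
#   single sequence: [CLS] A [SEP]
#   pair of sequences: [CLS] A [SEP] B [SEP]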
| 139 | 1 |
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def get_new_path (suffix : str="" ):
snake_case__ : Optional[Any] = tempfile.mkdtemp()
return os.path.join(snake_case__ , str(uuid.uuida() ) + suffix )
@require_soundfile
@require_torch
class snake_case__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self : Tuple ) ->Union[str, Any]:
snake_case__ : List[str] = torch.rand(1_2, dtype=torch.floataa ) - 0.5
snake_case__ : Union[str, Any] = AgentAudio(_snake_case )
snake_case__ : Any = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(_snake_case, agent_type.to_raw(), atol=1e-4 ) )
del agent_type
# Ensure the path remains even after the object deletion
self.assertTrue(os.path.exists(_snake_case ) )
# Ensure that the file contains the same value as the original tensor
snake_case__ , snake_case__ : Optional[int] = sf.read(_snake_case )
self.assertTrue(torch.allclose(_snake_case, torch.tensor(_snake_case ), atol=1e-4 ) )
def lowercase_ ( self : Tuple ) ->List[Any]:
snake_case__ : List[str] = torch.rand(1_2, dtype=torch.floataa ) - 0.5
snake_case__ : Optional[int] = get_new_path(suffix='.wav' )
sf.write(_snake_case, _snake_case, 1_6_0_0_0 )
snake_case__ : str = AgentAudio(_snake_case )
self.assertTrue(torch.allclose(_snake_case, agent_type.to_raw(), atol=1e-4 ) )
self.assertEqual(agent_type.to_string(), _snake_case )
@require_vision
@require_torch
class snake_case__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self : List[Any] ) ->str:
snake_case__ : List[str] = torch.randint(0, 2_5_6, (6_4, 6_4, 3) )
snake_case__ : Optional[int] = AgentImage(_snake_case )
snake_case__ : Optional[Any] = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(_snake_case, agent_type._tensor, atol=1e-4 ) )
self.assertIsInstance(agent_type.to_raw(), Image.Image )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(_snake_case ) )
def lowercase_ ( self : List[Any] ) ->Dict:
snake_case__ : Union[str, Any] = Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png'
snake_case__ : Optional[Any] = Image.open(_snake_case )
snake_case__ : str = AgentImage(_snake_case )
self.assertTrue(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(_snake_case ) )
def lowercase_ ( self : str ) ->Optional[Any]:
snake_case__ : List[Any] = Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png'
snake_case__ : Dict = Image.open(_snake_case )
snake_case__ : Optional[int] = AgentImage(_snake_case )
self.assertFalse(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(_snake_case ) )
class snake_case__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self : Dict ) ->Dict:
snake_case__ : Dict = 'Hey!'
snake_case__ : Dict = AgentText(_snake_case )
self.assertEqual(_snake_case, agent_type.to_string() )
self.assertEqual(_snake_case, agent_type.to_raw() )
self.assertEqual(_snake_case, _snake_case )
| 478 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a_ :Optional[int] = {
"configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"],
"tokenization_deberta": ["DebertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ :Optional[int] = ["DebertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ :str = [
"DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"DebertaForMaskedLM",
"DebertaForQuestionAnswering",
"DebertaForSequenceClassification",
"DebertaForTokenClassification",
"DebertaModel",
"DebertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ :Optional[int] = [
"TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDebertaForMaskedLM",
"TFDebertaForQuestionAnswering",
"TFDebertaForSequenceClassification",
"TFDebertaForTokenClassification",
"TFDebertaModel",
"TFDebertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
a_ :List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
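# With the _LazyModule pattern above, importing this package stays cheap: the
# torch/TF submodules listed in _import_structure are only imported the first
# time one of their attributes (e.g. DebertaModel) is actually accessed.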
| 478 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : Union[str, Any] = logging.get_logger(__name__)
_A : Union[str, Any] = {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'''
),
}
class _lowercase ( UpperCAmelCase__ ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE : str = """dpr"""
def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : Tuple=3_05_22 , SCREAMING_SNAKE_CASE__ : List[Any]=7_68 , SCREAMING_SNAKE_CASE__ : int=12 , SCREAMING_SNAKE_CASE__ : List[str]=12 , SCREAMING_SNAKE_CASE__ : Tuple=30_72 , SCREAMING_SNAKE_CASE__ : int="gelu" , SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 , SCREAMING_SNAKE_CASE__ : str=0.1 , SCREAMING_SNAKE_CASE__ : List[str]=5_12 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.0_2 , SCREAMING_SNAKE_CASE__ : Optional[Any]=1e-1_2 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0 , SCREAMING_SNAKE_CASE__ : int="absolute" , SCREAMING_SNAKE_CASE__ : int = 0 , **SCREAMING_SNAKE_CASE__ : str , ) -> Tuple:
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = vocab_size
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = hidden_act
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = type_vocab_size
__lowerCAmelCase = initializer_range
__lowerCAmelCase = layer_norm_eps
__lowerCAmelCase = projection_dim
__lowerCAmelCase = position_embedding_type
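# Illustrative construction (hypothetical values mirroring the defaults above):
#   config = DPRConfig(vocab_size=30522, hidden_size=768, projection_dim=0)
# A projection_dim of 0 means the encoder's pooled output is used as-is, with
# no extra projection layer on top.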
| 330 |
'''simple docstring'''
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def a ( self : List[str] ) -> Optional[int]:
__lowerCAmelCase = logging.get_logger()
# the current default level is logging.WARNING
__lowerCAmelCase = logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
# restore to the original level
logging.set_verbosity(SCREAMING_SNAKE_CASE__ )
def a ( self : int ) -> str:
__lowerCAmelCase = logging.get_verbosity()
__lowerCAmelCase = logging.get_logger("""transformers.models.bart.tokenization_bart""" )
__lowerCAmelCase = """Testing 1, 2, 3"""
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
with CaptureLogger(SCREAMING_SNAKE_CASE__ ) as cl:
logger.warning(SCREAMING_SNAKE_CASE__ )
self.assertEqual(cl.out , msg + """\n""" )
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
with CaptureLogger(SCREAMING_SNAKE_CASE__ ) as cl:
logger.warning(SCREAMING_SNAKE_CASE__ )
self.assertEqual(cl.out , """""" )
# should be able to log warnings again
logging.set_verbosity_warning()
with CaptureLogger(SCREAMING_SNAKE_CASE__ ) as cl:
logger.warning(SCREAMING_SNAKE_CASE__ )
self.assertEqual(cl.out , msg + """\n""" )
# restore to the original level
logging.set_verbosity(SCREAMING_SNAKE_CASE__ )
@mockenv(TRANSFORMERS_VERBOSITY="""error""" )
def a ( self : Optional[Any] ) -> List[Any]:
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
__lowerCAmelCase = logging.get_logger("""transformers.models.bart.tokenization_bart""" )
__lowerCAmelCase = os.getenv("""TRANSFORMERS_VERBOSITY""" , SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = logging.log_levels[env_level_str]
__lowerCAmelCase = logging.get_verbosity()
self.assertEqual(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , f"""TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}""" , )
# restore to the original level
__lowerCAmelCase = """"""
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY="""super-error""" )
def a ( self : int ) -> List[Any]:
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
__lowerCAmelCase = logging.logging.getLogger()
with CaptureLogger(SCREAMING_SNAKE_CASE__ ) as cl:
# this action activates the env var
logging.get_logger("""transformers.models.bart.tokenization_bart""" )
self.assertIn("""Unknown option TRANSFORMERS_VERBOSITY=super-error""" , cl.out )
# no need to restore as nothing was changed
def a ( self : str ) -> Optional[Any]:
# testing `logger.warning_advice()`
transformers.utils.logging._reset_library_root_logger()
__lowerCAmelCase = logging.get_logger("""transformers.models.bart.tokenization_bart""" )
__lowerCAmelCase = """Testing 1, 2, 3"""
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="""1""" ):
# nothing should be logged as env var disables this method
with CaptureLogger(SCREAMING_SNAKE_CASE__ ) as cl:
logger.warning_advice(SCREAMING_SNAKE_CASE__ )
self.assertEqual(cl.out , """""" )
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="""""" ):
# should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
with CaptureLogger(SCREAMING_SNAKE_CASE__ ) as cl:
logger.warning_advice(SCREAMING_SNAKE_CASE__ )
self.assertEqual(cl.out , msg + """\n""" )
def UpperCamelCase_ ( ) -> List[str]:
'''simple docstring'''
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
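# Minimal usage sketch of the verbosity API exercised above (assumes the public
# transformers logging helpers, not part of this test file):
#   from transformers.utils import logging as hf_logging
#   hf_logging.set_verbosity_info()
#   hf_logging.get_logger("transformers").info("visible at INFO level and below")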
| 330 | 1 |
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
_lowerCamelCase = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively ( __UpperCamelCase : int , __UpperCamelCase : Any , __UpperCamelCase : List[str] , __UpperCamelCase : int , __UpperCamelCase : Tuple ) -> Optional[int]:
for attribute in key.split('''.''' ):
UpperCAmelCase_ = getattr(__UpperCamelCase , __UpperCamelCase )
if weight_type is not None:
UpperCAmelCase_ = getattr(__UpperCamelCase , __UpperCamelCase ).shape
else:
UpperCAmelCase_ = hf_pointer.shape
assert hf_shape == value.shape, (
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}'
)
if weight_type == "weight":
UpperCAmelCase_ = value
elif weight_type == "weight_g":
UpperCAmelCase_ = value
elif weight_type == "weight_v":
UpperCAmelCase_ = value
elif weight_type == "bias":
UpperCAmelCase_ = value
else:
UpperCAmelCase_ = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def recursively_load_weights_wavaveca ( __UpperCamelCase : List[Any] , __UpperCamelCase : Union[str, Any] ) -> Tuple:
UpperCAmelCase_ = []
UpperCAmelCase_ = fairseq_model.state_dict()
UpperCAmelCase_ = hf_model.feature_extractor
UpperCAmelCase_ = hf_model.adapter
for name, value in fairseq_dict.items():
UpperCAmelCase_ = False
if "conv_layers" in name:
load_conv_layer(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , hf_model.config.feat_extract_norm == '''group''' , )
UpperCAmelCase_ = True
elif any(x in name for x in ['''adaptor''', '''w2v_encoder.proj.''', '''w2v_proj_ln.'''] ):
load_adapter(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
UpperCAmelCase_ = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
UpperCAmelCase_ = True
if "*" in mapped_key:
UpperCAmelCase_ = name.split(__UpperCamelCase )[0].split('''.''' )[-2]
UpperCAmelCase_ = mapped_key.replace('''*''' , __UpperCamelCase )
if "weight_g" in name:
UpperCAmelCase_ = '''weight_g'''
elif "weight_v" in name:
UpperCAmelCase_ = '''weight_v'''
elif "bias" in name:
UpperCAmelCase_ = '''bias'''
elif "weight" in name:
UpperCAmelCase_ = '''weight'''
else:
UpperCAmelCase_ = None
set_recursively(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
continue
if not is_used:
unused_weights.append(__UpperCamelCase )
logger.warning(f'Unused weights: {unused_weights}' )
def load_conv_layer ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : str , __UpperCamelCase : Dict , __UpperCamelCase : str , __UpperCamelCase : Tuple ) -> List[str]:
UpperCAmelCase_ = full_name.split('''conv_layers.''' )[-1]
UpperCAmelCase_ = name.split('''.''' )
UpperCAmelCase_ = int(items[0] )
UpperCAmelCase_ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
UpperCAmelCase_ = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
UpperCAmelCase_ = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
UpperCAmelCase_ = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
UpperCAmelCase_ = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(__UpperCamelCase )
def load_adapter ( __UpperCamelCase : Optional[int] , __UpperCamelCase : Tuple , __UpperCamelCase : List[Any] , __UpperCamelCase : Union[str, Any] ) -> List[str]:
UpperCAmelCase_ = full_name.split('''adaptor.''' )[-1]
UpperCAmelCase_ = name.split('''.''' )
if items[1].isdigit():
UpperCAmelCase_ = int(items[1] )
else:
UpperCAmelCase_ = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), f'{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.'
UpperCAmelCase_ = value
logger.info(f'Adapter proj layer norm bias was initialized from {full_name}.' )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), f'{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.'
UpperCAmelCase_ = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), f'{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.'
UpperCAmelCase_ = value
logger.info(f'Adapter proj layer bias was initialized from {full_name}.' )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), f'{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.'
UpperCAmelCase_ = value
logger.info(f'Adapter proj layer weight was initialized from {full_name}.' )
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), f'{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.'
UpperCAmelCase_ = value
logger.info(f'Adapter layer {layer_id} bias was initialized from {full_name}.' )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), f'{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.'
UpperCAmelCase_ = value
logger.info(f'Adapter layer {layer_id} weight was initialized from {full_name}.' )
else:
unused_weights.append(__UpperCamelCase )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[str] ) -> Optional[int]:
UpperCAmelCase_ , UpperCAmelCase_ = emb.weight.shape
UpperCAmelCase_ = nn.Linear(__UpperCamelCase , __UpperCamelCase , bias=__UpperCamelCase )
UpperCAmelCase_ = emb.weight.data
return lin_layer
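# Sketch of the helper's intent (as in the upstream conversion scripts): reuse a
# (vocab_size, hidden) embedding matrix as the weight of a bias-free LM head, so
# output logits are computed as hidden_states @ emb.weight.T.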
@torch.no_grad()
def convert_wavaveca_checkpoint ( __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[str] , __UpperCamelCase : int , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Any , __UpperCamelCase : Dict , __UpperCamelCase : Optional[int] , __UpperCamelCase : int , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Dict , ) -> Dict:
UpperCAmelCase_ = WavaVecaConfig.from_pretrained(
__UpperCamelCase , add_adapter=__UpperCamelCase , adapter_stride=__UpperCamelCase , adapter_kernel_size=__UpperCamelCase , use_auth_token=__UpperCamelCase , output_hidden_size=__UpperCamelCase , )
UpperCAmelCase_ = MBartConfig.from_pretrained(__UpperCamelCase )
# load model
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={
'''config_yaml''': config_yaml_path,
'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ),
'''w2v_path''': checkpoint_path,
'''load_pretrained_decoder_from''': None,
} , )
UpperCAmelCase_ = model[0].eval()
# load feature extractor
UpperCAmelCase_ = WavaVecaFeatureExtractor.from_pretrained(__UpperCamelCase , use_auth_token=__UpperCamelCase )
# set weights for wav2vec2 encoder
UpperCAmelCase_ = WavaVecaModel(__UpperCamelCase )
recursively_load_weights_wavaveca(model.encoder , __UpperCamelCase )
# load decoder weights
UpperCAmelCase_ = MBartForCausalLM(__UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=__UpperCamelCase )
logger.warning(f'The following keys are missing when loading the decoder weights: {missing_keys}' )
logger.warning(f'The following keys are unexpected when loading the decoder weights: {unexpected_keys}' )
UpperCAmelCase_ = SpeechEncoderDecoderModel(encoder=__UpperCamelCase , decoder=__UpperCamelCase )
UpperCAmelCase_ = False
UpperCAmelCase_ = MBartaaTokenizer(__UpperCamelCase )
tokenizer.save_pretrained(__UpperCamelCase )
UpperCAmelCase_ = hf_wavavec.config.to_dict()
UpperCAmelCase_ = tokenizer.pad_token_id
UpperCAmelCase_ = tokenizer.bos_token_id
UpperCAmelCase_ = tokenizer.eos_token_id
UpperCAmelCase_ = '''mbart50'''
UpperCAmelCase_ = '''wav2vec2'''
UpperCAmelCase_ = tokenizer.eos_token_id
UpperCAmelCase_ = 25_0004
UpperCAmelCase_ = tokenizer.eos_token_id
UpperCAmelCase_ = SpeechEncoderDecoderConfig.from_dict(__UpperCamelCase )
hf_wavavec.save_pretrained(__UpperCamelCase )
feature_extractor.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_yaml_path', default=None, type=str, help='Path to yaml file of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-xls-r-1b',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/mbart-large-50-one-to-many-mmt',
type=str,
help='Path to hf decoder checkpoint config',
)
parser.add_argument('--add_adapter', default=True, type=bool, help='whethere to add model adapter layers')
parser.add_argument('--adapter_stride', default=2, type=int, help='stride of adapter layers')
parser.add_argument('--adapter_kernel_size', default=3, type=int, help='kernel size of adapter layers')
parser.add_argument('--encoder_output_dim', default=10_24, type=int, help='encoder output dim')
parser.add_argument('--start_token_id', default=25_00_04, type=int, help='`decoder_start_token_id` of model config')
_lowerCamelCase = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
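# Hypothetical invocation (script name and paths are placeholders, not real files):
#   python convert_wav2vec2_mbart_checkpoint.py \
#       --checkpoint_path ./checkpoint_best.pt \
#       --pytorch_dump_folder_path ./wav2vec2-mbart50 \
#       --dict_path ./dict.mbart50.txt \
#       --config_yaml_path ./config.yaml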
| 144 |
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> str:
if isinstance(__UpperCamelCase , float ):
raise TypeError('''\'float\' object cannot be interpreted as an integer''' )
if isinstance(__UpperCamelCase , str ):
raise TypeError('''\'str\' object cannot be interpreted as an integer''' )
if num == 0:
return "0b0"
UpperCAmelCase_ = False
if num < 0:
UpperCAmelCase_ = True
UpperCAmelCase_ = -num
UpperCAmelCase_ = []
while num > 0:
binary.insert(0 , num % 2 )
num >>= 1
if negative:
return "-0b" + "".join(str(__UpperCamelCase ) for e in binary )
return "0b" + "".join(str(__UpperCamelCase ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 144 | 1 |
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case=False ):
try:
import torch # noqa: F401
except ImportError:
logger.error(
'''Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
if not is_sharded:
_UpperCamelCase = os.path.abspath(__snake_case )
logger.info(f"""Loading PyTorch weights from {pt_path}""" )
_UpperCamelCase = torch.load(__snake_case , map_location='''cpu''' )
logger.info(f"""PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.""" )
_UpperCamelCase = convert_pytorch_state_dict_to_flax(__snake_case , __snake_case )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
_UpperCamelCase = convert_pytorch_sharded_state_dict_to_flax(__snake_case , __snake_case )
return flax_state_dict
def rename_key_and_reshape_tensor ( __snake_case , __snake_case , __snake_case , __snake_case , ):
def is_key_or_prefix_key_in_dict(__snake_case ) -> bool:
return len(set(__snake_case ) & {key, (model_prefix,) + key} ) > 0
# layer norm
_UpperCamelCase = pt_tuple_key[:-1] + ('''scale''',)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(__snake_case ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
_UpperCamelCase = pt_tuple_key[:-1] + ('''mean''',)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(__snake_case ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
_UpperCamelCase = pt_tuple_key[:-1] + ('''var''',)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(__snake_case ):
return renamed_pt_tuple_key, pt_tensor
# embedding
_UpperCamelCase = pt_tuple_key[:-1] + ('''embedding''',)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(__snake_case ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
_UpperCamelCase = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(__snake_case ):
_UpperCamelCase = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
_UpperCamelCase = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(__snake_case ):
_UpperCamelCase = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
_UpperCamelCase = pt_tuple_key[:-1] + ('''weight''',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
_UpperCamelCase = pt_tuple_key[:-1] + ('''bias''',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
_UpperCamelCase = None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
_UpperCamelCase = pt_tuple_key[-2] + '''_g'''
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
_UpperCamelCase = pt_tuple_key[-2] + '''_v'''
if name is not None:
_UpperCamelCase = pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax ( __snake_case , __snake_case ):
# convert pytorch tensor to numpy
_UpperCamelCase = {k: v.numpy() for k, v in pt_state_dict.items()}
_UpperCamelCase = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
_UpperCamelCase = flax_model.params['''params''']
else:
_UpperCamelCase = flax_model.params
_UpperCamelCase = flatten_dict(__snake_case )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
_UpperCamelCase = flatten_dict(flax_model.params['''batch_stats'''] )
random_flax_state_dict.update(__snake_case )
_UpperCamelCase = {}
_UpperCamelCase = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
_UpperCamelCase = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
_UpperCamelCase = tuple(pt_key.split('''.''' ) )
# remove base model prefix if necessary
_UpperCamelCase = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
_UpperCamelCase = pt_tuple_key[1:]
# Correctly rename weight parameters
_UpperCamelCase , _UpperCamelCase = rename_key_and_reshape_tensor(
__snake_case , __snake_case , __snake_case , __snake_case )
# add model prefix if necessary
_UpperCamelCase = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
_UpperCamelCase = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
_UpperCamelCase = jnp.asarray(__snake_case )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(__snake_case , __snake_case )
continue
# also add unexpected weight so that warning is thrown
_UpperCamelCase = jnp.asarray(__snake_case )
else:
# also add unexpected weight so that warning is thrown
_UpperCamelCase = jnp.asarray(__snake_case )
return unflatten_dict(__snake_case )
def convert_pytorch_sharded_state_dict_to_flax ( __snake_case , __snake_case ):
import torch
# Load the index
_UpperCamelCase = {}
for shard_file in shard_filenames:
# load using msgpack utils
_UpperCamelCase = torch.load(__snake_case )
_UpperCamelCase = {k: v.numpy() for k, v in pt_state_dict.items()}
_UpperCamelCase = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
_UpperCamelCase = flax_model.params['''params''']
_UpperCamelCase = flatten_dict(__snake_case )
random_flax_state_dict.update(flatten_dict(flax_model.params['''batch_stats'''] ) )
else:
_UpperCamelCase = flax_model.params
_UpperCamelCase = flatten_dict(__snake_case )
_UpperCamelCase = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
_UpperCamelCase = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
_UpperCamelCase = tuple(pt_key.split('''.''' ) )
# remove base model prefix if necessary
_UpperCamelCase = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
_UpperCamelCase = pt_tuple_key[1:]
# Correctly rename weight parameters
_UpperCamelCase , _UpperCamelCase = rename_key_and_reshape_tensor(
__snake_case , __snake_case , __snake_case , __snake_case )
# add model prefix if necessary
_UpperCamelCase = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
_UpperCamelCase = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
_UpperCamelCase = jnp.asarray(__snake_case )
continue
if "var" in flax_key[-1]:
_UpperCamelCase = jnp.asarray(__snake_case )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(__snake_case , __snake_case )
continue
# also add unexpected weight so that warning is thrown
_UpperCamelCase = jnp.asarray(__snake_case )
else:
# also add unexpected weight so that warning is thrown
_UpperCamelCase = jnp.asarray(__snake_case )
return unflatten_dict(__snake_case )
def _snake_case ( __snake_case , __snake_case ):
_UpperCamelCase = os.path.abspath(__snake_case )
logger.info(f"""Loading Flax weights from {flax_checkpoint_path}""" )
# import correct flax class
_UpperCamelCase = getattr(__snake_case , '''Flax''' + model.__class__.__name__ )
# load flax weight dict
with open(__snake_case , '''rb''' ) as state_f:
try:
_UpperCamelCase = from_bytes(__snake_case , state_f.read() )
except UnpicklingError:
raise EnvironmentError(f"""Unable to convert {flax_checkpoint_path} to Flax deserializable object. """ )
return load_flax_weights_in_pytorch_model(__snake_case , __snake_case )
def load_flax_weights_in_pytorch_model ( __snake_case , __snake_case ):
try:
import torch # noqa: F401
except ImportError:
logger.error(
'''Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
# check if we have bf16 weights
_UpperCamelCase = flatten_dict(jax.tree_util.tree_map(lambda x : x.dtype == jnp.bfloataa , __snake_case ) ).values()
if any(__snake_case ):
# convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
'''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '''
'''before loading those in PyTorch model.''' )
_UpperCamelCase = jax.tree_util.tree_map(
lambda __snake_case : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , __snake_case )
_UpperCamelCase = flatten_dict(__snake_case )
_UpperCamelCase = pt_model.state_dict()
_UpperCamelCase = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
)
_UpperCamelCase = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
_UpperCamelCase = []
_UpperCamelCase = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
_UpperCamelCase = flax_key_tuple[0] == pt_model.base_model_prefix
_UpperCamelCase = '''.'''.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
_UpperCamelCase = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
_UpperCamelCase = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(__snake_case ) not in pt_model_dict:
# conv layer
_UpperCamelCase = flax_key_tuple[:-1] + ('''weight''',)
_UpperCamelCase = jnp.transpose(__snake_case , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(__snake_case ) not in pt_model_dict:
# linear layer
_UpperCamelCase = flax_key_tuple[:-1] + ('''weight''',)
_UpperCamelCase = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
_UpperCamelCase = flax_key_tuple[:-1] + ('''weight''',)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
_UpperCamelCase = flax_key_tuple[:-1] + ('''running_mean''',)
elif "var" in flax_key_tuple[-1]:
_UpperCamelCase = flax_key_tuple[:-1] + ('''running_var''',)
if "batch_stats" in flax_state:
_UpperCamelCase = '''.'''.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
_UpperCamelCase = '''.'''.join(__snake_case )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
_UpperCamelCase = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
_UpperCamelCase = key.split('''.''' )
_UpperCamelCase = None
if key_components[-3::2] == ["parametrizations", "original0"]:
_UpperCamelCase = key_components[-2] + '''_g'''
elif key_components[-3::2] == ["parametrizations", "original1"]:
_UpperCamelCase = key_components[-2] + '''_v'''
if name is not None:
_UpperCamelCase = key_components[:-3] + [name]
_UpperCamelCase = '''.'''.join(__snake_case )
_UpperCamelCase = key
if flax_key in special_pt_names:
_UpperCamelCase = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
f"""Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected """
f"""to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
else:
# add weight to pytorch dict
_UpperCamelCase = np.asarray(__snake_case ) if not isinstance(__snake_case , np.ndarray ) else flax_tensor
_UpperCamelCase = torch.from_numpy(__snake_case )
# remove from missing keys
missing_keys.remove(__snake_case )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(__snake_case )
pt_model.load_state_dict(__snake_case )
# re-transform missing_keys to list
_UpperCamelCase = list(__snake_case )
if len(__snake_case ) > 0:
logger.warning(
'''Some weights of the Flax model were not used when initializing the PyTorch model'''
f""" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"""
f""" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"""
''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'''
f""" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"""
''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'''
''' FlaxBertForSequenceClassification model).''' )
else:
logger.warning(f"""All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n""" )
if len(__snake_case ) > 0:
logger.warning(
f"""Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"""
f""" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"""
''' use it for predictions and inference.''' )
else:
logger.warning(
f"""All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"""
'''If your task is similar to the task the model of the checkpoint was trained on, '''
f"""you can already use {pt_model.__class__.__name__} for predictions without further training.""" )
return pt_model
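# Minimal sketch of the two kernel-layout conventions handled above
# (purely illustrative; numpy is already imported at the top of this file):
#   pt_linear_w = np.zeros((8, 4))            # PyTorch nn.Linear: (out_features, in_features)
#   flax_dense_kernel = pt_linear_w.T         # Flax Dense kernel: (in_features, out_features)
#   pt_conv_w = np.zeros((16, 3, 5, 5))       # PyTorch Conv2d: (out, in, kh, kw)
#   flax_conv_kernel = pt_conv_w.transpose(2, 3, 1, 0)  # Flax Conv: (kh, kw, in, out)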
| 71 |
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple ( max_perimeter ):
triplets = Counter()
for base in range(1 , max_perimeter + 1 ):
for perpendicular in range(base , max_perimeter + 1 ):
hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
if hypotenuse == int(hypotenuse ):
perimeter = int(base + perpendicular + hypotenuse )
if perimeter > max_perimeter:
continue
triplets[perimeter] += 1
return triplets
def solution ( max_perimeter = 1000 ):
triplets = pythagorean_triple(max_perimeter )
return triplets.most_common(1 )[0][0]
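# For the default max_perimeter of 1000, solution() returns 840 -- the
# perimeter with the most integer right-triangle solutions (Project Euler 39).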
if __name__ == "__main__":
print(f'Perimeter {solution()} has maximum solutions')
| 71 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase_ = {
"""configuration_trajectory_transformer""": [
"""TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TrajectoryTransformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrajectoryTransformerModel""",
"""TrajectoryTransformerPreTrainedModel""",
"""load_tf_weights_in_trajectory_transformer""",
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 384 |
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
UpperCamelCase_ = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""enhancement""",
"""new pipeline/model""",
"""new scheduler""",
"""wip""",
]
def main ( ) -> List[Any]:
_lowerCAmelCase : Dict = Github(os.environ["""GITHUB_TOKEN"""] )
_lowerCAmelCase : Any = g.get_repo("""huggingface/diffusers""" )
_lowerCAmelCase : Tuple = repo.get_issues(state="""open""" )
for issue in open_issues:
_lowerCAmelCase : Union[str, Any] = sorted(issue.get_comments() , key=lambda i : i.created_at , reverse=_lowerCamelCase )
_lowerCAmelCase : List[Any] = comments[0] if len(_lowerCamelCase ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="""closed""" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="""open""" )
issue.remove_from_labels("""stale""" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
issue.add_to_labels("""stale""" )
if __name__ == "__main__":
main()
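# Typically run on a schedule (e.g. a GitHub Actions cron job) with a
# GITHUB_TOKEN environment variable that is allowed to edit and close issues.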
| 384 | 1 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
lowercase = get_tests_dir("fixtures")
lowercase = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
lowercase = get_tests_dir("fixtures/dummy-config.json")
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _UpperCamelCase ( self ) -> List[str]:
snake_case_ = 0
def _UpperCamelCase ( self ) -> Union[str, Any]:
snake_case_ = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h' )
self.assertIsInstance(_lowercase , _lowercase )
def _UpperCamelCase ( self ) -> Union[str, Any]:
snake_case_ = AutoFeatureExtractor.from_pretrained(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
def _UpperCamelCase ( self ) -> Dict:
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case_ = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
snake_case_ = AutoFeatureExtractor.from_pretrained(_lowercase ).to_dict()
config_dict.pop('feature_extractor_type' )
snake_case_ = WavaVecaFeatureExtractor(**_lowercase )
# save in new folder
model_config.save_pretrained(_lowercase )
config.save_pretrained(_lowercase )
snake_case_ = AutoFeatureExtractor.from_pretrained(_lowercase )
# make sure private variable is not incorrectly saved
snake_case_ = json.loads(config.to_json_string() )
self.assertTrue('_processor_class' not in dict_as_saved )
self.assertIsInstance(_lowercase , _lowercase )
def _UpperCamelCase ( self ) -> Union[str, Any]:
snake_case_ = AutoFeatureExtractor.from_pretrained(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
def _UpperCamelCase ( self ) -> Union[str, Any]:
with self.assertRaisesRegex(
_lowercase , 'bert-base is not a local folder and is not a valid model identifier' ):
snake_case_ = AutoFeatureExtractor.from_pretrained('bert-base' )
def _UpperCamelCase ( self ) -> int:
with self.assertRaisesRegex(
_lowercase , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
snake_case_ = AutoFeatureExtractor.from_pretrained(_lowercase , revision='aaaaaa' )
def _UpperCamelCase ( self ) -> Any:
with self.assertRaisesRegex(
_lowercase , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ):
snake_case_ = AutoFeatureExtractor.from_pretrained('hf-internal-testing/config-no-model' )
def _UpperCamelCase ( self ) -> List[str]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_lowercase ):
snake_case_ = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_lowercase ):
snake_case_ = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=_lowercase )
snake_case_ = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=_lowercase )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(_lowercase )
snake_case_ = AutoFeatureExtractor.from_pretrained(_lowercase , trust_remote_code=_lowercase )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
def _UpperCamelCase ( self ) -> Optional[int]:
try:
AutoConfig.register('custom' , _lowercase )
AutoFeatureExtractor.register(_lowercase , _lowercase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_lowercase ):
AutoFeatureExtractor.register(_lowercase , _lowercase )
# Now that the config is registered, it can be used as any other config with the auto-API
snake_case_ = CustomFeatureExtractor.from_pretrained(_lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(_lowercase )
snake_case_ = AutoFeatureExtractor.from_pretrained(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def _UpperCamelCase ( self ) -> List[Any]:
class UpperCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
lowerCAmelCase = True
try:
AutoConfig.register('custom' , _lowercase )
AutoFeatureExtractor.register(_lowercase , _lowercase )
# If remote code is not set, the default is to use local
snake_case_ = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
snake_case_ = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=_lowercase )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
snake_case_ = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=_lowercase )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(not hasattr(_lowercase , 'is_local' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 707 |
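A hedged sketch of the registration API the test above exercises; CustomConfig and CustomFeatureExtractor are illustrative stand-ins, not the test fixtures:

from transformers import AutoConfig, AutoFeatureExtractor, PretrainedConfig
from transformers.feature_extraction_utils import FeatureExtractionMixin

class CustomConfig(PretrainedConfig):  # stand-in config class
    model_type = "custom"

class CustomFeatureExtractor(FeatureExtractionMixin):  # stand-in feature extractor
    pass

AutoConfig.register("custom", CustomConfig)
AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
# from_pretrained() on a "custom"-typed checkpoint now resolves to CustomFeatureExtractor.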
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
lowercase = logging.getLogger(__name__)
@dataclass
class UpperCamelCase_ ( snake_case_ ):
'''simple docstring'''
lowerCAmelCase = field(
default=0.0 , metadata={'''help''': '''The label smoothing epsilon to apply (if not zero).'''} )
lowerCAmelCase = field(default=snake_case_ , metadata={'''help''': '''Whether to use SortishSampler or not.'''} )
lowerCAmelCase = field(
default=snake_case_ , metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''} )
lowerCAmelCase = field(default=snake_case_ , metadata={'''help''': '''Whether to use Adafactor.'''} )
lowerCAmelCase = field(
default=snake_case_ , metadata={'''help''': '''Encoder layer dropout probability. Goes into model.config.'''} )
lowerCAmelCase = field(
default=snake_case_ , metadata={'''help''': '''Decoder layer dropout probability. Goes into model.config.'''} )
lowerCAmelCase = field(default=snake_case_ , metadata={'''help''': '''Dropout probability. Goes into model.config.'''} )
lowerCAmelCase = field(
default=snake_case_ , metadata={'''help''': '''Attention dropout probability. Goes into model.config.'''} )
lowerCAmelCase = field(
default='''linear''' , metadata={'''help''': F'''Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}'''} , )
| 607 | 0 |
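A hedged sketch of how such a dataclass is typically consumed via HfArgumentParser; the class and field names here are illustrative, since the originals are mangled:

from dataclasses import dataclass, field
from transformers import HfArgumentParser, TrainingArguments

@dataclass
class MySeq2SeqTrainingArguments(TrainingArguments):  # hypothetical name
    label_smoothing: float = field(default=0.0, metadata={"help": "The label smoothing epsilon to apply (if not zero)."})
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})

# e.g. `python train.py --output_dir out --label_smoothing 0.1 --sortish_sampler`
parser = HfArgumentParser(MySeq2SeqTrainingArguments)
training_args = parser.parse_args_into_dataclasses()[0]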
"""simple docstring"""
import torch
def _snake_case ( ):
    """simple docstring"""
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(F'Successfully ran on {num_gpus} GPUs' )
if __name__ == "__main__":
    _snake_case()
| 88 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
_lowerCAmelCase :List[str] = 'ylacombe/bark-small'
_lowerCAmelCase :int = tempfile.mkdtemp()
_lowerCAmelCase :List[str] = 'en_speaker_1'
_lowerCAmelCase :Union[str, Any] = 'This is a test string'
_lowerCAmelCase :List[Any] = 'speaker_embeddings_path.json'
_lowerCAmelCase :str = 'speaker_embeddings'
def SCREAMING_SNAKE_CASE__ ( self: str , **_UpperCAmelCase: Optional[Any] ):
return AutoTokenizer.from_pretrained(self.checkpoint , **_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
_lowerCAmelCase :List[Any] = self.get_tokenizer()
_lowerCAmelCase :List[str] = BarkProcessor(tokenizer=_UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
_lowerCAmelCase :List[str] = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
_lowerCAmelCase :List[str] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
_lowerCAmelCase :Tuple = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
_lowerCAmelCase :Any = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='(BOS)' , eos_token='(EOS)' , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
_lowerCAmelCase :Tuple = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
_lowerCAmelCase :List[Any] = 35
_lowerCAmelCase :Optional[int] = 2
_lowerCAmelCase :Dict = 8
_lowerCAmelCase :Dict = {
'semantic_prompt': np.ones(_UpperCAmelCase ),
'coarse_prompt': np.ones((nb_codebooks_coarse, seq_len) ),
'fine_prompt': np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
_lowerCAmelCase :Dict = processor(text=self.input_string , voice_preset=_UpperCAmelCase )
_lowerCAmelCase :List[Any] = inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_UpperCAmelCase , np.array([] ) ).tolist() )
# test loading voice preset from npz file
_lowerCAmelCase :int = os.path.join(self.tmpdirname , 'file.npz' )
np.savez(_UpperCAmelCase , **_UpperCAmelCase )
_lowerCAmelCase :Dict = processor(text=self.input_string , voice_preset=_UpperCAmelCase )
_lowerCAmelCase :Optional[int] = inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_UpperCAmelCase , np.array([] ) ).tolist() )
# test loading voice preset from the hub
_lowerCAmelCase :Tuple = processor(text=self.input_string , voice_preset=self.voice_preset )
def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
_lowerCAmelCase :Tuple = self.get_tokenizer()
_lowerCAmelCase :Union[str, Any] = BarkProcessor(tokenizer=_UpperCAmelCase )
_lowerCAmelCase :List[Any] = processor(text=self.input_string )
_lowerCAmelCase :List[str] = tokenizer(
self.input_string , padding='max_length' , max_length=256 , add_special_tokens=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() ) | 687 | 0 |
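A hedged usage sketch of the processor under test; the checkpoint and preset names follow the public Bark documentation:

from transformers import BarkProcessor

processor = BarkProcessor.from_pretrained("suno/bark-small")
inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")
# `inputs` holds input_ids plus a history_prompt dict with semantic/coarse/fine prompt arrays.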
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase :Dict = {
"configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase :List[Any] = [
"TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimesformerModel",
"TimesformerForVideoClassification",
"TimesformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
__UpperCAmelCase :Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 715 |
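A minimal sketch of the same lazy-import pattern applied to one's own package; it belongs at the bottom of a package __init__.py, and the submodule and class names are assumptions:

import sys
from transformers.utils import _LazyModule

_import_structure = {"modeling_mymodel": ["MyModel"]}  # hypothetical submodule/class
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)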
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    '''simple docstring'''
    sq: int = int(number**0.5)
    return number == sq * sq
def add_three(x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int) -> tuple[int, int]:
    '''simple docstring'''
    top: int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom: int = x_den * y_den * z_den
    hcf: int = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom
def solution(order: int = 35) -> int:
    '''simple docstring'''
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]
    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)
                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)
                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)
                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)
    for num, den in unique_s:
        total += Fraction(num, den)
    return total.denominator + total.numerator
if __name__ == "__main__":
    print(f"""{solution() = }""") | 266 | 0 |
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    '''simple docstring'''
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)
def test_gather(state):
    '''simple docstring'''
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))
def test_gather_object(state):
    '''simple docstring'''
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, F'{gathered_obj}, {len(gathered_obj)} != {state.num_processes}'
    assert gathered_obj == list(range(state.num_processes)), F'{gathered_obj} != {list(range(state.num_processes))}'
def test_broadcast(state):
    '''simple docstring'''
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))
def test_pad_across_processes(state):
    '''simple docstring'''
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]
def test_reduce_sum(state):
    '''simple docstring'''
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, 'sum')
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), F'{reduced_tensor} != {truth_tensor}'
def test_reduce_mean(state):
    '''simple docstring'''
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, 'mean')
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), F'{reduced_tensor} != {truth_tensor}'
def _mp_fn(index):
    '''simple docstring'''
    main()
def main():
    '''simple docstring'''
    state = PartialState()
    state.print(F'State: {state}')
    state.print('testing gather')
    test_gather(state)
    state.print('testing gather_object')
    test_gather_object(state)
    state.print('testing broadcast')
    test_broadcast(state)
    state.print('testing pad_across_processes')
    test_pad_across_processes(state)
    state.print('testing reduce_sum')
    test_reduce_sum(state)
    state.print('testing reduce_mean')
    test_reduce_mean(state)
if __name__ == "__main__":
    main()
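A single-process sanity sketch (assuming two ranks) of the reduce arithmetic tested above:

import torch

rank0 = torch.arange(2) + 1.0          # process 0 holds tensor([1., 2.])
rank1 = torch.arange(2) + 1.0 + 2      # process 1 holds tensor([3., 4.])
assert torch.equal(rank0 + rank1, torch.tensor([4.0, 6.0]))        # reduce "sum"
assert torch.equal((rank0 + rank1) / 2, torch.tensor([2.0, 3.0]))  # reduce "mean"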
| 230 | '''simple docstring'''
from __future__ import annotations
from math import ceil, floor, sqrt
def __snake_case ( lowerCAmelCase : int = 200_0000 ):
__UpperCAmelCase = [0]
__UpperCAmelCase = 42
for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
triangle_numbers.append(triangle_numbers[-1] + idx )
# we want this to be as close as possible to target
__UpperCAmelCase = 0
# the area corresponding to the grid that gives the product closest to target
__UpperCAmelCase = 0
# an estimate of b, using the quadratic formula
__UpperCAmelCase = 42
# the largest integer less than b_estimate
__UpperCAmelCase = 42
# the smallest integer greater than b_estimate
__UpperCAmelCase = 42
# the triangle number corresponding to b_floor
__UpperCAmelCase = 42
# the triangle number corresponding to b_ceil
__UpperCAmelCase = 42
for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
__UpperCAmelCase = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
__UpperCAmelCase = floor(lowerCAmelCase )
__UpperCAmelCase = ceil(lowerCAmelCase )
__UpperCAmelCase = triangle_numbers[b_floor]
__UpperCAmelCase = triangle_numbers[b_ceil]
if abs(target - triangle_b_first_guess * triangle_a ) < abs(
target - best_product ):
__UpperCAmelCase = triangle_b_first_guess * triangle_a
__UpperCAmelCase = idx_a * b_floor
if abs(target - triangle_b_second_guess * triangle_a ) < abs(
target - best_product ):
__UpperCAmelCase = triangle_b_second_guess * triangle_a
__UpperCAmelCase = idx_a * b_ceil
return area
if __name__ == "__main__":
print(f"{solution() = }")
| 396 | 0 |
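The search above uses triangle numbers because an a x b grid contains T(a) * T(b) axis-aligned rectangles, with T(k) = k(k+1)/2; a minimal check:

def rectangle_count(a: int, b: int) -> int:
    # number of rectangles in an a x b grid: T(a) * T(b)
    return (a * (a + 1) // 2) * (b * (b + 1) // 2)

assert rectangle_count(3, 2) == 18  # the classic 3x2 example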
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCamelCase__ ( a , unittest.TestCase ):
'''simple docstring'''
_snake_case = KandinskyInpaintPipeline
_snake_case = ['''prompt''', '''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image''']
_snake_case = [
'''prompt''',
'''negative_prompt''',
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
'''mask_image''',
]
_snake_case = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''negative_prompt''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
_snake_case = False
@property
def snake_case ( self ) -> str:
return 32
@property
def snake_case ( self ) -> str:
return 32
@property
def snake_case ( self ) -> List[Any]:
return self.time_input_dim
@property
def snake_case ( self ) -> Any:
return self.time_input_dim * 4
@property
def snake_case ( self ) -> List[Any]:
return 1_00
@property
def snake_case ( self ) -> Dict:
__lowerCAmelCase : List[str] = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' )
return tokenizer
@property
def snake_case ( self ) -> Dict:
torch.manual_seed(0 )
__lowerCAmelCase : Any = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , )
__lowerCAmelCase : str = MultilingualCLIP(SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = text_encoder.eval()
return text_encoder
@property
def snake_case ( self ) -> List[Any]:
torch.manual_seed(0 )
__lowerCAmelCase : str = {
'in_channels': 9,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
__lowerCAmelCase : List[str] = UNetaDConditionModel(**SCREAMING_SNAKE_CASE )
return model
@property
def snake_case ( self ) -> int:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def snake_case ( self ) -> Tuple:
torch.manual_seed(0 )
__lowerCAmelCase : Dict = VQModel(**self.dummy_movq_kwargs )
return model
def snake_case ( self ) -> List[str]:
__lowerCAmelCase : str = self.dummy_text_encoder
__lowerCAmelCase : Dict = self.dummy_tokenizer
__lowerCAmelCase : Dict = self.dummy_unet
__lowerCAmelCase : Optional[int] = self.dummy_movq
__lowerCAmelCase : Optional[int] = DDIMScheduler(
num_train_timesteps=10_00 , beta_schedule='linear' , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , clip_sample=SCREAMING_SNAKE_CASE , set_alpha_to_one=SCREAMING_SNAKE_CASE , steps_offset=1 , prediction_type='epsilon' , thresholding=SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Optional[int] = {
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def snake_case ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=0 ) -> Union[str, Any]:
__lowerCAmelCase : str = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(SCREAMING_SNAKE_CASE ) ).to(SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(SCREAMING_SNAKE_CASE )
# create init_image
__lowerCAmelCase : List[str] = floats_tensor((1, 3, 64, 64) , rng=random.Random(SCREAMING_SNAKE_CASE ) ).to(SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__lowerCAmelCase : Any = Image.fromarray(np.uinta(SCREAMING_SNAKE_CASE ) ).convert('RGB' ).resize((2_56, 2_56) )
# create mask
__lowerCAmelCase : List[str] = np.ones((64, 64) , dtype=np.floataa )
__lowerCAmelCase : Tuple = 0
if str(SCREAMING_SNAKE_CASE ).startswith('mps' ):
__lowerCAmelCase : Any = torch.manual_seed(SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase : Optional[int] = torch.Generator(device=SCREAMING_SNAKE_CASE ).manual_seed(SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = {
'prompt': 'horse',
'image': init_image,
'mask_image': mask,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 2,
'guidance_scale': 4.0,
'output_type': 'np',
}
return inputs
def snake_case ( self ) -> Union[str, Any]:
__lowerCAmelCase : List[Any] = 'cpu'
__lowerCAmelCase : int = self.get_dummy_components()
__lowerCAmelCase : List[str] = self.pipeline_class(**SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = pipe.to(SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase : Union[str, Any] = output.images
__lowerCAmelCase : Tuple = pipe(
**self.get_dummy_inputs(SCREAMING_SNAKE_CASE ) , return_dict=SCREAMING_SNAKE_CASE , )[0]
__lowerCAmelCase : List[Any] = image[0, -3:, -3:, -1]
__lowerCAmelCase : Tuple = image_from_tuple[0, -3:, -3:, -1]
print(F"""image.shape {image.shape}""" )
assert image.shape == (1, 64, 64, 3)
__lowerCAmelCase : Optional[Any] = np.array(
[0.8_3_2_6_9_1_9, 0.7_3_7_9_0_4_6_7, 0.2_0_9_1_8_5_8_1, 0.9_3_0_9_6_1_2, 0.5_5_1_1_7_9_1, 0.4_3_7_1_3_3_2_8, 0.5_5_1_3_3_2_1, 0.4_9_9_2_2_9_3_4, 0.5_9_4_9_7_7_8_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
def snake_case ( self ) -> List[Any]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def snake_case ( self ) -> Dict:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case ( self ) -> Dict:
__lowerCAmelCase : Any = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy' )
__lowerCAmelCase : str = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
__lowerCAmelCase : Any = np.ones((7_68, 7_68) , dtype=np.floataa )
__lowerCAmelCase : Any = 0
__lowerCAmelCase : Tuple = 'a hat'
__lowerCAmelCase : Optional[Any] = KandinskyPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-prior' , torch_dtype=torch.floataa )
pipe_prior.to(SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = KandinskyInpaintPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-inpaint' , torch_dtype=torch.floataa )
__lowerCAmelCase : int = pipeline.to(SCREAMING_SNAKE_CASE )
pipeline.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = torch.Generator(device='cpu' ).manual_seed(0 )
__lowerCAmelCase , __lowerCAmelCase : Dict = pipe_prior(
SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
__lowerCAmelCase : Union[str, Any] = pipeline(
SCREAMING_SNAKE_CASE , image=SCREAMING_SNAKE_CASE , mask_image=SCREAMING_SNAKE_CASE , image_embeds=SCREAMING_SNAKE_CASE , negative_image_embeds=SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , num_inference_steps=1_00 , height=7_68 , width=7_68 , output_type='np' , )
__lowerCAmelCase : List[Any] = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
| 123 |
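A hedged end-to-end sketch mirroring the slow test above (requires a GPU and the public checkpoints; the placeholder image and mask are assumptions):

import numpy as np
import torch
from PIL import Image
from diffusers import KandinskyInpaintPipeline, KandinskyPriorPipeline

pipe_prior = KandinskyPriorPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16).to("cuda")
pipe = KandinskyInpaintPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16).to("cuda")

init_image = Image.new("RGB", (768, 768))     # placeholder input image
mask = np.ones((768, 768), dtype=np.float32)  # mask selecting the region to repaint, as in the test
mask[:250, 250:-250] = 0

image_embeds, negative_image_embeds = pipe_prior("a hat", negative_prompt="").to_tuple()
out = pipe("a hat", image=init_image, mask_image=mask, image_embeds=image_embeds,
           negative_image_embeds=negative_image_embeds, height=768, width=768)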
'''simple docstring'''
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
A_ = False
A_ = logging.get_logger(__name__)
A_ = "ybelkada/fonts"
def A ( ) -> Union[str, Any]:
'''simple docstring'''
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
F"""You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use """
'Pix2StructImageProcessor. Please upgrade torch.' )
def A ( _UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : Any ,_UpperCAmelCase : Tuple ) -> str:
'''simple docstring'''
requires_backends(_UpperCAmelCase ,['torch'] )
_check_torch_version()
__lowerCAmelCase : Dict = image_tensor.unsqueeze(0 )
__lowerCAmelCase : str = torch.nn.functional.unfold(_UpperCAmelCase ,(patch_height, patch_width) ,stride=(patch_height, patch_width) )
__lowerCAmelCase : List[str] = patches.reshape(image_tensor.size(0 ) ,image_tensor.size(1 ) ,_UpperCAmelCase ,_UpperCAmelCase ,-1 )
__lowerCAmelCase : str = patches.permute(0 ,4 ,2 ,3 ,1 ).reshape(
image_tensor.size(2 ) // patch_height ,image_tensor.size(3 ) // patch_width ,image_tensor.size(1 ) * patch_height * patch_width ,)
return patches.unsqueeze(0 )
def A ( _UpperCAmelCase : str ,_UpperCAmelCase : int = 3_6 ,_UpperCAmelCase : str = "black" ,_UpperCAmelCase : str = "white" ,_UpperCAmelCase : int = 5 ,_UpperCAmelCase : int = 5 ,_UpperCAmelCase : int = 5 ,_UpperCAmelCase : int = 5 ,_UpperCAmelCase : Optional[bytes] = None ,_UpperCAmelCase : Optional[str] = None ,) -> Image.Image:
'''simple docstring'''
requires_backends(_UpperCAmelCase ,'vision' )
# Add new lines so that each line is no more than 80 characters.
__lowerCAmelCase : List[Any] = textwrap.TextWrapper(width=8_0 )
__lowerCAmelCase : Tuple = wrapper.wrap(text=_UpperCAmelCase )
__lowerCAmelCase : Tuple = '\n'.join(_UpperCAmelCase )
if font_bytes is not None and font_path is None:
__lowerCAmelCase : Optional[int] = io.BytesIO(_UpperCAmelCase )
elif font_path is not None:
__lowerCAmelCase : Optional[Any] = font_path
else:
__lowerCAmelCase : int = hf_hub_download(_UpperCAmelCase ,'Arial.TTF' )
__lowerCAmelCase : Union[str, Any] = ImageFont.truetype(_UpperCAmelCase ,encoding='UTF-8' ,size=_UpperCAmelCase )
# Use a temporary canvas to determine the width and height in pixels when
# rendering the text.
__lowerCAmelCase : Optional[Any] = ImageDraw.Draw(Image.new('RGB' ,(1, 1) ,_UpperCAmelCase ) )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : str = temp_draw.textbbox((0, 0) ,_UpperCAmelCase ,_UpperCAmelCase )
# Create the actual image with a bit of padding around the text.
__lowerCAmelCase : List[Any] = text_width + left_padding + right_padding
__lowerCAmelCase : Any = text_height + top_padding + bottom_padding
__lowerCAmelCase : int = Image.new('RGB' ,(image_width, image_height) ,_UpperCAmelCase )
__lowerCAmelCase : List[Any] = ImageDraw.Draw(_UpperCAmelCase )
draw.text(xy=(left_padding, top_padding) ,text=_UpperCAmelCase ,fill=_UpperCAmelCase ,font=_UpperCAmelCase )
return image
def A ( _UpperCAmelCase : np.ndarray ,_UpperCAmelCase : str ,**_UpperCAmelCase : List[str] ) -> Any:
'''simple docstring'''
requires_backends(_UpperCAmelCase ,'vision' )
# Convert to PIL image if necessary
__lowerCAmelCase : List[Any] = to_pil_image(_UpperCAmelCase )
__lowerCAmelCase : Tuple = render_text(_UpperCAmelCase ,**_UpperCAmelCase )
__lowerCAmelCase : Dict = max(header_image.width ,image.width )
__lowerCAmelCase : List[Any] = int(image.height * (new_width / image.width) )
__lowerCAmelCase : Dict = int(header_image.height * (new_width / header_image.width) )
__lowerCAmelCase : Union[str, Any] = Image.new('RGB' ,(new_width, new_height + new_header_height) ,'white' )
new_image.paste(header_image.resize((new_width, new_header_height) ) ,(0, 0) )
new_image.paste(image.resize((new_width, new_height) ) ,(0, new_header_height) )
# Convert back to the original framework if necessary
__lowerCAmelCase : Optional[int] = to_numpy_array(_UpperCAmelCase )
if infer_channel_dimension_format(_UpperCAmelCase ) == ChannelDimension.LAST:
__lowerCAmelCase : Optional[Any] = to_channel_dimension_format(_UpperCAmelCase ,ChannelDimension.LAST )
return new_image
class UpperCamelCase__ ( a ):
'''simple docstring'''
_snake_case = ['''flattened_patches''']
def __init__( self , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = 20_48 , SCREAMING_SNAKE_CASE = False , **SCREAMING_SNAKE_CASE , ) -> None:
super().__init__(**SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = patch_size if patch_size is not None else {'height': 16, 'width': 16}
__lowerCAmelCase : Optional[int] = do_normalize
__lowerCAmelCase : str = do_convert_rgb
__lowerCAmelCase : Any = max_patches
__lowerCAmelCase : str = is_vqa
def snake_case ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> np.ndarray:
requires_backends(self.extract_flattened_patches , 'torch' )
_check_torch_version()
# convert to torch
__lowerCAmelCase : List[Any] = to_channel_dimension_format(SCREAMING_SNAKE_CASE , ChannelDimension.FIRST )
__lowerCAmelCase : int = torch.from_numpy(SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : List[Any] = patch_size['height'], patch_size['width']
__lowerCAmelCase , __lowerCAmelCase : Optional[int] = get_image_size(SCREAMING_SNAKE_CASE )
# maximize scale s.t.
__lowerCAmelCase : List[str] = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) )
__lowerCAmelCase : Tuple = max(min(math.floor(scale * image_height / patch_height ) , SCREAMING_SNAKE_CASE ) , 1 )
__lowerCAmelCase : Union[str, Any] = max(min(math.floor(scale * image_width / patch_width ) , SCREAMING_SNAKE_CASE ) , 1 )
__lowerCAmelCase : int = max(num_feasible_rows * patch_height , 1 )
__lowerCAmelCase : Any = max(num_feasible_cols * patch_width , 1 )
__lowerCAmelCase : List[Any] = torch.nn.functional.interpolate(
image.unsqueeze(0 ) , size=(resized_height, resized_width) , mode='bilinear' , align_corners=SCREAMING_SNAKE_CASE , antialias=SCREAMING_SNAKE_CASE , ).squeeze(0 )
# [1, rows, columns, patch_height * patch_width * image_channels]
__lowerCAmelCase : Optional[Any] = torch_extract_patches(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = patches.shape
__lowerCAmelCase : List[str] = patches_shape[1]
__lowerCAmelCase : str = patches_shape[2]
__lowerCAmelCase : Union[str, Any] = patches_shape[3]
# [rows * columns, patch_height * patch_width * image_channels]
__lowerCAmelCase : int = patches.reshape([rows * columns, depth] )
# [rows * columns, 1]
__lowerCAmelCase : List[Any] = torch.arange(SCREAMING_SNAKE_CASE ).reshape([rows, 1] ).repeat(1 , SCREAMING_SNAKE_CASE ).reshape([rows * columns, 1] )
__lowerCAmelCase : List[str] = torch.arange(SCREAMING_SNAKE_CASE ).reshape([1, columns] ).repeat(SCREAMING_SNAKE_CASE , 1 ).reshape([rows * columns, 1] )
# Offset by 1 so the ids do not contain zeros, which represent padding.
row_ids += 1
col_ids += 1
# Prepare additional patch features.
# [rows * columns, 1]
__lowerCAmelCase : List[Any] = row_ids.to(torch.floataa )
__lowerCAmelCase : Any = col_ids.to(torch.floataa )
# [rows * columns, 2 + patch_height * patch_width * image_channels]
__lowerCAmelCase : Tuple = torch.cat([row_ids, col_ids, patches] , -1 )
# [max_patches, 2 + patch_height * patch_width * image_channels]
__lowerCAmelCase : Optional[int] = torch.nn.functional.pad(SCREAMING_SNAKE_CASE , [0, 0, 0, max_patches - (rows * columns)] ).float()
__lowerCAmelCase : Optional[int] = to_numpy_array(SCREAMING_SNAKE_CASE )
return result
def snake_case ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE ) -> np.ndarray:
if image.dtype == np.uinta:
__lowerCAmelCase : Dict = image.astype(np.floataa )
# take mean across the whole `image`
__lowerCAmelCase : Optional[int] = np.mean(SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = np.std(SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = max(SCREAMING_SNAKE_CASE , 1.0 / math.sqrt(np.prod(image.shape ) ) )
return normalize(SCREAMING_SNAKE_CASE , mean=SCREAMING_SNAKE_CASE , std=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def snake_case ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE , ) -> ImageInput:
__lowerCAmelCase : str = do_normalize if do_normalize is not None else self.do_normalize
__lowerCAmelCase : List[Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__lowerCAmelCase : int = patch_size if patch_size is not None else self.patch_size
__lowerCAmelCase : int = max_patches if max_patches is not None else self.max_patches
__lowerCAmelCase : Optional[Any] = self.is_vqa
if kwargs.get('data_format' , SCREAMING_SNAKE_CASE ) is not None:
raise ValueError('data_format is not an accepted input as the outputs are always in the channels-last format.' )
__lowerCAmelCase : Dict = make_list_of_images(SCREAMING_SNAKE_CASE )
if not valid_images(SCREAMING_SNAKE_CASE ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__lowerCAmelCase : List[Any] = [convert_to_rgb(SCREAMING_SNAKE_CASE ) for image in images]
# All transformations expect numpy arrays.
__lowerCAmelCase : Tuple = [to_numpy_array(SCREAMING_SNAKE_CASE ) for image in images]
if is_vqa:
if header_text is None:
raise ValueError('A header text must be provided for VQA models.' )
__lowerCAmelCase : Optional[int] = kwargs.pop('font_bytes' , SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = kwargs.pop('font_path' , SCREAMING_SNAKE_CASE )
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : int = [header_text] * len(SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = [
render_header(SCREAMING_SNAKE_CASE , header_text[i] , font_bytes=SCREAMING_SNAKE_CASE , font_path=SCREAMING_SNAKE_CASE )
for i, image in enumerate(SCREAMING_SNAKE_CASE )
]
if do_normalize:
__lowerCAmelCase : int = [self.normalize(image=SCREAMING_SNAKE_CASE ) for image in images]
# convert to torch tensor and permute
__lowerCAmelCase : Union[str, Any] = [
self.extract_flattened_patches(image=SCREAMING_SNAKE_CASE , max_patches=SCREAMING_SNAKE_CASE , patch_size=SCREAMING_SNAKE_CASE )
for image in images
]
# create attention mask in numpy
__lowerCAmelCase : Optional[int] = [(image.sum(axis=-1 ) != 0).astype(np.floataa ) for image in images]
__lowerCAmelCase : List[Any] = BatchFeature(
data={'flattened_patches': images, 'attention_mask': attention_masks} , tensor_type=SCREAMING_SNAKE_CASE )
return encoded_outputs
| 123 | 1 |
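The patch extraction above is plain unfold arithmetic; a minimal check of the shapes involved:

import torch

image = torch.randn(1, 3, 32, 32)  # (batch, channels, height, width)
patches = torch.nn.functional.unfold(image, (16, 16), stride=(16, 16))
# unfold yields (batch, channels * 16 * 16, num_patches); a 32x32 image gives 4 patches
assert patches.shape == (1, 3 * 16 * 16, 4)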
def lowerCamelCase__ ( _lowercase = 1000 ):
'''simple docstring'''
return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) )
if __name__ == "__main__":
print(solution()) | 30 |
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class lowerCAmelCase_ :
def __init__( self , _UpperCamelCase , _UpperCamelCase=13 , _UpperCamelCase=2 , _UpperCamelCase=24 , _UpperCamelCase=16 , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=32 , _UpperCamelCase=5 , _UpperCamelCase=4 , _UpperCamelCase=37 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=10 , _UpperCamelCase=0.02 , _UpperCamelCase=None , _UpperCamelCase=2 , _UpperCamelCase=2 , )-> Tuple:
_A = parent
_A = batch_size
_A = patch_size
_A = max_length
_A = num_mel_bins
_A = is_training
_A = use_labels
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = intermediate_size
_A = hidden_act
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = type_sequence_label_size
_A = initializer_range
_A = scope
_A = frequency_stride
_A = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
_A = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
_A = (self.max_length - self.patch_size) // self.time_stride + 1
_A = frequency_out_dimension * time_out_dimension
_A = num_patches + 2
def UpperCamelCase ( self )-> int:
_A = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
_A = None
if self.use_labels:
_A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A = self.get_config()
return config, input_values, labels
def UpperCamelCase ( self )-> int:
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCamelCase , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def UpperCamelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )-> Optional[Any]:
_A = ASTModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_A = model(_UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self )-> Any:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {'input_values': input_values}
        return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
__UpperCAmelCase =(
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
__UpperCAmelCase =(
{'audio-classification': ASTForAudioClassification, 'feature-extraction': ASTModel}
if is_torch_available()
else {}
)
__UpperCAmelCase =False
__UpperCAmelCase =False
__UpperCAmelCase =False
__UpperCAmelCase =False
def UpperCamelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )-> Union[str, Any]:
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def UpperCamelCase ( self )-> List[str]:
_A = ASTModelTester(self )
_A = ConfigTester(self , config_class=_UpperCamelCase , has_text_modality=_UpperCamelCase , hidden_size=37 )
def UpperCamelCase ( self )-> int:
self.config_tester.run_common_tests()
@unittest.skip(reason='AST does not use inputs_embeds' )
def UpperCamelCase ( self )-> List[Any]:
pass
def UpperCamelCase ( self )-> str:
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = model_class(_UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_A = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCamelCase , nn.Linear ) )
def UpperCamelCase ( self )-> List[str]:
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = model_class(_UpperCamelCase )
_A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A = [*signature.parameters.keys()]
_A = ['input_values']
self.assertListEqual(arg_names[:1] , _UpperCamelCase )
def UpperCamelCase ( self )-> Optional[Any]:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
@slow
def UpperCamelCase ( self )-> Tuple:
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A = ASTModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
def prepare_audio() -> Union[str, Any]:
    """simple docstring"""
    filepath = hf_hub_download(
        repo_id='nielsr/audio-spectogram-transformer-checkpoint' , filename='sample_audio.flac' , repo_type='dataset' )
    audio, sampling_rate = torchaudio.load(filepath)
    return audio, sampling_rate
@require_torch
@require_torchaudio
class lowerCAmelCase_ ( unittest.TestCase ):
@cached_property
def UpperCamelCase ( self )-> Dict:
return (
ASTFeatureExtractor.from_pretrained('MIT/ast-finetuned-audioset-10-10-0.4593' )
if is_torchaudio_available()
else None
)
@slow
def UpperCamelCase ( self )-> Any:
_A = self.default_feature_extractor
_A = ASTForAudioClassification.from_pretrained('MIT/ast-finetuned-audioset-10-10-0.4593' ).to(_UpperCamelCase )
_A = self.default_feature_extractor
_A , _A = prepare_audio()
_A = audio.squeeze().numpy()
_A = feature_extractor(_UpperCamelCase , sampling_rate=_UpperCamelCase , return_tensors='pt' ).to(_UpperCamelCase )
# forward pass
with torch.no_grad():
_A = model(**_UpperCamelCase )
# verify the logits
_A = torch.Size((1, 527) )
self.assertEqual(outputs.logits.shape , _UpperCamelCase )
_A = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(_UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCamelCase , atol=1e-4 ) )
| 292 | 0 |
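A hedged inference sketch using the same public checkpoint as the slow test; the silent waveform is a stand-in input:

import torch
from transformers import ASTFeatureExtractor, ASTForAudioClassification

checkpoint = "MIT/ast-finetuned-audioset-10-10-0.4593"
extractor = ASTFeatureExtractor.from_pretrained(checkpoint)
model = ASTForAudioClassification.from_pretrained(checkpoint)

waveform = torch.zeros(16_000).numpy()  # one second of silence at 16 kHz
inputs = extractor(waveform, sampling_rate=16_000, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 527): AudioSet classes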
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 702 |
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
_snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCAmelCase ( lowercase_ ):
def __init__( self :int , _lowercase :CLIPSegForImageSegmentation , _lowercase :CLIPSegProcessor , _lowercase :AutoencoderKL , _lowercase :CLIPTextModel , _lowercase :CLIPTokenizer , _lowercase :UNetaDConditionModel , _lowercase :Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , _lowercase :StableDiffusionSafetyChecker , _lowercase :CLIPImageProcessor , ):
'''simple docstring'''
super().__init__()
if hasattr(scheduler.config , "steps_offset" ) and scheduler.config.steps_offset != 1:
lowercase__ = (
f'''The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'''
f''' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '''
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
" file"
)
deprecate("steps_offset!=1" , "1.0.0" , _lowercase , standard_warn=_lowercase )
lowercase__ = dict(scheduler.config )
lowercase__ = 1
lowercase__ = FrozenDict(_lowercase )
if hasattr(scheduler.config , "skip_prk_steps" ) and scheduler.config.skip_prk_steps is False:
lowercase__ = (
f'''The configuration file of this scheduler: {scheduler} has not set the configuration'''
" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
" Hub, it would be very nice if you could open a Pull request for the"
" `scheduler/scheduler_config.json` file"
)
deprecate("skip_prk_steps not set" , "1.0.0" , _lowercase , standard_warn=_lowercase )
lowercase__ = dict(scheduler.config )
lowercase__ = True
lowercase__ = FrozenDict(_lowercase )
if safety_checker is None:
logger.warning(
f'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
self.register_modules(
segmentation_model=_lowercase , segmentation_processor=_lowercase , vae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , unet=_lowercase , scheduler=_lowercase , safety_checker=_lowercase , feature_extractor=_lowercase , )
def UpperCAmelCase ( self :List[str] , _lowercase :Optional[Union[str, int]] = "auto" ):
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowercase__ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_lowercase )
def UpperCAmelCase ( self :Dict ):
'''simple docstring'''
self.enable_attention_slicing(_lowercase )
def UpperCAmelCase ( self :Optional[Any] ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowercase__ = torch.device("cuda" )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(_lowercase , _lowercase )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCAmelCase ( self :Dict ):
'''simple docstring'''
if self.device != torch.device("meta" ) or not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(_lowercase , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self :Optional[Any] , _lowercase :Union[str, List[str]] , _lowercase :Union[torch.FloatTensor, PIL.Image.Image] , _lowercase :str , _lowercase :int = 5_12 , _lowercase :int = 5_12 , _lowercase :int = 50 , _lowercase :float = 7.5 , _lowercase :Optional[Union[str, List[str]]] = None , _lowercase :Optional[int] = 1 , _lowercase :float = 0.0 , _lowercase :Optional[torch.Generator] = None , _lowercase :Optional[torch.FloatTensor] = None , _lowercase :Optional[str] = "pil" , _lowercase :bool = True , _lowercase :Optional[Callable[[int, int, torch.FloatTensor], None]] = None , _lowercase :int = 1 , **_lowercase :int , ):
'''simple docstring'''
lowercase__ = self.segmentation_processor(
text=[text] , images=[image] , padding="max_length" , return_tensors="pt" ).to(self.device )
lowercase__ = self.segmentation_model(**_lowercase )
lowercase__ = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
lowercase__ = self.numpy_to_pil(_lowercase )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
lowercase__ = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=_lowercase , image=_lowercase , mask_image=_lowercase , height=_lowercase , width=_lowercase , num_inference_steps=_lowercase , guidance_scale=_lowercase , negative_prompt=_lowercase , num_images_per_prompt=_lowercase , eta=_lowercase , generator=_lowercase , latents=_lowercase , output_type=_lowercase , return_dict=_lowercase , callback=_lowercase , callback_steps=_lowercase , )
| 611 | 0 |
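A hedged usage sketch of this text-guided inpainting pipeline; loading it under the community name text_inpainting is an assumption:

import torch
from diffusers import DiffusionPipeline
from transformers import CLIPSegForImageSegmentation, CLIPSegProcessor

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
segmenter = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-inpainting",
    custom_pipeline="text_inpainting",  # assumed registration name for the class above
    segmentation_model=segmenter,
    segmentation_processor=processor,
)
# pipe(text="glasses", image=pil_image, prompt="sunglasses") segments "glasses" and repaints it.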
"""simple docstring"""
import requests
from bsa import BeautifulSoup
def _lowerCAmelCase ( lowerCAmelCase = "AAPL" ):
'''simple docstring'''
UpperCAmelCase = F'''https://in.finance.yahoo.com/quote/{symbol}?s={symbol}'''
UpperCAmelCase = BeautifulSoup(requests.get(A__ ).text , """html.parser""" )
UpperCAmelCase = """My(6px) Pos(r) smartphone_Mt(6px)"""
return soup.find("""div""" , class_=class_ ).find("""span""" ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F'Current {symbol:<4} stock price is {stock_price(symbol):>8}')
| 673 |
'''simple docstring'''
def rank_of_matrix(matrix: list[list[int | float]]) -> int:
    '''simple docstring'''
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows, columns)
    for row in range(rank):
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]
            # Reduce the row pointer by one to stay on the same row
            row -= 1
    return rank
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 275 | 0 |
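Two quick examples of rank_of_matrix above on small inputs:

assert rank_of_matrix([[1.0, 2.0], [2.0, 4.0]]) == 1  # second row is twice the first
assert rank_of_matrix([[1.0, 0.0], [0.0, 1.0]]) == 2  # identity has full rank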
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    '''simple docstring'''
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
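# For example, find_max([2, 9, 4, 1], 0, 3) returns 9.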
if __name__ == "__main__":
import doctest
    doctest.testmod(verbose=True)
| 709 |
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("importlib_metadata")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def dep_version_check(pkg: str, hint: str = None):
    '''simple docstring'''
    require_version(deps[pkg], hint)
| 26 | 0 |
'''simple docstring'''
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)

if TYPE_CHECKING:
    import pyspark


@dataclass
class SparkConfig(datasets.BuilderConfig):
    """simple docstring"""

    features: Optional[datasets.Features] = None
def _generate_iterable_examples(df: "pyspark.sql.DataFrame", partition_order: List[int]):
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("*").where(f"part_id = {partition_id}").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f"{partition_id}_{row_id}", row.asDict()
                row_id += 1

    return generate_fn


class SparkExamplesIterable(_BaseExamplesIterable):
    def __init__(self, df: "pyspark.sql.DataFrame", partition_order=None):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable":
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self) -> int:
        return len(self.partition_order)
class Spark(datasets.DatasetBuilder):
    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__(self, df: "pyspark.sql.DataFrame", cache_dir: str = None, working_dir: str = None, **config_kwargs):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir,
            config_name=str(self.df.semanticHash()),
            **config_kwargs,
        )
    def _validate_cache_dir(self):
        # Returns the path of the created file.
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir, exist_ok=True)
            probe_file = os.path.join(self._cache_dir, "fs_test" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, "a")
            return [probe_file]

        if self._spark.conf.get("spark.master", "").startswith("local"):
            return

        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return
        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir"
        )

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager: datasets.download.download_manager.DownloadManager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]

    def _repartition_df_if_needed(self, max_shard_size):
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
            .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)
    def _prepare_split_single(self, fpath: str, file_format: str, max_shard_size: int):
        import pyspark

        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"

        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            shard_id = 0
            writer = writer_class(
                features=features,
                path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                writer_batch_size=writer_batch_size,
                storage_options=storage_options,
                embed_local_files=embed_local_files,
            )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]],
                        names=["task_id", "num_examples", "num_bytes"],
                    )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features,
                        path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                        writer_batch_size=writer_batch_size,
                        storage_options=storage_options,
                        embed_local_files=embed_local_files,
                    )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)
            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)

        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(
                pyspark.sql.functions.sum("num_examples").alias("total_num_examples"),
                pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"),
                pyspark.sql.functions.count("num_bytes").alias("num_shards"),
                pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"),
            )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
    def _prepare_split(self, split_generator: "datasets.SplitGenerator", file_format: str = "arrow", max_shard_size: Optional[Union[str, int]] = None, num_proc: Optional[int] = None, **kwargs):
        self._validate_cache_dir()

        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
        self._repartition_df_if_needed(max_shard_size)
        is_local = not is_remote_filesystem(self._fs)
        path_join = os.path.join if is_local else posixpath.join

        SUFFIX = "-TTTTT-SSSSS-of-NNNNN"
        fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
        fpath = path_join(self._output_dir, fname)

        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []

        for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
            (num_examples, num_bytes, num_shards, shard_lengths) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards))
                all_shard_lengths.extend(shard_lengths)

        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes

        # should rename everything at the end
        logger.debug(f"Renaming {total_shards} shards.")
        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths

            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            fs = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(task_id: int, shard_id: int, global_shard_id: int):
                rename(
                    fs,
                    fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                    fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
                )

            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards)):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards):
                    args.append([task_id, shard_id, global_shard_id])
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                fpath.replace(SUFFIX, ""),
            )

    def _get_examples_iterable_for_split(self, split_generator: "datasets.SplitGenerator") -> SparkExamplesIterable:
        return SparkExamplesIterable(self.df)
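# A minimal usage sketch (assumes a local SparkSession; in the `datasets` library the public entry point
# that drives this builder is `Dataset.from_spark`):
# import pyspark
# spark = pyspark.sql.SparkSession.builder.master("local[*]").getOrCreate()
# builder = Spark(spark.range(100))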
| 497 |
'''simple docstring'''
from __future__ import annotations
def p_series(nth_term: int | float | str, power: int | float | str) -> list[str]:
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(int(nth_term)):
        series.append(f"1 / {pow(temp + 1, int(power))}" if series else "1")
    return series
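# For example, p_series(5, 2) returns ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25'].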
if __name__ == "__main__":
import doctest
doctest.testmod()
    nth_term = int(input("Enter the last number (nth term) of the P-Series"))
    power = int(input("Enter the power for P-Series"))
    print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
    print(p_series(nth_term, power))
| 497 | 1 |
'''simple docstring'''
def get_highest_set_bit_position(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
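# For example, get_highest_set_bit_position(6) returns 3, since 0b110 has its highest set bit in position 3.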
if __name__ == "__main__":
import doctest
doctest.testmod()
| 666 |
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50


@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)

    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])

        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)

    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict

    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
| 666 | 1 |
"""simple docstring"""
from torch import nn
def get_activation(act_fn: str) -> nn.Module:
    """simple docstring"""
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
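# For example, get_activation("gelu") returns an nn.GELU() module instance.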
| 178 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPT2LMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
    from transformers.models.gpt2 import TFGPT2Tokenizer

TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"
if is_tf_available():
    class ModelToSave(tf.Module):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPT2LMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0, tf.int64)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]
            return outputs
@require_tf
@require_keras_nlp
class GPTTokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()

        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()

                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
            loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
            # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
            self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPT2Tokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 123123

            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)

                out_length = out["input_ids"].numpy().shape[1]

                assert out_length == max_length
| 178 | 1 |
def stooge_sort(arr: list) -> list:
    """simple docstring"""
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr: list, i: int, h: int) -> None:
    """simple docstring"""
    if i >= h:
        return
    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        # Recursively sort the first 2/3 of the elements
        stooge(arr, i, h - t)
        # Recursively sort the last 2/3 of the elements
        stooge(arr, i + t, h)
        # Recursively sort the first 2/3 of the elements again
        stooge(arr, i, h - t)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(stooge_sort(unsorted))
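# For example, stooge_sort([2, 4, 5, 3, 1]) returns [1, 2, 3, 4, 5].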
| 353 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/table-transformer-detection": (
        "https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
    ),
}


class TableTransformerConfig(PretrainedConfig):
    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=100, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=8, encoder_layerdrop=0.0, decoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, auxiliary_loss=False, position_embedding_type="sine", backbone="resnet50", use_pretrained_backbone=True, dilation=False, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, **kwargs):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model


class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
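# A minimal usage sketch (hedged: assumes the standard `transformers` config API):
# config = TableTransformerConfig(num_queries=50)
# assert config.hidden_size == config.d_model  # resolved via attribute_map / the property above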
| 353 | 1 |
'''simple docstring'''
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

    def get_tokenizer(self, **kwargs):
        return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_chinese_input_output_texts(self):
        input_text = "永和服装饰品有限公司,今天天气非常好"
        output_text = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
        return input_text, output_text

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_rust_tokenizer(self):
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    # can't train a new tokenizer via the Tokenizers lib
    def test_training_new_tokenizer(self):
        pass

    # can't train a new tokenizer via the Tokenizers lib
    def test_training_new_tokenizer_with_special_tokens_change(self):
        pass

    def test_save_slow_from_fast_and_reload_fast(self):
        pass
| 143 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 143 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
    "tokenization_mvp": ["MvpTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mvp_fast"] = ["MvpTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mvp"] = [
        "MVP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MvpForCausalLM",
        "MvpForConditionalGeneration",
        "MvpForQuestionAnswering",
        "MvpForSequenceClassification",
        "MvpModel",
        "MvpPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
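# Note: with the `_LazyModule` pattern above, `from transformers.models.mvp import MvpModel` defers the heavy
# torch-backed import until the attribute is first accessed.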
| 88 |
'''simple docstring'''
def solution(limit: int = 1000000) -> int:
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:]))
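# For example, solution(8) returns 21, the sum of Euler's totient phi(n) for n from 2 through 8.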
if __name__ == "__main__":
print(f'''{solution() = }''')
| 88 | 1 |
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp):
    # This defines a "chinese character" as anything in the CJK Unicode block:
    #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    #
    # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
    # despite its name. The modern Korean Hangul alphabet is a different block,
    # as is Japanese Hiragana and Katakana. Those alphabets are used to write
    # space-separated words, so they are not treated specially and handled
    # like the all of the other languages.
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)
        or (cp >= 0x20000 and cp <= 0x2A6DF)
        or (cp >= 0x2A700 and cp <= 0x2B73F)
        or (cp >= 0x2B740 and cp <= 0x2B81F)
        or (cp >= 0x2B820 and cp <= 0x2CEAF)
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)
    ):
        return True
    return False


def is_chinese(word: str):
    # word like '180' or '身高' or '神'
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1


def get_chinese_word(tokens: List[str]):
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list


def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match_len = min(end - start, max_word_len)
            for i in range(max_match_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word


def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)
    return ref_ids


def main(args):
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
    parser.add_argument(
        "--file_name",
        type=str,
        default="./resources/chinese-demo.txt",
        help="file need process, same as training data in lm",
    )
    parser.add_argument(
        "--ltp", type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path"
    )
    parser.add_argument("--bert", type=str, default="./resources/robert", help="resources for Bert tokenizer")
    parser.add_argument("--save_path", type=str, default="./resources/ref.txt", help="path to save res")

    args = parser.parse_args()
    main(args)
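# Example invocation (file paths are illustrative, not shipped with this script):
#   python run_chinese_ref.py --file_name=./data.txt --ltp=./resources/ltp --bert=./resources/robert --save_path=./resources/ref.txt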
| 303 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids=None, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
class FlaxBlenderbotSmallModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=32, eos_token_id=2, pad_token_id=1, bos_token_id=0, initializer_range=0.02):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)

        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            use_cache=False,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )

        batch_size = input_ids.shape[0]
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size,
            d_model=24,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=32,
            decoder_ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size

    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_lm_uneven_forward(self):
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size,
            d_model=14,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=8,
            decoder_ffn_dim=8,
            max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class FlaxBlenderbotSmallModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotSmallModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot_small-90M")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
| 303 | 1 |
from math import sqrt
def solution(limit: int = 1000000) -> int:
    '''simple docstring'''
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size


if __name__ == "__main__":
    print(f"{solution() = }")
| 712 |
import warnings

from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor


logger = logging.get_logger(__name__)


class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 576 | 0 |